[linux-yocto] [PATCH] kmemleak: change the lock of kmemleak_object to raw_spinlock_t
Bruce Ashfield
bruce.ashfield at gmail.com
Fri May 10 13:55:27 PDT 2019
On Fri, May 10, 2019 at 4:32 AM <zhe.he at windriver.com> wrote:
> From: Liu Haitao <haitao.liu at windriver.com>
>
> The following call trace is triggered while kmemleak is running:
>
> BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974
> in_atomic(): 1, irqs_disabled(): 1, pid: 2401, name: kmemleak
> Preemption disabled at:
> [<ffffffff905f6271>] scan_block+0x31/0x120
> CPU: 12 PID: 247 Comm: kmemleak Tainted: G W 4.18.20-rt8-yocto-preempt-rt #1
> Hardware name: Intel Corp. Harcuvar/Server, BIOS HAVLCRB1.X64.0015.D62.1708310404 08/31/2017
> Call Trace:
> dump_stack+0x4f/0x6a
> ? scan_block+0x31/0x120
> ___might_sleep.cold.18+0xca/0xdb
> rt_spin_lock+0x37/0x60
> scan_block+0x9d/0x120
> scan_gray_list+0x108/0x170
> kmemleak_scan+0x230/0x460
> kmemleak_scan_thread+0x9d/0xba
> kthread+0x12c/0x150
> ? kmemleak_scan.cold.19+0x1b/0x1b
> ? kthread_flush_work_fn+0x20/0x20
> ret_from_fork+0x35/0x40
>
> Commit 41cf9e5bb14f ("kmemleak: Turn kmemleak_lock to raw spinlock on RT")
> changed kmemleak_lock to a raw spinlock. However, kmemleak_object->lock is
> still taken while kmemleak_lock is held in scan_block():
>
> scan_block()
> |
> raw_spin_lock_irqsave(&kmemleak_lock, flags)
> |
> spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
>
> In this case, object->lock is a sleeping lock on PREEMPT_RT (it is backed
> by an rt_mutex), so taking it inside the raw kmemleak_lock critical section
> can cause a sleep in atomic context.
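>
> To illustrate (a simplified sketch modelled on scan_block(), not code taken
> from this patch): on PREEMPT_RT a plain spinlock_t is backed by an rt_mutex
> and may sleep, while a raw_spinlock_t always busy-waits and is safe to take
> with interrupts disabled:
>
> 	/* Before: object->lock is a spinlock_t, i.e. an rt_mutex on RT,
> 	 * and may sleep inside the raw kmemleak_lock critical section.
> 	 */
> 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
> 	spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
> 	update_refs(object);
> 	spin_unlock(&object->lock);
> 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
>
> 	/* After this patch: object->lock is a raw_spinlock_t as well,
> 	 * so the nesting never sleeps and is legal on RT.
> 	 */
> 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
> 	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
> 	update_refs(object);
> 	raw_spin_unlock(&object->lock);
> 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);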
>
> Fixes: 41cf9e5bb14f ("kmemleak: Turn kmemleak_lock to raw spinlock on RT")
>
> Signed-off-by: Liu Haitao <haitao.liu at windriver.com>
>
> Rebased on top of the latest RT code.
>
> Signed-off-by: He Zhe <zhe.he at windriver.com>
> ---
> This is just for v5.0 preempt-rt branches.
>
> Upstream, kmemleak_lock is going to be converted to RCU, which would also
> get rid of this error, but that will take some time. For now it is worth
> merging this patch.
>
>
Sounds good. I've gone ahead and merged it to the 5.0-rt branches.
Bruce
> mm/kmemleak.c | 72 +++++++++++++++++++++++++++++------------------------
> 1 file changed, 36 insertions(+), 36 deletions(-)
>
> diff --git a/mm/kmemleak.c b/mm/kmemleak.c
> index b4036ff..00a7941 100644
> --- a/mm/kmemleak.c
> +++ b/mm/kmemleak.c
> @@ -148,7 +148,7 @@ struct kmemleak_scan_area {
> * (use_count) and freed using the RCU mechanism.
> */
> struct kmemleak_object {
> - spinlock_t lock;
> + raw_spinlock_t lock;
> unsigned int flags; /* object status flags */
> struct list_head object_list;
> struct list_head gray_list;
> @@ -586,7 +586,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
> INIT_LIST_HEAD(&object->object_list);
> INIT_LIST_HEAD(&object->gray_list);
> INIT_HLIST_HEAD(&object->area_list);
> - spin_lock_init(&object->lock);
> + raw_spin_lock_init(&object->lock);
> atomic_set(&object->use_count, 1);
> object->flags = OBJECT_ALLOCATED;
> object->pointer = ptr;
> @@ -668,9 +668,9 @@ static void __delete_object(struct kmemleak_object *object)
> * Locking here also ensures that the corresponding memory block
> * cannot be freed when it is being scanned.
> */
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> object->flags &= ~OBJECT_ALLOCATED;
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> put_object(object);
> }
>
> @@ -742,9 +742,9 @@ static void paint_it(struct kmemleak_object *object, int color)
> {
> unsigned long flags;
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> __paint_it(object, color);
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> }
>
> static void paint_ptr(unsigned long ptr, int color)
> @@ -804,7 +804,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
> goto out;
> }
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> if (size == SIZE_MAX) {
> size = object->pointer + object->size - ptr;
> } else if (ptr + size > object->pointer + object->size) {
> @@ -820,7 +820,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
>
> hlist_add_head(&area->node, &object->area_list);
> out_unlock:
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> out:
> put_object(object);
> }
> @@ -843,9 +843,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
> return;
> }
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> object->excess_ref = excess_ref;
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> put_object(object);
> }
>
> @@ -865,9 +865,9 @@ static void object_no_scan(unsigned long ptr)
> return;
> }
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> object->flags |= OBJECT_NO_SCAN;
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> put_object(object);
> }
>
> @@ -928,11 +928,11 @@ static void early_alloc(struct early_log *log)
> log->min_count, GFP_ATOMIC);
> if (!object)
> goto out;
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> for (i = 0; i < log->trace_len; i++)
> object->trace[i] = log->trace[i];
> object->trace_len = log->trace_len;
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> out:
> rcu_read_unlock();
> }
> @@ -1122,9 +1122,9 @@ void __ref kmemleak_update_trace(const void *ptr)
> return;
> }
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> object->trace_len = __save_stack_trace(object->trace);
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
>
> put_object(object);
> }
> @@ -1372,7 +1372,7 @@ static void scan_block(void *_start, void *_end,
> * previously acquired in scan_object(). These locks are
> * enclosed by scan_mutex.
> */
> - spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
> + raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
> /* only pass surplus references (object already gray) */
> if (color_gray(object)) {
> excess_ref = object->excess_ref;
> @@ -1381,7 +1381,7 @@ static void scan_block(void *_start, void *_end,
> excess_ref = 0;
> update_refs(object);
> }
> - spin_unlock(&object->lock);
> + raw_spin_unlock(&object->lock);
>
> if (excess_ref) {
> object = lookup_object(excess_ref, 0);
> @@ -1390,9 +1390,9 @@ static void scan_block(void *_start, void *_end,
> if (object == scanned)
> /* circular reference, ignore */
> continue;
> - spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
> + raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
> update_refs(object);
> - spin_unlock(&object->lock);
> + raw_spin_unlock(&object->lock);
> }
> }
> raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
> @@ -1426,7 +1426,7 @@ static void scan_object(struct kmemleak_object *object)
> * Once the object->lock is acquired, the corresponding memory block
> * cannot be freed (the same lock is acquired in delete_object).
> */
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> if (object->flags & OBJECT_NO_SCAN)
> goto out;
> if (!(object->flags & OBJECT_ALLOCATED))
> @@ -1445,9 +1445,9 @@ static void scan_object(struct kmemleak_object *object)
> if (start >= end)
> break;
>
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> cond_resched();
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> } while (object->flags & OBJECT_ALLOCATED);
> } else
> hlist_for_each_entry(area, &object->area_list, node)
> @@ -1455,7 +1455,7 @@ static void scan_object(struct kmemleak_object *object)
> (void *)(area->start + area->size),
> object);
> out:
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> }
>
> /*
> @@ -1508,7 +1508,7 @@ static void kmemleak_scan(void)
> /* prepare the kmemleak_object's */
> rcu_read_lock();
> list_for_each_entry_rcu(object, &object_list, object_list) {
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> #ifdef DEBUG
> /*
> * With a few exceptions there should be a maximum of
> @@ -1525,7 +1525,7 @@ static void kmemleak_scan(void)
> if (color_gray(object) && get_object(object))
> list_add_tail(&object->gray_list, &gray_list);
>
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> }
> rcu_read_unlock();
>
> @@ -1598,14 +1598,14 @@ static void kmemleak_scan(void)
> */
> rcu_read_lock();
> list_for_each_entry_rcu(object, &object_list, object_list) {
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
> && update_checksum(object) && get_object(object)) {
> /* color it gray temporarily */
> object->count = object->min_count;
> list_add_tail(&object->gray_list, &gray_list);
> }
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> }
> rcu_read_unlock();
>
> @@ -1625,7 +1625,7 @@ static void kmemleak_scan(void)
> */
> rcu_read_lock();
> list_for_each_entry_rcu(object, &object_list, object_list) {
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> if (unreferenced_object(object) &&
> !(object->flags & OBJECT_REPORTED)) {
> object->flags |= OBJECT_REPORTED;
> @@ -1635,7 +1635,7 @@ static void kmemleak_scan(void)
>
> new_leaks++;
> }
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> }
> rcu_read_unlock();
>
> @@ -1787,10 +1787,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
> struct kmemleak_object *object = v;
> unsigned long flags;
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
> print_unreferenced(seq, object);
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> return 0;
> }
>
> @@ -1820,9 +1820,9 @@ static int dump_str_object_info(const char *str)
> return -EINVAL;
> }
>
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> dump_object_info(object);
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
>
> put_object(object);
> return 0;
> @@ -1841,11 +1841,11 @@ static void kmemleak_clear(void)
>
> rcu_read_lock();
> list_for_each_entry_rcu(object, &object_list, object_list) {
> - spin_lock_irqsave(&object->lock, flags);
> + raw_spin_lock_irqsave(&object->lock, flags);
> if ((object->flags & OBJECT_REPORTED) &&
> unreferenced_object(object))
> __paint_it(object, KMEMLEAK_GREY);
> - spin_unlock_irqrestore(&object->lock, flags);
> + raw_spin_unlock_irqrestore(&object->lock, flags);
> }
> rcu_read_unlock();
>
> --
> 2.7.4
>
--
- Thou shalt not follow the NULL pointer, for chaos and madness await thee
at its end
- "Use the force Harry" - Gandalf, Star Trek II