4 * Copyright (C) 2008 ARM Limited
5 * Written by Catalin Marinas <catalin.marinas@arm.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 * For more information on the algorithm and kmemleak usage, please see
22 * Documentation/kmemleak.txt.
27 * The following locks and mutexes are used by kmemleak:
29 * - kmemleak_lock (rwlock): protects the object_list modifications and
30 * accesses to the object_tree_root. The object_list is the main list
31 * holding the metadata (struct kmemleak_object) for the allocated memory
 * blocks. The object_tree_root is a priority search tree used to look up
33 * metadata based on a pointer to the corresponding memory block. The
34 * kmemleak_object structures are added to the object_list and
35 * object_tree_root in the create_object() function called from the
36 * kmemleak_alloc() callback and removed in delete_object() called from the
37 * kmemleak_free() callback
38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39 * the metadata (e.g. count) are protected by this lock. Note that some
40 * members of this structure may be protected by other means (atomic or
41 * kmemleak_lock). This lock is also held when scanning the corresponding
42 * memory block to avoid the kernel freeing it via the kmemleak_free()
43 * callback. This is less heavyweight than holding a global lock like
44 * kmemleak_lock during scanning
45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46 * unreferenced objects at a time. The gray_list contains the objects which
47 * are already referenced or marked as false positives and need to be
48 * scanned. This list is only modified during a scanning episode when the
49 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
50 * Note that the kmemleak_object.use_count is incremented when an object is
51 * added to the gray_list and therefore cannot be freed. This mutex also
52 * prevents multiple users of the "kmemleak" debugfs file together with
 * modifications to the memory scanning parameters, including the scan_thread
 * pointer
56 * The kmemleak_object structures have a use_count incremented or decremented
57 * using the get_object()/put_object() functions. When the use_count becomes
58 * 0, this count can no longer be incremented and put_object() schedules the
59 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * object.
 */
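/*
 * Illustrative sketch of the reference-counting rules above (the same
 * pattern is used by find_and_get_object() later in this file):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);
 *	if (object && !get_object(object))
 *		object = NULL;		(freeing already scheduled)
 *	rcu_read_unlock();
 *	...
 *	put_object(object);		(may trigger the RCU freeing)
 */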
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66 #include <linux/init.h>
67 #include <linux/kernel.h>
68 #include <linux/list.h>
69 #include <linux/sched.h>
70 #include <linux/jiffies.h>
71 #include <linux/delay.h>
72 #include <linux/export.h>
73 #include <linux/kthread.h>
74 #include <linux/prio_tree.h>
76 #include <linux/debugfs.h>
77 #include <linux/seq_file.h>
78 #include <linux/cpumask.h>
79 #include <linux/spinlock.h>
80 #include <linux/mutex.h>
81 #include <linux/rcupdate.h>
82 #include <linux/stacktrace.h>
83 #include <linux/cache.h>
84 #include <linux/percpu.h>
85 #include <linux/hardirq.h>
86 #include <linux/mmzone.h>
87 #include <linux/slab.h>
88 #include <linux/thread_info.h>
89 #include <linux/err.h>
90 #include <linux/uaccess.h>
91 #include <linux/string.h>
92 #include <linux/nodemask.h>
94 #include <linux/workqueue.h>
95 #include <linux/crc32.h>
97 #include <asm/sections.h>
98 #include <asm/processor.h>
99 #include <linux/atomic.h>
101 #include <linux/kmemcheck.h>
102 #include <linux/kmemleak.h>
105 * Kmemleak configuration and common defines.
107 #define MAX_TRACE 16 /* stack trace length */
108 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
109 #define SECS_FIRST_SCAN 60 /* delay before the first scan */
110 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
111 #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
113 #define BYTES_PER_POINTER sizeof(void *)
115 /* GFP bitmask for kmemleak internal allocations */
116 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
120 /* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};
127 #define KMEMLEAK_GREY 0
128 #define KMEMLEAK_BLACK -1
131 * Structure holding the metadata for each allocated memory block.
132 * Modifications to such objects should be made while holding the
133 * object->lock. Insertions or deletions from object_list, gray_list or
134 * tree_node are already protected by the corresponding locks or mutex (see
135 * the notes on locking above). These objects are reference-counted
136 * (use_count) and freed using the RCU mechanism.
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};
164 /* flag representing the memory block allocation status */
165 #define OBJECT_ALLOCATED (1 << 0)
/* flag set after the first reporting of an unreferenced object */
167 #define OBJECT_REPORTED (1 << 1)
168 /* flag set to not scan the object */
169 #define OBJECT_NO_SCAN (1 << 2)
171 /* number of bytes to print per line; must be 16 or 32 */
172 #define HEX_ROW_SIZE 16
173 /* number of bytes to print at a time (1, 2, 4, 8) */
174 #define HEX_GROUP_SIZE 1
/* include ASCII after the hex output */
#define HEX_ASCII		1
177 /* max number of lines to be printed */
178 #define HEX_MAX_LINES 2
180 /* the list of all allocated objects */
181 static LIST_HEAD(object_list);
182 /* the list of gray-colored objects (see color_gray comment below) */
183 static LIST_HEAD(gray_list);
184 /* prio search tree for object boundaries */
185 static struct prio_tree_root object_tree_root;
186 /* rw_lock protecting the access to object_list and prio_tree_root */
187 static DEFINE_RWLOCK(kmemleak_lock);
189 /* allocation caches for kmemleak internal data */
190 static struct kmem_cache *object_cache;
191 static struct kmem_cache *scan_area_cache;
193 /* set if tracing memory operations is enabled */
194 static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
195 /* same as above but only for the kmemleak_free() callback */
196 static int kmemleak_free_enabled;
197 /* set in the late_initcall if there were no errors */
198 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
199 /* enables or disables early logging of the memory operations */
200 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
202 static atomic_t kmemleak_error = ATOMIC_INIT(0);
204 /* minimum and maximum address that may be valid pointers */
205 static unsigned long min_addr = ULONG_MAX;
206 static unsigned long max_addr;
208 static struct task_struct *scan_thread;
209 /* used to avoid reporting of recently allocated objects */
210 static unsigned long jiffies_min_age;
211 static unsigned long jiffies_last_scan;
212 /* delay between automatic memory scannings */
213 static signed long jiffies_scan_wait;
214 /* enables or disables the task stacks scanning */
215 static int kmemleak_stack_scan = 1;
216 /* protects the memory scanning, parameters and debug/kmemleak file access */
217 static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this variable, skipping the disable */
219 static int kmemleak_skip_disable;
223 * Early object allocation/freeing logging. Kmemleak is initialized after the
224 * kernel allocator. However, both the kernel allocator and kmemleak may
225 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */
/* kmemleak operation type for early logging */
enum kmemleak_op_type {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};
/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};
254 /* early logging buffer and current position */
255 static struct early_log
256 early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
257 static int crt_early_log __initdata;
259 static void kmemleak_disable(void);
262 * Print a warning and dump the stack trace.
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)
/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)
 * Printing of the object's hex dump to the seq file. The number of lines to be
281 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
282 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
283 * with the object->lock held.
285 static void hex_dump_object(struct seq_file *seq,
286 struct kmemleak_object *object)
288 const u8 *ptr = (const u8 *)object->pointer;
289 int i, len, remaining;
290 unsigned char linebuf[HEX_ROW_SIZE * 5];
292 /* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
296 seq_printf(seq, " hex dump (first %d bytes):\n", len);
297 for (i = 0; i < len; i += HEX_ROW_SIZE) {
298 int linelen = min(remaining, HEX_ROW_SIZE);
300 remaining -= HEX_ROW_SIZE;
301 hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
304 seq_printf(seq, " %s\n", linebuf);
309 * Object colors, encoded with count and min_count:
310 * - white - orphan object, not enough references to it (count < min_count)
311 * - gray - not orphan, not marked as false positive (min_count == 0) or
312 * sufficient references to it (count >= min_count)
313 * - black - ignore, it doesn't contain references (e.g. text section)
314 * (min_count == -1). No function defined for this color.
315 * Newly created objects don't have any color assigned (object->count == -1)
316 * before the next memory scan when they become white.
318 static bool color_white(const struct kmemleak_object *object)
320 return object->count != KMEMLEAK_BLACK &&
321 object->count < object->min_count;
324 static bool color_gray(const struct kmemleak_object *object)
326 return object->min_count != KMEMLEAK_BLACK &&
327 object->count >= object->min_count;
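/*
 * Worked example of the encoding above: an object registered with
 * min_count == 1 is white while count == 0 (a leak candidate) and turns
 * gray once a scan finds count >= 1 pointers to it; an object registered
 * with min_count == 0 is gray from the start; min_count == -1
 * (KMEMLEAK_BLACK) disables both scanning and reporting for the object.
 */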
331 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
333 * pointers temporarily stored in CPU registers.
335 static bool unreferenced_object(struct kmemleak_object *object)
337 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
338 time_before_eq(object->jiffies + jiffies_min_age,
 * Printing of an unreferenced object's information to the seq file. The
344 * print_unreferenced function must be called with the object->lock held.
346 static void print_unreferenced(struct seq_file *seq,
347 struct kmemleak_object *object)
350 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
352 seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
353 object->pointer, object->size);
354 seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
355 object->comm, object->pid, object->jiffies,
356 msecs_age / 1000, msecs_age % 1000);
357 hex_dump_object(seq, object);
358 seq_printf(seq, " backtrace:\n");
360 for (i = 0; i < object->trace_len; i++) {
361 void *ptr = (void *)object->trace[i];
362 seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
371 static void dump_object_info(struct kmemleak_object *object)
373 struct stack_trace trace;
375 trace.nr_entries = object->trace_len;
376 trace.entries = object->trace;
378 pr_notice("Object 0x%08lx (size %zu):\n",
379 object->tree_node.start, object->size);
380 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
381 object->comm, object->pid, object->jiffies);
382 pr_notice(" min_count = %d\n", object->min_count);
383 pr_notice(" count = %d\n", object->count);
384 pr_notice(" flags = 0x%lx\n", object->flags);
385 pr_notice(" checksum = %d\n", object->checksum);
386 pr_notice(" backtrace:\n");
387 print_stack_trace(&trace, 4);
 * Look up a memory block's metadata (kmemleak_object) in the priority search
392 * tree based on a pointer value. If alias is 0, only values pointing to the
393 * beginning of the memory block are allowed. The kmemleak_lock must be held
394 * when calling this function.
396 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
398 struct prio_tree_node *node;
399 struct prio_tree_iter iter;
400 struct kmemleak_object *object;
402 prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
403 node = prio_tree_next(&iter);
405 object = prio_tree_entry(node, struct kmemleak_object,
407 if (!alias && object->pointer != ptr) {
408 pr_warning("Found object by alias at 0x%08lx\n", ptr);
410 dump_object_info(object);
420 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object must no longer be used. This function must be
423 * called under the protection of rcu_read_lock().
425 static int get_object(struct kmemleak_object *object)
427 return atomic_inc_not_zero(&object->use_count);
431 * RCU callback to free a kmemleak_object.
433 static void free_object_rcu(struct rcu_head *rcu)
435 struct hlist_node *elem, *tmp;
436 struct kmemleak_scan_area *area;
437 struct kmemleak_object *object =
438 container_of(rcu, struct kmemleak_object, rcu);
441 * Once use_count is 0 (guaranteed by put_object), there is no other
442 * code accessing this object, hence no need for locking.
444 hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
446 kmem_cache_free(scan_area_cache, area);
448 kmem_cache_free(object_cache, object);
452 * Decrement the object use_count. Once the count is 0, free the object using
453 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
454 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
458 static void put_object(struct kmemleak_object *object)
460 if (!atomic_dec_and_test(&object->use_count))
463 /* should only get here after delete_object was called */
464 WARN_ON(object->flags & OBJECT_ALLOCATED);
466 call_rcu(&object->rcu, free_object_rcu);
470 * Look up an object in the prio search tree and increase its use_count.
472 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
475 struct kmemleak_object *object = NULL;
478 read_lock_irqsave(&kmemleak_lock, flags);
479 if (ptr >= min_addr && ptr < max_addr)
480 object = lookup_object(ptr, alias);
481 read_unlock_irqrestore(&kmemleak_lock, flags);
483 /* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
492 * Save stack trace to the given array of MAX_TRACE size.
494 static int __save_stack_trace(unsigned long *trace)
496 struct stack_trace stack_trace;
498 stack_trace.max_entries = MAX_TRACE;
499 stack_trace.nr_entries = 0;
500 stack_trace.entries = trace;
501 stack_trace.skip = 2;
502 save_stack_trace(&stack_trace);
504 return stack_trace.nr_entries;
508 * Create the metadata (struct kmemleak_object) corresponding to an allocated
509 * memory block and add it to the object_list and object_tree_root.
511 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
512 int min_count, gfp_t gfp)
515 struct kmemleak_object *object;
516 struct prio_tree_node *node;
518 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
520 pr_warning("Cannot allocate a kmemleak_object structure\n");
525 INIT_LIST_HEAD(&object->object_list);
526 INIT_LIST_HEAD(&object->gray_list);
527 INIT_HLIST_HEAD(&object->area_list);
528 spin_lock_init(&object->lock);
529 atomic_set(&object->use_count, 1);
530 object->flags = OBJECT_ALLOCATED;
531 object->pointer = ptr;
533 object->min_count = min_count;
534 object->count = 0; /* white color initially */
535 object->jiffies = jiffies;
536 object->checksum = 0;
538 /* task information */
541 strncpy(object->comm, "hardirq", sizeof(object->comm));
542 } else if (in_softirq()) {
544 strncpy(object->comm, "softirq", sizeof(object->comm));
546 object->pid = current->pid;
548 * There is a small chance of a race with set_task_comm(),
549 * however using get_task_comm() here may cause locking
550 * dependency issues with current->alloc_lock. In the worst
551 * case, the command line is not correct.
553 strncpy(object->comm, current->comm, sizeof(object->comm));
556 /* kernel backtrace */
557 object->trace_len = __save_stack_trace(object->trace);
559 INIT_PRIO_TREE_NODE(&object->tree_node);
560 object->tree_node.start = ptr;
561 object->tree_node.last = ptr + size - 1;
563 write_lock_irqsave(&kmemleak_lock, flags);
565 min_addr = min(min_addr, ptr);
566 max_addr = max(max_addr, ptr + size);
567 node = prio_tree_insert(&object_tree_root, &object->tree_node);
	 * The caller of the kernel allocator does not yet have a pointer to
	 * the new memory block, so it cannot be freed from under us. However,
	 * we still hold the kmemleak_lock here in case parts of the kernel
	 * started freeing random memory blocks.
574 if (node != &object->tree_node) {
575 kmemleak_stop("Cannot insert 0x%lx into the object search tree "
576 "(already existing)\n", ptr);
577 object = lookup_object(ptr, 1);
578 spin_lock(&object->lock);
579 dump_object_info(object);
580 spin_unlock(&object->lock);
584 list_add_tail_rcu(&object->object_list, &object_list);
586 write_unlock_irqrestore(&kmemleak_lock, flags);
591 * Remove the metadata (struct kmemleak_object) for a memory block from the
592 * object_list and object_tree_root and decrement its use_count.
594 static void __delete_object(struct kmemleak_object *object)
598 write_lock_irqsave(&kmemleak_lock, flags);
599 prio_tree_remove(&object_tree_root, &object->tree_node);
600 list_del_rcu(&object->object_list);
601 write_unlock_irqrestore(&kmemleak_lock, flags);
603 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
604 WARN_ON(atomic_read(&object->use_count) < 2);
607 * Locking here also ensures that the corresponding memory block
608 * cannot be freed when it is being scanned.
610 spin_lock_irqsave(&object->lock, flags);
611 object->flags &= ~OBJECT_ALLOCATED;
612 spin_unlock_irqrestore(&object->lock, flags);
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
620 static void delete_object_full(unsigned long ptr)
622 struct kmemleak_object *object;
624 object = find_and_get_object(ptr, 0);
627 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
632 __delete_object(object);
637 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
638 * delete it. If the memory block is partially freed, the function may create
639 * additional metadata for the remaining parts of the block.
641 static void delete_object_part(unsigned long ptr, size_t size)
643 struct kmemleak_object *object;
644 unsigned long start, end;
646 object = find_and_get_object(ptr, 1);
649 kmemleak_warn("Partially freeing unknown object at 0x%08lx "
650 "(size %zu)\n", ptr, size);
654 __delete_object(object);
657 * Create one or two objects that may result from the memory block
658 * split. Note that partial freeing is only done by free_bootmem() and
659 * this happens before kmemleak_init() is called. The path below is
660 * only executed during early log recording in kmemleak_init(), so
661 * GFP_KERNEL is enough.
663 start = object->pointer;
664 end = object->pointer + object->size;
666 create_object(start, ptr - start, object->min_count,
668 if (ptr + size < end)
669 create_object(ptr + size, end - ptr - size, object->min_count,
675 static void __paint_it(struct kmemleak_object *object, int color)
677 object->min_count = color;
678 if (color == KMEMLEAK_BLACK)
679 object->flags |= OBJECT_NO_SCAN;
682 static void paint_it(struct kmemleak_object *object, int color)
686 spin_lock_irqsave(&object->lock, flags);
687 __paint_it(object, color);
688 spin_unlock_irqrestore(&object->lock, flags);
691 static void paint_ptr(unsigned long ptr, int color)
693 struct kmemleak_object *object;
695 object = find_and_get_object(ptr, 0);
697 kmemleak_warn("Trying to color unknown object "
698 "at 0x%08lx as %s\n", ptr,
699 (color == KMEMLEAK_GREY) ? "Grey" :
700 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
703 paint_it(object, color);
708 * Mark an object permanently as gray-colored so that it can no longer be
709 * reported as a leak. This is used in general to mark a false positive.
711 static void make_gray_object(unsigned long ptr)
713 paint_ptr(ptr, KMEMLEAK_GREY);
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
720 static void make_black_object(unsigned long ptr)
722 paint_ptr(ptr, KMEMLEAK_BLACK);
726 * Add a scanning area to the object. If at least one such area is added,
727 * kmemleak will only scan these ranges rather than the whole memory block.
729 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
732 struct kmemleak_object *object;
733 struct kmemleak_scan_area *area;
735 object = find_and_get_object(ptr, 1);
737 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
742 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
744 pr_warning("Cannot allocate a scan area\n");
748 spin_lock_irqsave(&object->lock, flags);
749 if (size == SIZE_MAX) {
750 size = object->pointer + object->size - ptr;
751 } else if (ptr + size > object->pointer + object->size) {
752 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
753 dump_object_info(object);
754 kmem_cache_free(scan_area_cache, area);
758 INIT_HLIST_NODE(&area->node);
762 hlist_add_head(&area->node, &object->area_list);
764 spin_unlock_irqrestore(&object->lock, flags);
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to it
 * are searched.
 */
774 static void object_no_scan(unsigned long ptr)
777 struct kmemleak_object *object;
779 object = find_and_get_object(ptr, 0);
781 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
785 spin_lock_irqsave(&object->lock, flags);
786 object->flags |= OBJECT_NO_SCAN;
787 spin_unlock_irqrestore(&object->lock, flags);
792 * Log an early kmemleak_* call to the early_log buffer. These calls will be
793 * processed later once kmemleak is fully initialized.
795 static void __init log_early(int op_type, const void *ptr, size_t size,
799 struct early_log *log;
801 if (crt_early_log >= ARRAY_SIZE(early_log)) {
802 pr_warning("Early log buffer exceeded, "
803 "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
809 * There is no need for locking since the kernel is still in UP mode
810 * at this stage. Disabling the IRQs is enough.
812 local_irq_save(flags);
813 log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
818 if (op_type == KMEMLEAK_ALLOC)
819 log->trace_len = __save_stack_trace(log->trace);
821 local_irq_restore(flags);
825 * Log an early allocated block and populate the stack trace.
827 static void early_alloc(struct early_log *log)
829 struct kmemleak_object *object;
833 if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
840 object = create_object((unsigned long)log->ptr, log->size,
841 log->min_count, GFP_ATOMIC);
844 spin_lock_irqsave(&object->lock, flags);
845 for (i = 0; i < log->trace_len; i++)
846 object->trace[i] = log->trace[i];
847 object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
	rcu_read_unlock();
854 * kmemleak_alloc - register a newly allocated object
855 * @ptr: pointer to beginning of the object
856 * @size: size of the object
857 * @min_count: minimum number of references to this object. If during memory
858 * scanning a number of references less than @min_count is found,
859 * the object is reported as a memory leak. If @min_count is 0,
860 * the object is never reported as a leak. If @min_count is -1,
861 * the object is ignored (not scanned and not reported as a leak)
862 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
864 * This function is called from the kernel allocators when a new object
865 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
867 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
870 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
872 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
873 create_object((unsigned long)ptr, size, min_count, gfp);
874 else if (atomic_read(&kmemleak_early_log))
875 log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
877 EXPORT_SYMBOL_GPL(kmemleak_alloc);
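/*
 * Illustrative (hypothetical) caller: memory taken directly from the page
 * allocator is not tracked automatically here, so such a user could
 * register its blocks by hand, e.g.:
 *
 *	ptr = (void *)__get_free_pages(GFP_KERNEL, order);
 *	if (ptr)
 *		kmemleak_alloc(ptr, PAGE_SIZE << order, 1, GFP_KERNEL);
 */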
880 * kmemleak_free - unregister a previously registered object
881 * @ptr: pointer to beginning of the object
883 * This function is called from the kernel allocators when an object (memory
884 * block) is freed (kmem_cache_free, kfree, vfree etc.).
886 void __ref kmemleak_free(const void *ptr)
888 pr_debug("%s(0x%p)\n", __func__, ptr);
890 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
891 delete_object_full((unsigned long)ptr);
892 else if (atomic_read(&kmemleak_early_log))
893 log_early(KMEMLEAK_FREE, ptr, 0, 0);
895 EXPORT_SYMBOL_GPL(kmemleak_free);
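/*
 * Continuing the hypothetical example above, the matching tear-down path
 * must unregister the block before handing it back to its allocator:
 *
 *	kmemleak_free(ptr);
 *	free_pages((unsigned long)ptr, order);
 */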
898 * kmemleak_free_part - partially unregister a previously registered object
899 * @ptr: pointer to the beginning or inside the object. This also
900 * represents the start of the range to be freed
901 * @size: size to be unregistered
903 * This function is called when only a part of a memory block is freed
904 * (usually from the bootmem allocator).
906 void __ref kmemleak_free_part(const void *ptr, size_t size)
908 pr_debug("%s(0x%p)\n", __func__, ptr);
910 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
911 delete_object_part((unsigned long)ptr, size);
912 else if (atomic_read(&kmemleak_early_log))
913 log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
915 EXPORT_SYMBOL_GPL(kmemleak_free_part);
918 * kmemleak_not_leak - mark an allocated object as false positive
919 * @ptr: pointer to beginning of the object
921 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and to always be scanned.
924 void __ref kmemleak_not_leak(const void *ptr)
926 pr_debug("%s(0x%p)\n", __func__, ptr);
928 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
929 make_gray_object((unsigned long)ptr);
930 else if (atomic_read(&kmemleak_early_log))
931 log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
933 EXPORT_SYMBOL(kmemleak_not_leak);
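/*
 * Typical (illustrative) use: an object whose only reference lives where
 * kmemleak cannot scan it, e.g. a physical address programmed into a
 * device register (dev_program_buffer() is a hypothetical helper):
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	dev_program_buffer(dev, virt_to_phys(obj));
 *	kmemleak_not_leak(obj);
 */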
936 * kmemleak_ignore - ignore an allocated object
937 * @ptr: pointer to beginning of the object
939 * Calling this function on an object will cause the memory block to be
940 * ignored (not scanned and not reported as a leak). This is usually done when
941 * it is known that the corresponding block is not a leak and does not contain
942 * any references to other allocated memory blocks.
944 void __ref kmemleak_ignore(const void *ptr)
946 pr_debug("%s(0x%p)\n", __func__, ptr);
948 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
949 make_black_object((unsigned long)ptr);
950 else if (atomic_read(&kmemleak_early_log))
951 log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
953 EXPORT_SYMBOL(kmemleak_ignore);
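/*
 * In contrast to kmemleak_not_leak() above, an ignored object is dropped
 * from scanning as well, so this annotation suits blocks known to contain
 * no kernel pointers (hypothetical example; CRC_TABLE_SIZE is made up):
 *
 *	crc_table = kmalloc(CRC_TABLE_SIZE, GFP_KERNEL);
 *	kmemleak_ignore(crc_table);
 */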
956 * kmemleak_scan_area - limit the range to be scanned in an allocated object
957 * @ptr: pointer to beginning or inside the object. This also
958 * represents the start of the scan area
959 * @size: size of the scan area
960 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
962 * This function is used when it is known that only certain parts of an object
963 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
966 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
968 pr_debug("%s(0x%p)\n", __func__, ptr);
970 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
971 add_scan_area((unsigned long)ptr, size, gfp);
972 else if (atomic_read(&kmemleak_early_log))
973 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
975 EXPORT_SYMBOL(kmemleak_scan_area);
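/*
 * Illustrative sketch: if only one member of a (hypothetical) structure
 * can hold pointers to other allocations, the scan may be narrowed to it:
 *
 *	f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	kmemleak_scan_area(&f->ptr_table, sizeof(f->ptr_table), GFP_KERNEL);
 */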
978 * kmemleak_no_scan - do not scan an allocated object
979 * @ptr: pointer to beginning of the object
981 * This function notifies kmemleak not to scan the given memory block. Useful
982 * in situations where it is known that the given object does not contain any
983 * references to other objects. Kmemleak will not scan such objects reducing
984 * the number of false negatives.
986 void __ref kmemleak_no_scan(const void *ptr)
988 pr_debug("%s(0x%p)\n", __func__, ptr);
990 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
991 object_no_scan((unsigned long)ptr);
992 else if (atomic_read(&kmemleak_early_log))
993 log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
995 EXPORT_SYMBOL(kmemleak_no_scan);
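/*
 * Unlike kmemleak_ignore(), a no-scan object is still tracked and can
 * still be reported as a leak; only its contents are skipped during
 * scanning (hypothetical example; RING_BYTES is made up):
 *
 *	ring = kmalloc(RING_BYTES, GFP_KERNEL);
 *	kmemleak_no_scan(ring);
 */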
998 * Update an object's checksum and return true if it was modified.
1000 static bool update_checksum(struct kmemleak_object *object)
1002 u32 old_csum = object->checksum;
1004 if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1007 object->checksum = crc32(0, (void *)object->pointer, object->size);
1008 return object->checksum != old_csum;
 * Memory scanning is a long process and it needs to be interruptible. This
1013 * function checks whether such interrupt condition occurred.
1015 static int scan_should_stop(void)
1017 if (!atomic_read(&kmemleak_enabled))
1021 * This function may be called from either process or kthread context,
1022 * hence the need to check for both stop conditions.
1025 return signal_pending(current);
1027 return kthread_should_stop();
1033 * Scan a memory block (exclusive range) for valid pointers and add those
1034 * found to the gray list.
1036 static void scan_block(void *_start, void *_end,
1037 struct kmemleak_object *scanned, int allow_resched)
1040 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1041 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1043 for (ptr = start; ptr < end; ptr++) {
1044 struct kmemleak_object *object;
1045 unsigned long flags;
1046 unsigned long pointer;
1050 if (scan_should_stop())
1053 /* don't scan uninitialized memory */
1054 if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1060 object = find_and_get_object(pointer, 1);
1063 if (object == scanned) {
1064 /* self referenced, ignore */
1070 * Avoid the lockdep recursive warning on object->lock being
1071 * previously acquired in scan_object(). These locks are
1072 * enclosed by scan_mutex.
1074 spin_lock_irqsave_nested(&object->lock, flags,
1075 SINGLE_DEPTH_NESTING);
1076 if (!color_white(object)) {
1077 /* non-orphan, ignored or new */
1078 spin_unlock_irqrestore(&object->lock, flags);
1084 * Increase the object's reference count (number of pointers
1085 * to the memory block). If this count reaches the required
1086 * minimum, the object's color will become gray and it will be
1087 * added to the gray_list.
1090 if (color_gray(object)) {
1091 list_add_tail(&object->gray_list, &gray_list);
1092 spin_unlock_irqrestore(&object->lock, flags);
1096 spin_unlock_irqrestore(&object->lock, flags);
 * Scan a memory block corresponding to a kmemleak_object. A precondition is
 * that object->use_count >= 1.
 */
1105 static void scan_object(struct kmemleak_object *object)
1107 struct kmemleak_scan_area *area;
1108 struct hlist_node *elem;
1109 unsigned long flags;
1112 * Once the object->lock is acquired, the corresponding memory block
1113 * cannot be freed (the same lock is acquired in delete_object).
1115 spin_lock_irqsave(&object->lock, flags);
1116 if (object->flags & OBJECT_NO_SCAN)
1118 if (!(object->flags & OBJECT_ALLOCATED))
1119 /* already freed object */
1121 if (hlist_empty(&object->area_list)) {
1122 void *start = (void *)object->pointer;
1123 void *end = (void *)(object->pointer + object->size);
1125 while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1126 !(object->flags & OBJECT_NO_SCAN)) {
1127 scan_block(start, min(start + MAX_SCAN_SIZE, end),
1129 start += MAX_SCAN_SIZE;
1131 spin_unlock_irqrestore(&object->lock, flags);
1133 spin_lock_irqsave(&object->lock, flags);
1136 hlist_for_each_entry(area, elem, &object->area_list, node)
1137 scan_block((void *)area->start,
1138 (void *)(area->start + area->size),
1141 spin_unlock_irqrestore(&object->lock, flags);
1145 * Scan the objects already referenced (gray objects). More objects will be
1146 * referenced and, if there are no memory leaks, all the objects are scanned.
1148 static void scan_gray_list(void)
1150 struct kmemleak_object *object, *tmp;
1153 * The list traversal is safe for both tail additions and removals
1154 * from inside the loop. The kmemleak objects cannot be freed from
1155 * outside the loop because their use_count was incremented.
1157 object = list_entry(gray_list.next, typeof(*object), gray_list);
1158 while (&object->gray_list != &gray_list) {
1161 /* may add new objects to the list */
1162 if (!scan_should_stop())
1163 scan_object(object);
1165 tmp = list_entry(object->gray_list.next, typeof(*object),
1168 /* remove the object from the list and release it */
1169 list_del(&object->gray_list);
1174 WARN_ON(!list_empty(&gray_list));
1178 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
1182 static void kmemleak_scan(void)
1184 unsigned long flags;
1185 struct kmemleak_object *object;
1189 jiffies_last_scan = jiffies;
1191 /* prepare the kmemleak_object's */
1193 list_for_each_entry_rcu(object, &object_list, object_list) {
1194 spin_lock_irqsave(&object->lock, flags);
1197 * With a few exceptions there should be a maximum of
1198 * 1 reference to any object at this point.
1200 if (atomic_read(&object->use_count) > 1) {
1201 pr_debug("object->use_count = %d\n",
1202 atomic_read(&object->use_count));
1203 dump_object_info(object);
1206 /* reset the reference count (whiten the object) */
1208 if (color_gray(object) && get_object(object))
1209 list_add_tail(&object->gray_list, &gray_list);
1211 spin_unlock_irqrestore(&object->lock, flags);
1215 /* data/bss scanning */
1216 scan_block(_sdata, _edata, NULL, 1);
1217 scan_block(__bss_start, __bss_stop, NULL, 1);
1220 /* per-cpu sections scanning */
1221 for_each_possible_cpu(i)
1222 scan_block(__per_cpu_start + per_cpu_offset(i),
1223 __per_cpu_end + per_cpu_offset(i), NULL, 1);
1227 * Struct page scanning for each node. The code below is not yet safe
1228 * with MEMORY_HOTPLUG.
1230 for_each_online_node(i) {
1231 pg_data_t *pgdat = NODE_DATA(i);
1232 unsigned long start_pfn = pgdat->node_start_pfn;
1233 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1236 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1239 if (!pfn_valid(pfn))
1241 page = pfn_to_page(pfn);
1242 /* only scan if page is in use */
1243 if (page_count(page) == 0)
1245 scan_block(page, page + 1, NULL, 1);
1250 * Scanning the task stacks (may introduce false negatives).
1252 if (kmemleak_stack_scan) {
1253 struct task_struct *p, *g;
1255 read_lock(&tasklist_lock);
1256 do_each_thread(g, p) {
1257 scan_block(task_stack_page(p), task_stack_page(p) +
1258 THREAD_SIZE, NULL, 0);
1259 } while_each_thread(g, p);
1260 read_unlock(&tasklist_lock);
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();
1270 * Check for new or unreferenced objects modified since the previous
1271 * scan and color them gray until the next scan.
1274 list_for_each_entry_rcu(object, &object_list, object_list) {
1275 spin_lock_irqsave(&object->lock, flags);
1276 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1277 && update_checksum(object) && get_object(object)) {
1278 /* color it gray temporarily */
1279 object->count = object->min_count;
1280 list_add_tail(&object->gray_list, &gray_list);
1282 spin_unlock_irqrestore(&object->lock, flags);
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();
1292 * If scanning was stopped do not report any new unreferenced objects.
1294 if (scan_should_stop())
1298 * Scanning result reporting.
1301 list_for_each_entry_rcu(object, &object_list, object_list) {
1302 spin_lock_irqsave(&object->lock, flags);
1303 if (unreferenced_object(object) &&
1304 !(object->flags & OBJECT_REPORTED)) {
1305 object->flags |= OBJECT_REPORTED;
1308 spin_unlock_irqrestore(&object->lock, flags);
1313 pr_info("%d new suspected memory leaks (see "
1314 "/sys/kernel/debug/kmemleak)\n", new_leaks);
1319 * Thread function performing automatic memory scanning. Unreferenced objects
1320 * at the end of a memory scan are reported but only the first time.
1322 static int kmemleak_scan_thread(void *arg)
1324 static int first_run = 1;
1326 pr_info("Automatic memory scanning thread started\n");
1327 set_user_nice(current, 10);
1330 * Wait before the first scan to allow the system to fully initialize.
1334 ssleep(SECS_FIRST_SCAN);
1337 while (!kthread_should_stop()) {
1338 signed long timeout = jiffies_scan_wait;
1340 mutex_lock(&scan_mutex);
1342 mutex_unlock(&scan_mutex);
1344 /* wait before the next scan */
1345 while (timeout && !kthread_should_stop())
1346 timeout = schedule_timeout_interruptible(timeout);
1349 pr_info("Automatic memory scanning thread ended\n");
1355 * Start the automatic memory scanning thread. This function must be called
1356 * with the scan_mutex held.
1358 static void start_scan_thread(void)
1362 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1363 if (IS_ERR(scan_thread)) {
1364 pr_warning("Failed to create the scan thread\n");
1370 * Stop the automatic memory scanning thread. This function must be called
1371 * with the scan_mutex held.
1373 static void stop_scan_thread(void)
1376 kthread_stop(scan_thread);
1382 * Iterate over the object_list and return the first valid object at or after
1383 * the required position with its use_count incremented. The function triggers
1384 * a memory scanning when the pos argument points to the first position.
1386 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1388 struct kmemleak_object *object;
1392 err = mutex_lock_interruptible(&scan_mutex);
1394 return ERR_PTR(err);
1397 list_for_each_entry_rcu(object, &object_list, object_list) {
1400 if (get_object(object))
1409 * Return the next object in the object_list. The function decrements the
1410 * use_count of the previous object and increases that of the next one.
1412 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1414 struct kmemleak_object *prev_obj = v;
1415 struct kmemleak_object *next_obj = NULL;
1416 struct list_head *n = &prev_obj->object_list;
1420 list_for_each_continue_rcu(n, &object_list) {
1421 struct kmemleak_object *obj =
1422 list_entry(n, struct kmemleak_object, object_list);
1423 if (get_object(obj)) {
1429 put_object(prev_obj);
1434 * Decrement the use_count of the last object required, if any.
1436 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1440 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1441 * waiting was interrupted, so only release it if !IS_ERR.
1444 mutex_unlock(&scan_mutex);
1451 * Print the information for an unreferenced object to the seq file.
1453 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1455 struct kmemleak_object *object = v;
1456 unsigned long flags;
1458 spin_lock_irqsave(&object->lock, flags);
1459 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1460 print_unreferenced(seq, object);
1461 spin_unlock_irqrestore(&object->lock, flags);
1465 static const struct seq_operations kmemleak_seq_ops = {
1466 .start = kmemleak_seq_start,
1467 .next = kmemleak_seq_next,
1468 .stop = kmemleak_seq_stop,
1469 .show = kmemleak_seq_show,
1472 static int kmemleak_open(struct inode *inode, struct file *file)
1474 if (!atomic_read(&kmemleak_enabled))
1477 return seq_open(file, &kmemleak_seq_ops);
1480 static int kmemleak_release(struct inode *inode, struct file *file)
1482 return seq_release(inode, file);
1485 static int dump_str_object_info(const char *str)
1487 unsigned long flags;
1488 struct kmemleak_object *object;
	addr = simple_strtoul(str, NULL, 0);
1492 object = find_and_get_object(addr, 0);
1494 pr_info("Unknown object at 0x%08lx\n", addr);
1498 spin_lock_irqsave(&object->lock, flags);
1499 dump_object_info(object);
1500 spin_unlock_irqrestore(&object->lock, flags);
1507 * We use grey instead of black to ensure we can do future scans on the same
1508 * objects. If we did not do future scans these black objects could
1509 * potentially contain references to newly allocated objects in the future and
1510 * we'd end up with false positives.
1512 static void kmemleak_clear(void)
1514 struct kmemleak_object *object;
1515 unsigned long flags;
1518 list_for_each_entry_rcu(object, &object_list, object_list) {
1519 spin_lock_irqsave(&object->lock, flags);
1520 if ((object->flags & OBJECT_REPORTED) &&
1521 unreferenced_object(object))
1522 __paint_it(object, KMEMLEAK_GREY);
1523 spin_unlock_irqrestore(&object->lock, flags);
1529 * File write operation to configure kmemleak at run-time. The following
1530 * commands can be written to the /sys/kernel/debug/kmemleak file:
1531 * off - disable kmemleak (irreversible)
1532 * stack=on - enable the task stacks scanning
1533 * stack=off - disable the tasks stacks scanning
1534 * scan=on - start the automatic memory scanning thread
1535 * scan=off - stop the automatic memory scanning thread
 * scan=... - set the automatic memory scanning period in seconds (0 to
 *            stop the automatic scanning)
1538 * scan - trigger a memory scan
1539 * clear - mark all current reported unreferenced kmemleak objects as
1540 * grey to ignore printing them
1541 * dump=... - dump information about the object found at the given address
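 *
 * For example, from a shell:
 *
 *	echo scan > /sys/kernel/debug/kmemleak
 *	echo scan=600 > /sys/kernel/debug/kmemleak
 *	echo clear > /sys/kernel/debug/kmemleak
 */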
1543 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1544 size_t size, loff_t *ppos)
1550 buf_size = min(size, (sizeof(buf) - 1));
1551 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1555 ret = mutex_lock_interruptible(&scan_mutex);
1559 if (strncmp(buf, "off", 3) == 0)
1561 else if (strncmp(buf, "stack=on", 8) == 0)
1562 kmemleak_stack_scan = 1;
1563 else if (strncmp(buf, "stack=off", 9) == 0)
1564 kmemleak_stack_scan = 0;
1565 else if (strncmp(buf, "scan=on", 7) == 0)
1566 start_scan_thread();
1567 else if (strncmp(buf, "scan=off", 8) == 0)
1569 else if (strncmp(buf, "scan=", 5) == 0) {
1572 ret = strict_strtoul(buf + 5, 0, &secs);
1577 jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1578 start_scan_thread();
1580 } else if (strncmp(buf, "scan", 4) == 0)
1582 else if (strncmp(buf, "clear", 5) == 0)
1584 else if (strncmp(buf, "dump=", 5) == 0)
1585 ret = dump_str_object_info(buf + 5);
out:
	mutex_unlock(&scan_mutex);
1594 /* ignore the rest of the buffer, only one command at a time */
1599 static const struct file_operations kmemleak_fops = {
1600 .owner = THIS_MODULE,
1601 .open = kmemleak_open,
1603 .write = kmemleak_write,
1604 .llseek = seq_lseek,
1605 .release = kmemleak_release,
1609 * Perform the freeing of the kmemleak internal objects after waiting for any
1610 * current memory scan to complete.
1612 static void kmemleak_do_cleanup(struct work_struct *work)
1614 struct kmemleak_object *object;
1616 mutex_lock(&scan_mutex);
1620 * Once the scan thread has stopped, it is safe to no longer track
1621 * object freeing. Ordering of the scan thread stopping and the memory
1622 * accesses below is guaranteed by the kthread_stop() function.
1624 kmemleak_free_enabled = 0;
1627 list_for_each_entry_rcu(object, &object_list, object_list)
1628 delete_object_full(object->pointer);
1630 mutex_unlock(&scan_mutex);
1633 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1636 * Disable kmemleak. No memory allocation/freeing will be traced once this
1637 * function is called. Disabling kmemleak is an irreversible operation.
1639 static void kmemleak_disable(void)
1641 /* atomically check whether it was already invoked */
1642 if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1645 /* stop any memory operation tracing */
1646 atomic_set(&kmemleak_early_log, 0);
1647 atomic_set(&kmemleak_enabled, 0);
1649 /* check whether it is too early for a kernel thread */
1650 if (atomic_read(&kmemleak_initialized))
1651 schedule_work(&cleanup_work);
1653 kmemleak_free_enabled = 0;
1655 pr_info("Kernel memory leak detector disabled\n");
1659 * Allow boot-time kmemleak disabling (enabled by default).
1661 static int kmemleak_boot_config(char *str)
1665 if (strcmp(str, "off") == 0)
1667 else if (strcmp(str, "on") == 0)
1668 kmemleak_skip_disable = 1;
1673 early_param("kmemleak", kmemleak_boot_config);
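/*
 * For instance, booting with "kmemleak=off" on the kernel command line
 * disables kmemleak before any early allocations are logged.
 */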
1676 * Kmemleak initialization.
1678 void __init kmemleak_init(void)
1681 unsigned long flags;
1683 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1684 if (!kmemleak_skip_disable) {
1690 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1691 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1693 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1694 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1695 INIT_PRIO_TREE_ROOT(&object_tree_root);
1697 /* the kernel is still in UP mode, so disabling the IRQs is enough */
1698 local_irq_save(flags);
1699 if (!atomic_read(&kmemleak_error)) {
1700 atomic_set(&kmemleak_enabled, 1);
1701 atomic_set(&kmemleak_early_log, 0);
1702 kmemleak_free_enabled = 1;
1704 local_irq_restore(flags);
1707 * This is the point where tracking allocations is safe. Automatic
1708 * scanning is started during the late initcall. Add the early logged
1709 * callbacks to the kmemleak infrastructure.
1711 for (i = 0; i < crt_early_log; i++) {
1712 struct early_log *log = &early_log[i];
		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
1743 * Late initialization function.
1745 static int __init kmemleak_late_init(void)
1747 struct dentry *dentry;
1749 atomic_set(&kmemleak_initialized, 1);
1751 if (atomic_read(&kmemleak_error)) {
1753 * Some error occurred and kmemleak was disabled. There is a
1754 * small chance that kmemleak_disable() was called immediately
1755 * after setting kmemleak_initialized and we may end up with
1756 * two clean-up threads but serialized by scan_mutex.
1758 schedule_work(&cleanup_work);
	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
1766 mutex_lock(&scan_mutex);
1767 start_scan_thread();
1768 mutex_unlock(&scan_mutex);
1770 pr_info("Kernel memory leak detector initialized\n");
1774 late_initcall(kmemleak_late_init);