/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed
 * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
 *   file together with modifications to the memory scanning parameters
 *   including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
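
/*
 * The use_count/RCU rule above boils down to the following pattern, a
 * condensed sketch of find_and_get_object() later in this file (the
 * kmemleak_lock taken around lookup_object() is omitted for brevity):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);
 *	if (object && !get_object(object))
 *		object = NULL;
 *	rcu_read_unlock();
 */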

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define REPORTS_NR              50      /* maximum number of reported leaks */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define MSECS_SCAN_YIELD        10      /* CPU yielding period */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK       (GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long offset;
        size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        spinlock_t lock;
        unsigned long flags;            /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct prio_tree_node tree_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
static unsigned long jiffies_min_age;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan;
/* mutex protecting the memory scanning */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_FREE,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
        KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
        int op_type;                    /* kmemleak operation type */
        const void *ptr;                /* allocated/freed memory block */
        size_t size;                    /* memory block size */
        int min_count;                  /* minimum reference count */
        unsigned long offset;           /* scan area offset */
        size_t length;                  /* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[200];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {    \
        pr_warning(x);                  \
        dump_stack();                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *              sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
        return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
        return object->min_count != -1 && object->count >= object->min_count;
}
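
/*
 * A few worked instances of the encoding above (illustrative only, derived
 * directly from the color_white()/color_gray() predicates):
 *
 *	count = -1, min_count =  1  ->  no color yet (newly created)
 *	count =  0, min_count =  1  ->  white (orphan candidate)
 *	count =  2, min_count =  1  ->  gray  (sufficiently referenced)
 *	count =  0, min_count =  0  ->  gray  (marked as false positive)
 *	count =  0, min_count = -1  ->  black (ignored, never gray or white)
 */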

/*
 * Objects are considered referenced if their color is gray and they have not
 * been deleted.
 */
static int referenced_object(struct kmemleak_object *object)
{
        return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
        return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
                time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
}

/*
 * Printing of the (un)referenced objects information, either to the seq file
 * or to the kernel log. The print_referenced/print_unreferenced functions
 * must be called with the object->lock held.
 */
#define print_helper(seq, x...) do {    \
        struct seq_file *s = (seq);     \
        if (s)                          \
                seq_printf(s, x);       \
        else                            \
                pr_info(x);             \
} while (0)

static void print_referenced(struct kmemleak_object *object)
{
        pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
                object->pointer, object->size);
}

static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;

        print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
                     object->pointer, object->size);
        print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
                     object->comm, object->pid, object->jiffies);
        print_helper(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                print_helper(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of the kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        struct stack_trace trace;

        trace.nr_entries = object->trace_len;
        trace.entries = object->trace;

        pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
                  object->tree_node.start, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct prio_tree_node *node;
        struct prio_tree_iter iter;
        struct kmemleak_object *object;

        prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
        node = prio_tree_next(&iter);
        if (node) {
                object = prio_tree_entry(node, struct kmemleak_object,
                                         tree_node);
                if (!alias && object->pointer != ptr) {
                        kmemleak_warn("kmemleak: Found object by alias\n");
                        object = NULL;
                }
        } else
                object = NULL;

        return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *elem, *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
                hlist_del(elem);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object = NULL;

        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
        if (ptr >= min_addr && ptr < max_addr)
                object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct prio_tree_node *node;
        struct stack_trace trace;

        object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
        if (!object) {
                kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
                              "structure\n");
                return;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->min_count = min_count;
        object->count = -1;                     /* no color initially */
        object->jiffies = jiffies;

        /* task information */
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        trace.max_entries = MAX_TRACE;
        trace.nr_entries = 0;
        trace.entries = object->trace;
        trace.skip = 1;
        save_stack_trace(&trace);
        object->trace_len = trace.nr_entries;

        INIT_PRIO_TREE_NODE(&object->tree_node);
        object->tree_node.start = ptr;
        object->tree_node.last = ptr + size - 1;

        write_lock_irqsave(&kmemleak_lock, flags);
        min_addr = min(min_addr, ptr);
        max_addr = max(max_addr, ptr + size);
        node = prio_tree_insert(&object_tree_root, &object->tree_node);
        /*
         * The code calling the kernel does not yet have the pointer to the
         * memory block to be able to free it.  However, we still hold the
         * kmemleak_lock here in case parts of the kernel started freeing
         * random memory blocks.
         */
        if (node != &object->tree_node) {
                unsigned long flags;

                kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
                              "search tree (already existing)\n", ptr);
                object = lookup_object(ptr, 1);
                spin_lock_irqsave(&object->lock, flags);
                dump_object_info(object);
                spin_unlock_irqrestore(&object->lock, flags);

                goto out;
        }
        list_add_tail_rcu(&object->object_list, &object_list);
out:
        write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, 0);
        if (!object) {
                kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
                              ptr);
                write_unlock_irqrestore(&kmemleak_lock, flags);
                return;
        }
        prio_tree_remove(&object_tree_root, &object->tree_node);
        list_del_rcu(&object->object_list);
        write_unlock_irqrestore(&kmemleak_lock, flags);

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_REPORTED)
                print_referenced(object);
        object->flags &= ~OBJECT_ALLOCATED;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->min_count = 0;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->min_count = -1;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
                          size_t length, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("kmemleak: Adding scan area to unknown "
                              "object at 0x%08lx\n", ptr);
                return;
        }

        area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
        if (!area) {
                kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
                goto out;
        }

        spin_lock_irqsave(&object->lock, flags);
        if (offset + length > object->size) {
                kmemleak_warn("kmemleak: Scan area larger than object "
                              "0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->offset = offset;
        area->length = length;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
out:
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("kmemleak: Not scanning unknown object at "
                              "0x%08lx\n", ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
                      int min_count, unsigned long offset, size_t length)
{
        unsigned long flags;
        struct early_log *log;

        if (crt_early_log >= ARRAY_SIZE(early_log)) {
                kmemleak_stop("kmemleak: Early log buffer exceeded\n");
                return;
        }

        /*
         * There is no need for locking since the kernel is still in UP mode
         * at this stage. Disabling the IRQs is enough.
         */
        local_irq_save(flags);
        log = &early_log[crt_early_log];
        log->op_type = op_type;
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
        log->offset = offset;
        log->length = length;
        crt_early_log++;
        local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
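
/*
 * A minimal sketch of how an allocator wrapper would hook this callback and
 * kmemleak_free() below (hypothetical code, not part of this file; the real
 * hooks live in the slab and vmalloc allocators):
 *
 *	void *my_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = low_level_alloc(size, gfp);
 *
 *		kmemleak_alloc(ptr, size, 1, gfp);
 *		return ptr;
 *	}
 *
 *	void my_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		low_level_free(ptr);
 *	}
 *
 * min_count == 1 means the block is reported as a leak unless at least one
 * pointer to it is found during scanning.
 */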

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                delete_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
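
/*
 * Typical use (hypothetical caller code, for illustration): a buffer whose
 * only reference is handed to hardware or stored in a form the scanner
 * cannot follow would otherwise be reported as a leak, so the caller marks
 * it as a known false positive:
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	kmemleak_not_leak(buf);
 */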

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
                        gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, offset, length, gfp);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
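
/*
 * Example of the offset/length semantics (hypothetical structure, for
 * illustration only): to have kmemleak follow only the pointer members of
 * an object and skip a large opaque payload, restrict scanning to the range
 * covering those members:
 *
 *	struct foo {
 *		struct list_head list;
 *		char payload[4096];
 *	};
 *
 *	foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *	kmemleak_scan_area(foo, offsetof(struct foo, list),
 *			   sizeof(foo->list), GFP_KERNEL);
 *
 * add_scan_area() rejects any area with offset + length > object size.
 */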

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Yield the CPU so that other tasks get a chance to run.  The yielding is
 * rate-limited to avoid an excessive number of calls to the schedule() function
 * during memory scanning.
 */
static void scan_yield(void)
{
        might_sleep();

        if (time_is_before_eq_jiffies(next_scan_yield)) {
                schedule();
                next_scan_yield = jiffies + jiffies_scan_yield;
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
        if (!atomic_read(&kmemleak_enabled))
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);

        for (ptr = start; ptr < end; ptr++) {
                unsigned long flags;
                unsigned long pointer = *ptr;
                struct kmemleak_object *object;

                if (scan_should_stop())
                        break;

                /*
                 * When scanning a memory block with a corresponding
                 * kmemleak_object, the CPU yielding is handled in the calling
                 * code since it holds the object->lock to avoid the block
                 * freeing.
                 */
                if (!scanned)
                        scan_yield();

                object = find_and_get_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned) {
                        /* self referenced, ignore */
                        put_object(object);
                        continue;
                }

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                spin_lock_irqsave_nested(&object->lock, flags,
                                         SINGLE_DEPTH_NESTING);
                if (!color_white(object)) {
                        /* non-orphan, ignored or new */
                        spin_unlock_irqrestore(&object->lock, flags);
                        put_object(object);
                        continue;
                }

                /*
                 * Increase the object's reference count (number of pointers
                 * to the memory block). If this count reaches the required
                 * minimum, the object's color will become gray and it will be
                 * added to the gray_list.
                 */
                object->count++;
                if (color_gray(object))
                        list_add_tail(&object->gray_list, &gray_list);
                else
                        put_object(object);
                spin_unlock_irqrestore(&object->lock, flags);
        }
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
        struct kmemleak_scan_area *area;
        struct hlist_node *elem;
        unsigned long flags;

        /*
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
        spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
        if (hlist_empty(&object->area_list))
                scan_block((void *)object->pointer,
                           (void *)(object->pointer + object->size), object);
        else
                hlist_for_each_entry(area, elem, &object->area_list, node)
                        scan_block((void *)(object->pointer + area->offset),
                                   (void *)(object->pointer + area->offset
                                            + area->length), object);
out:
        spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
        unsigned long flags;
        struct kmemleak_object *object, *tmp;
        struct task_struct *task;
        int i;

        /* prepare the kmemleak_object structures */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
                /*
                 * With a few exceptions there should be a maximum of
                 * 1 reference to any object at this point.
                 */
                if (atomic_read(&object->use_count) > 1) {
                        pr_debug("kmemleak: object->use_count = %d\n",
                                 atomic_read(&object->use_count));
                        dump_object_info(object);
                }
#endif
                /* reset the reference count (whiten the object) */
                object->count = 0;
                if (color_gray(object) && get_object(object))
                        list_add_tail(&object->gray_list, &gray_list);

                spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();

        /* data/bss scanning */
        scan_block(_sdata, _edata, NULL);
        scan_block(__bss_start, __bss_stop, NULL);

#ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
                scan_block(__per_cpu_start + per_cpu_offset(i),
                           __per_cpu_end + per_cpu_offset(i), NULL);
#endif

        /*
         * Struct page scanning for each node. The code below is not yet safe
         * with MEMORY_HOTPLUG.
         */
        for_each_online_node(i) {
                pg_data_t *pgdat = NODE_DATA(i);
                unsigned long start_pfn = pgdat->node_start_pfn;
                unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
                unsigned long pfn;

                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                        struct page *page;

                        if (!pfn_valid(pfn))
                                continue;
                        page = pfn_to_page(pfn);
                        /* only scan if page is in use */
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
                }
        }

        /*
         * Scanning the task stacks may introduce false negatives and it is
         * not enabled by default.
         */
        if (kmemleak_stack_scan) {
                read_lock(&tasklist_lock);
                for_each_process(task)
                        scan_block(task_stack_page(task),
                                   task_stack_page(task) + THREAD_SIZE, NULL);
                read_unlock(&tasklist_lock);
        }

        /*
         * Scan the objects already referenced from the sections scanned
         * above. More objects will be referenced and, if there are no memory
         * leaks, all the objects will be scanned. The list traversal is safe
         * for both tail additions and removals from inside the loop. The
         * kmemleak objects cannot be freed from outside the loop because their
         * use_count was increased.
         */
        object = list_entry(gray_list.next, typeof(*object), gray_list);
        while (&object->gray_list != &gray_list) {
                scan_yield();

                /* may add new objects to the list */
                if (!scan_should_stop())
                        scan_object(object);

                tmp = list_entry(object->gray_list.next, typeof(*object),
                                 gray_list);

                /* remove the object from the list and release it */
                list_del(&object->gray_list);
                put_object(object);

                object = tmp;
        }
        WARN_ON(!list_empty(&gray_list));
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
        static int first_run = 1;

        pr_info("kmemleak: Automatic memory scanning thread started\n");

        /*
         * Wait before the first scan to allow the system to fully initialize.
         */
        if (first_run) {
                first_run = 0;
                ssleep(SECS_FIRST_SCAN);
        }

        while (!kthread_should_stop()) {
                struct kmemleak_object *object;
                signed long timeout = jiffies_scan_wait;

                mutex_lock(&scan_mutex);

                kmemleak_scan();
                reported_leaks = 0;

                rcu_read_lock();
                list_for_each_entry_rcu(object, &object_list, object_list) {
                        unsigned long flags;

                        if (reported_leaks >= REPORTS_NR)
                                break;
                        spin_lock_irqsave(&object->lock, flags);
                        if (!(object->flags & OBJECT_REPORTED) &&
                            unreferenced_object(object)) {
                                print_unreferenced(NULL, object);
                                object->flags |= OBJECT_REPORTED;
                                reported_leaks++;
                        } else if ((object->flags & OBJECT_REPORTED) &&
                                   referenced_object(object)) {
                                print_referenced(object);
                                object->flags &= ~OBJECT_REPORTED;
                        }
                        spin_unlock_irqrestore(&object->lock, flags);
                }
                rcu_read_unlock();

                mutex_unlock(&scan_mutex);
                /* wait before the next scan */
                while (timeout && !kthread_should_stop())
                        timeout = schedule_timeout_interruptible(timeout);
        }

        pr_info("kmemleak: Automatic memory scanning thread ended\n");

        return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void start_scan_thread(void)
{
        if (scan_thread)
                return;
        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
        if (IS_ERR(scan_thread)) {
                pr_warning("kmemleak: Failed to create the scan thread\n");
                scan_thread = NULL;
        }
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void stop_scan_thread(void)
{
        if (scan_thread) {
                kthread_stop(scan_thread);
                scan_thread = NULL;
        }
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct kmemleak_object *object;
        loff_t n = *pos;

        if (!n) {
                kmemleak_scan();
                reported_leaks = 0;
        }
        if (reported_leaks >= REPORTS_NR)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                if (n-- > 0)
                        continue;
                if (get_object(object))
                        goto out;
        }
        object = NULL;
out:
        rcu_read_unlock();
        return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct kmemleak_object *prev_obj = v;
        struct kmemleak_object *next_obj = NULL;
        struct list_head *n = &prev_obj->object_list;

        ++(*pos);
        if (reported_leaks >= REPORTS_NR)
                goto out;

        rcu_read_lock();
        list_for_each_continue_rcu(n, &object_list) {
                next_obj = list_entry(n, struct kmemleak_object, object_list);
                if (get_object(next_obj))
                        break;
        }
        rcu_read_unlock();
out:
        put_object(prev_obj);
        return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
        if (v)
                put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
        struct kmemleak_object *object = v;
        unsigned long flags;

        spin_lock_irqsave(&object->lock, flags);
        if (!unreferenced_object(object))
                goto out;
        print_unreferenced(seq, object);
        reported_leaks++;
out:
        spin_unlock_irqrestore(&object->lock, flags);
        return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
        .start = kmemleak_seq_start,
        .next  = kmemleak_seq_next,
        .stop  = kmemleak_seq_stop,
        .show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
        int ret = 0;

        if (!atomic_read(&kmemleak_enabled))
                return -EBUSY;

        ret = mutex_lock_interruptible(&kmemleak_mutex);
        if (ret < 0)
                goto out;
        if (file->f_mode & FMODE_READ) {
                ret = mutex_lock_interruptible(&scan_mutex);
                if (ret < 0)
                        goto kmemleak_unlock;
                ret = seq_open(file, &kmemleak_seq_ops);
                if (ret < 0)
                        goto scan_unlock;
        }
        return ret;

scan_unlock:
        mutex_unlock(&scan_mutex);
kmemleak_unlock:
        mutex_unlock(&kmemleak_mutex);
out:
        return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
        int ret = 0;

        if (file->f_mode & FMODE_READ) {
                seq_release(inode, file);
                mutex_unlock(&scan_mutex);
        }
        mutex_unlock(&kmemleak_mutex);

        return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off        - disable kmemleak (irreversible)
 *   stack=on   - enable the task stacks scanning
 *   stack=off  - disable the task stacks scanning
 *   scan=on    - start the automatic memory scanning thread
 *   scan=off   - stop the automatic memory scanning thread
 *   scan=...   - set the automatic memory scanning period in seconds (0 to
 *                disable it)
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
                              size_t size, loff_t *ppos)
{
        char buf[64];
        int buf_size;

        if (!atomic_read(&kmemleak_enabled))
                return -EBUSY;

        buf_size = min(size, (sizeof(buf) - 1));
        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
                return -EFAULT;
        buf[buf_size] = 0;

        if (strncmp(buf, "off", 3) == 0)
                kmemleak_disable();
        else if (strncmp(buf, "stack=on", 8) == 0)
                kmemleak_stack_scan = 1;
        else if (strncmp(buf, "stack=off", 9) == 0)
                kmemleak_stack_scan = 0;
        else if (strncmp(buf, "scan=on", 7) == 0)
                start_scan_thread();
        else if (strncmp(buf, "scan=off", 8) == 0)
                stop_scan_thread();
        else if (strncmp(buf, "scan=", 5) == 0) {
                unsigned long secs;
                int err;

                err = strict_strtoul(buf + 5, 0, &secs);
                if (err < 0)
                        return err;
                stop_scan_thread();
                if (secs) {
                        jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
                        start_scan_thread();
                }
        } else
                return -EINVAL;

        /* ignore the rest of the buffer, only one command at a time */
        *ppos += size;
        return size;
}
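
/*
 * Example session from user space (assuming debugfs is mounted at
 * /sys/kernel/debug); the commands map onto the parser above, and reading
 * the file triggers a scan via kmemleak_seq_start():
 *
 *	# mount -t debugfs nodev /sys/kernel/debug
 *	# echo scan=on > /sys/kernel/debug/kmemleak
 *	# echo scan=30 > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 */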

static const struct file_operations kmemleak_fops = {
        .owner          = THIS_MODULE,
        .open           = kmemleak_open,
        .read           = seq_read,
        .write          = kmemleak_write,
        .llseek         = seq_lseek,
        .release        = kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
        struct kmemleak_object *object;

        mutex_lock(&kmemleak_mutex);
        stop_scan_thread();
        mutex_unlock(&kmemleak_mutex);

        mutex_lock(&scan_mutex);
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list)
                delete_object(object->pointer);
        rcu_read_unlock();
        mutex_unlock(&scan_mutex);

        return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
        struct task_struct *cleanup_thread;

        cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
                                     "kmemleak-clean");
        if (IS_ERR(cleanup_thread))
                pr_warning("kmemleak: Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
        /* atomically check whether it was already invoked */
        if (atomic_cmpxchg(&kmemleak_error, 0, 1))
                return;

        /* stop any memory operation tracing */
        atomic_set(&kmemleak_early_log, 0);
        atomic_set(&kmemleak_enabled, 0);

        /* check whether it is too early for a kernel thread */
        if (atomic_read(&kmemleak_initialized))
                kmemleak_cleanup();

        pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
        if (!str)
                return -EINVAL;
        if (strcmp(str, "off") == 0)
                kmemleak_disable();
        else if (strcmp(str, "on") != 0)
                return -EINVAL;
        return 0;
}
early_param("kmemleak", kmemleak_boot_config);
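
/*
 * Boot-time usage: passing "kmemleak=off" on the kernel command line
 * disables the detector before any tracking starts; "kmemleak=on" (the
 * default) leaves it enabled. Any other value is rejected with -EINVAL.
 */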

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
        int i;
        unsigned long flags;

        jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
        INIT_PRIO_TREE_ROOT(&object_tree_root);

        /* the kernel is still in UP mode, so disabling the IRQs is enough */
        local_irq_save(flags);
        if (!atomic_read(&kmemleak_error)) {
                atomic_set(&kmemleak_enabled, 1);
                atomic_set(&kmemleak_early_log, 0);
        }
        local_irq_restore(flags);

        /*
         * This is the point where tracking allocations is safe. Automatic
         * scanning is started during the late initcall. Add the early logged
         * callbacks to the kmemleak infrastructure.
         */
        for (i = 0; i < crt_early_log; i++) {
                struct early_log *log = &early_log[i];

                switch (log->op_type) {
                case KMEMLEAK_ALLOC:
                        kmemleak_alloc(log->ptr, log->size, log->min_count,
                                       GFP_KERNEL);
                        break;
                case KMEMLEAK_FREE:
                        kmemleak_free(log->ptr);
                        break;
                case KMEMLEAK_NOT_LEAK:
                        kmemleak_not_leak(log->ptr);
                        break;
                case KMEMLEAK_IGNORE:
                        kmemleak_ignore(log->ptr);
                        break;
                case KMEMLEAK_SCAN_AREA:
                        kmemleak_scan_area(log->ptr, log->offset, log->length,
                                           GFP_KERNEL);
                        break;
                case KMEMLEAK_NO_SCAN:
                        kmemleak_no_scan(log->ptr);
                        break;
                default:
                        WARN_ON(1);
                }
        }
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
        struct dentry *dentry;

        atomic_set(&kmemleak_initialized, 1);

        if (atomic_read(&kmemleak_error)) {
                /*
                 * Some error occurred and kmemleak was disabled. There is a
                 * small chance that kmemleak_disable() was called immediately
                 * after setting kmemleak_initialized and we may end up with
                 * two clean-up threads but serialized by scan_mutex.
                 */
                kmemleak_cleanup();
                return -ENOMEM;
        }

        dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
                                     &kmemleak_fops);
        if (!dentry)
                pr_warning("kmemleak: Failed to create the debugfs kmemleak "
                           "file\n");
        mutex_lock(&kmemleak_mutex);
        start_scan_thread();
        mutex_unlock(&kmemleak_mutex);

        pr_info("Kernel memory leak detector initialized\n");

        return 0;
}
late_initcall(kmemleak_late_init);