1 /*
2 * mm/kmemleak.c
3 *
4 * Copyright (C) 2008 ARM Limited
5 * Written by Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 *
21 * For more information on the algorithm and kmemleak usage, please see
22 * Documentation/kmemleak.txt.
23 *
24 * Notes on locking
25 * ----------------
26 *
27 * The following locks and mutexes are used by kmemleak:
28 *
29 * - kmemleak_lock (rwlock): protects the object_list modifications and
30 * accesses to the object_tree_root. The object_list is the main list
31 * holding the metadata (struct kmemleak_object) for the allocated memory
32 * blocks. The object_tree_root is a red black tree used to look-up
33 * metadata based on a pointer to the corresponding memory block. The
34 * kmemleak_object structures are added to the object_list and
35 * object_tree_root in the create_object() function called from the
36 * kmemleak_alloc() callback and removed in delete_object() called from the
37 * kmemleak_free() callback
38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39 * the metadata (e.g. count) are protected by this lock. Note that some
40 * members of this structure may be protected by other means (atomic or
41 * kmemleak_lock). This lock is also held when scanning the corresponding
42 * memory block to avoid the kernel freeing it via the kmemleak_free()
43 * callback. This is less heavyweight than holding a global lock like
44 * kmemleak_lock during scanning
45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46 * unreferenced objects at a time. The gray_list contains the objects which
47 * are already referenced or marked as false positives and need to be
48 * scanned. This list is only modified during a scanning episode when the
49 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
50 * Note that the kmemleak_object.use_count is incremented when an object is
51 * added to the gray_list and therefore cannot be freed. This mutex also
52 * prevents multiple users of the "kmemleak" debugfs file together with
53 * modifications to the memory scanning parameters including the scan_thread
54 * pointer
55 *
56 * Locks and mutexes are acquired/nested in the following order:
57 *
58 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
59 *
60 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
61 * regions.
62 *
63 * The kmemleak_object structures have a use_count incremented or decremented
64 * using the get_object()/put_object() functions. When the use_count becomes
65 * 0, this count can no longer be incremented and put_object() schedules the
66 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
67 * function must be protected by rcu_read_lock() to avoid accessing a freed
68 * structure.
69 */
70
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
73 #include <linux/init.h>
74 #include <linux/kernel.h>
75 #include <linux/list.h>
76 #include <linux/sched.h>
77 #include <linux/jiffies.h>
78 #include <linux/delay.h>
79 #include <linux/export.h>
80 #include <linux/kthread.h>
81 #include <linux/rbtree.h>
82 #include <linux/fs.h>
83 #include <linux/debugfs.h>
84 #include <linux/seq_file.h>
85 #include <linux/cpumask.h>
86 #include <linux/spinlock.h>
87 #include <linux/mutex.h>
88 #include <linux/rcupdate.h>
89 #include <linux/stacktrace.h>
90 #include <linux/cache.h>
91 #include <linux/percpu.h>
92 #include <linux/hardirq.h>
93 #include <linux/mmzone.h>
94 #include <linux/slab.h>
95 #include <linux/thread_info.h>
96 #include <linux/err.h>
97 #include <linux/uaccess.h>
98 #include <linux/string.h>
99 #include <linux/nodemask.h>
100 #include <linux/mm.h>
101 #include <linux/workqueue.h>
102 #include <linux/crc32.h>
103
104 #include <asm/sections.h>
105 #include <asm/processor.h>
106 #include <linux/atomic.h>
107
108 #include <linux/kasan.h>
109 #include <linux/kmemcheck.h>
110 #include <linux/kmemleak.h>
111 #include <linux/memory_hotplug.h>
112
113 /*
114 * Kmemleak configuration and common defines.
115 */
116 #define MAX_TRACE 16 /* stack trace length */
117 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
118 #define SECS_FIRST_SCAN 60 /* delay before the first scan */
119 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
120 #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
121
122 #define BYTES_PER_POINTER sizeof(void *)
123
124 /* GFP bitmask for kmemleak internal allocations */
125 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
126 __GFP_NOACCOUNT)) | \
127 __GFP_NORETRY | __GFP_NOMEMALLOC | \
128 __GFP_NOWARN)
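/*
 * Illustrative expansion (values assumed, not extra code): for a caller
 * passing GFP_KERNEL | __GFP_HIGHMEM, only the GFP_KERNEL/GFP_ATOMIC
 * bits survive the mask and the internal modifiers are added:
 *
 *	gfp_kmemleak_mask(GFP_KERNEL | __GFP_HIGHMEM)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC |
 *		   __GFP_NOWARN
 *
 * so kmemleak's internal allocations never inherit unexpected flags.
 */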
129
130 /* scanning area inside a memory block */
131 struct kmemleak_scan_area {
132 struct hlist_node node;
133 unsigned long start;
134 size_t size;
135 };
136
137 #define KMEMLEAK_GREY 0
138 #define KMEMLEAK_BLACK -1
139
140 /*
141 * Structure holding the metadata for each allocated memory block.
142 * Modifications to such objects should be made while holding the
143 * object->lock. Insertions or deletions from object_list, gray_list or
144 * rb_node are already protected by the corresponding locks or mutex (see
145 * the notes on locking above). These objects are reference-counted
146 * (use_count) and freed using the RCU mechanism.
147 */
148 struct kmemleak_object {
149 spinlock_t lock;
150 unsigned long flags; /* object status flags */
151 struct list_head object_list;
152 struct list_head gray_list;
153 struct rb_node rb_node;
154 struct rcu_head rcu; /* object_list lockless traversal */
155 /* object usage count; object freed when use_count == 0 */
156 atomic_t use_count;
157 unsigned long pointer;
158 size_t size;
159 	/* minimum number of pointers found before it is considered a leak */
160 int min_count;
161 /* the total number of pointers found pointing to this object */
162 int count;
163 /* checksum for detecting modified objects */
164 u32 checksum;
165 /* memory ranges to be scanned inside an object (empty for all) */
166 struct hlist_head area_list;
167 unsigned long trace[MAX_TRACE];
168 unsigned int trace_len;
169 unsigned long jiffies; /* creation timestamp */
170 pid_t pid; /* pid of the current task */
171 char comm[TASK_COMM_LEN]; /* executable name */
172 };
173
174 /* flag representing the memory block allocation status */
175 #define OBJECT_ALLOCATED (1 << 0)
176 /* flag set after the first reporting of an unreferenced object */
177 #define OBJECT_REPORTED (1 << 1)
178 /* flag set to not scan the object */
179 #define OBJECT_NO_SCAN (1 << 2)
180
181 /* number of bytes to print per line; must be 16 or 32 */
182 #define HEX_ROW_SIZE 16
183 /* number of bytes to print at a time (1, 2, 4, 8) */
184 #define HEX_GROUP_SIZE 1
185 /* include ASCII after the hex output */
186 #define HEX_ASCII 1
187 /* max number of lines to be printed */
188 #define HEX_MAX_LINES 2
189
190 /* the list of all allocated objects */
191 static LIST_HEAD(object_list);
192 /* the list of gray-colored objects (see color_gray comment below) */
193 static LIST_HEAD(gray_list);
194 /* search tree for object boundaries */
195 static struct rb_root object_tree_root = RB_ROOT;
196 /* rw_lock protecting the access to object_list and object_tree_root */
197 static DEFINE_RWLOCK(kmemleak_lock);
198
199 /* allocation caches for kmemleak internal data */
200 static struct kmem_cache *object_cache;
201 static struct kmem_cache *scan_area_cache;
202
203 /* set if tracing memory operations is enabled */
204 static int kmemleak_enabled;
205 /* same as above but only for the kmemleak_free() callback */
206 static int kmemleak_free_enabled;
207 /* set in the late_initcall if there were no errors */
208 static int kmemleak_initialized;
209 /* enables or disables early logging of the memory operations */
210 static int kmemleak_early_log = 1;
211 /* set if a kmemleak warning was issued */
212 static int kmemleak_warning;
213 /* set if a fatal kmemleak error has occurred */
214 static int kmemleak_error;
215
216 /* minimum and maximum address that may be valid pointers */
217 static unsigned long min_addr = ULONG_MAX;
218 static unsigned long max_addr;
219
220 static struct task_struct *scan_thread;
221 /* used to avoid reporting of recently allocated objects */
222 static unsigned long jiffies_min_age;
223 static unsigned long jiffies_last_scan;
224 /* delay between automatic memory scannings */
225 static signed long jiffies_scan_wait;
226 /* enables or disables the task stacks scanning */
227 static int kmemleak_stack_scan = 1;
228 /* protects the memory scanning, parameters and debug/kmemleak file access */
229 static DEFINE_MUTEX(scan_mutex);
230 /* setting kmemleak=on will set this var, skipping the disable */
231 static int kmemleak_skip_disable;
232 /* If there are leaks that can be reported */
233 static bool kmemleak_found_leaks;
234
235 /*
236 * Early object allocation/freeing logging. Kmemleak is initialized after the
237 * kernel allocator. However, both the kernel allocator and kmemleak may
238 * allocate memory blocks which need to be tracked. Kmemleak defines an
239 * arbitrary buffer to hold the allocation/freeing information before it is
240 * fully initialized.
241 */
242
243 /* kmemleak operation type for early logging */
244 enum {
245 KMEMLEAK_ALLOC,
246 KMEMLEAK_ALLOC_PERCPU,
247 KMEMLEAK_FREE,
248 KMEMLEAK_FREE_PART,
249 KMEMLEAK_FREE_PERCPU,
250 KMEMLEAK_NOT_LEAK,
251 KMEMLEAK_IGNORE,
252 KMEMLEAK_SCAN_AREA,
253 KMEMLEAK_NO_SCAN
254 };
255
256 /*
257 * Structure holding the information passed to kmemleak callbacks during the
258 * early logging.
259 */
260 struct early_log {
261 int op_type; /* kmemleak operation type */
262 const void *ptr; /* allocated/freed memory block */
263 size_t size; /* memory block size */
264 int min_count; /* minimum reference count */
265 unsigned long trace[MAX_TRACE]; /* stack trace */
266 unsigned int trace_len; /* stack trace length */
267 };
268
269 /* early logging buffer and current position */
270 static struct early_log
271 early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
272 static int crt_early_log __initdata;
273
274 static void kmemleak_disable(void);
275
276 /*
277 * Print a warning and dump the stack trace.
278 */
279 #define kmemleak_warn(x...) do { \
280 pr_warning(x); \
281 dump_stack(); \
282 kmemleak_warning = 1; \
283 } while (0)
284
285 /*
286  * Macro invoked when a serious kmemleak condition has occurred and cannot be
287  * recovered from. Kmemleak will be disabled and further allocation/freeing
288  * tracing is no longer available.
289 */
290 #define kmemleak_stop(x...) do { \
291 kmemleak_warn(x); \
292 kmemleak_disable(); \
293 } while (0)
294
295 /*
296  * Printing of the object's hex dump to the seq file. The number of lines to be
297 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
298 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
299 * with the object->lock held.
300 */
301 static void hex_dump_object(struct seq_file *seq,
302 struct kmemleak_object *object)
303 {
304 const u8 *ptr = (const u8 *)object->pointer;
305 int i, len, remaining;
306 unsigned char linebuf[HEX_ROW_SIZE * 5];
307
308 /* limit the number of lines to HEX_MAX_LINES */
309 remaining = len =
310 min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
311
312 seq_printf(seq, " hex dump (first %d bytes):\n", len);
313 for (i = 0; i < len; i += HEX_ROW_SIZE) {
314 int linelen = min(remaining, HEX_ROW_SIZE);
315
316 remaining -= HEX_ROW_SIZE;
317 hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
318 HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
319 HEX_ASCII);
320 seq_printf(seq, " %s\n", linebuf);
321 }
322 }
323
324 /*
325 * Object colors, encoded with count and min_count:
326 * - white - orphan object, not enough references to it (count < min_count)
327 * - gray - not orphan, not marked as false positive (min_count == 0) or
328 * sufficient references to it (count >= min_count)
329 * - black - ignore, it doesn't contain references (e.g. text section)
330 * (min_count == -1). No function defined for this color.
331  * Newly created objects start with object->count == 0 (see create_object()),
332  * i.e. white when min_count > 0, until the next memory scan recolors them.
333 */
334 static bool color_white(const struct kmemleak_object *object)
335 {
336 return object->count != KMEMLEAK_BLACK &&
337 object->count < object->min_count;
338 }
339
340 static bool color_gray(const struct kmemleak_object *object)
341 {
342 return object->min_count != KMEMLEAK_BLACK &&
343 object->count >= object->min_count;
344 }
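/*
 * Worked example of the encoding (illustrative values): an object with
 * min_count == 1 and count == 0 is white (a leak candidate); once the
 * scan finds two pointers to it, count == 2 >= min_count and it turns
 * gray; an object painted with min_count == KMEMLEAK_BLACK (-1) is
 * neither white nor gray and is simply ignored.
 */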
345
346 /*
347 * Objects are considered unreferenced only if their color is white, they have
348  * not been deleted and have a minimum age to avoid false positives caused by
349 * pointers temporarily stored in CPU registers.
350 */
351 static bool unreferenced_object(struct kmemleak_object *object)
352 {
353 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
354 time_before_eq(object->jiffies + jiffies_min_age,
355 jiffies_last_scan);
356 }
357
358 /*
359  * Printing of the unreferenced object's information to the seq file. The
360 * print_unreferenced function must be called with the object->lock held.
361 */
362 static void print_unreferenced(struct seq_file *seq,
363 struct kmemleak_object *object)
364 {
365 int i;
366 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
367
368 seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
369 object->pointer, object->size);
370 seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
371 object->comm, object->pid, object->jiffies,
372 msecs_age / 1000, msecs_age % 1000);
373 hex_dump_object(seq, object);
374 seq_printf(seq, " backtrace:\n");
375
376 for (i = 0; i < object->trace_len; i++) {
377 void *ptr = (void *)object->trace[i];
378 seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
379 }
380 }
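/*
 * The resulting report has the following shape (all values below are
 * made up for illustration):
 *
 *	unreferenced object 0xffff880235a9efa0 (size 32):
 *	  comm "insmod", pid 1324, jiffies 4372569109 (age 10.300s)
 *	  hex dump (first 32 bytes):
 *	    6b 6d 65 6d 6c 65 61 6b 20 74 65 73 74 00 00 00  kmemleak test...
 *	  backtrace:
 *	    [<ffffffff817f1949>] kmem_cache_alloc+0xf9/0x160
 */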
381
382 /*
383 * Print the kmemleak_object information. This function is used mainly for
384  * debugging special cases of kmemleak operations. It must be called with
385 * the object->lock held.
386 */
387 static void dump_object_info(struct kmemleak_object *object)
388 {
389 struct stack_trace trace;
390
391 trace.nr_entries = object->trace_len;
392 trace.entries = object->trace;
393
394 pr_notice("Object 0x%08lx (size %zu):\n",
395 object->pointer, object->size);
396 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
397 object->comm, object->pid, object->jiffies);
398 pr_notice(" min_count = %d\n", object->min_count);
399 pr_notice(" count = %d\n", object->count);
400 pr_notice(" flags = 0x%lx\n", object->flags);
401 pr_notice(" checksum = %u\n", object->checksum);
402 pr_notice(" backtrace:\n");
403 print_stack_trace(&trace, 4);
404 }
405
406 /*
407 * Look-up a memory block metadata (kmemleak_object) in the object search
408 * tree based on a pointer value. If alias is 0, only values pointing to the
409 * beginning of the memory block are allowed. The kmemleak_lock must be held
410 * when calling this function.
411 */
412 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
413 {
414 struct rb_node *rb = object_tree_root.rb_node;
415
416 while (rb) {
417 struct kmemleak_object *object =
418 rb_entry(rb, struct kmemleak_object, rb_node);
419 if (ptr < object->pointer)
420 rb = object->rb_node.rb_left;
421 else if (object->pointer + object->size <= ptr)
422 rb = object->rb_node.rb_right;
423 else if (object->pointer == ptr || alias)
424 return object;
425 else {
426 kmemleak_warn("Found object by alias at 0x%08lx\n",
427 ptr);
428 dump_object_info(object);
429 break;
430 }
431 }
432 return NULL;
433 }
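/*
 * Example of the alias semantics (hypothetical addresses): for an
 * object covering [0x1000, 0x1040), lookup_object(0x1000, 0) and
 * lookup_object(0x1010, 1) both return the object, while
 * lookup_object(0x1010, 0) warns about the look-up by alias and
 * returns NULL.
 */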
434
435 /*
436 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
437  * that once an object's use_count has reached 0, the RCU freeing has already
438  * been registered and the object should no longer be used. This function must be
439 * called under the protection of rcu_read_lock().
440 */
441 static int get_object(struct kmemleak_object *object)
442 {
443 return atomic_inc_not_zero(&object->use_count);
444 }
445
446 /*
447 * RCU callback to free a kmemleak_object.
448 */
449 static void free_object_rcu(struct rcu_head *rcu)
450 {
451 struct hlist_node *tmp;
452 struct kmemleak_scan_area *area;
453 struct kmemleak_object *object =
454 container_of(rcu, struct kmemleak_object, rcu);
455
456 /*
457 * Once use_count is 0 (guaranteed by put_object), there is no other
458 * code accessing this object, hence no need for locking.
459 */
460 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
461 hlist_del(&area->node);
462 kmem_cache_free(scan_area_cache, area);
463 }
464 kmem_cache_free(object_cache, object);
465 }
466
467 /*
468 * Decrement the object use_count. Once the count is 0, free the object using
469 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
470 * delete_object() path, the delayed RCU freeing ensures that there is no
471 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
472 * is also possible.
473 */
474 static void put_object(struct kmemleak_object *object)
475 {
476 if (!atomic_dec_and_test(&object->use_count))
477 return;
478
479 /* should only get here after delete_object was called */
480 WARN_ON(object->flags & OBJECT_ALLOCATED);
481
482 call_rcu(&object->rcu, free_object_rcu);
483 }
484
485 /*
486 * Look up an object in the object search tree and increase its use_count.
487 */
488 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
489 {
490 unsigned long flags;
491 struct kmemleak_object *object = NULL;
492
493 rcu_read_lock();
494 read_lock_irqsave(&kmemleak_lock, flags);
495 object = lookup_object(ptr, alias);
496 read_unlock_irqrestore(&kmemleak_lock, flags);
497
498 /* check whether the object is still available */
499 if (object && !get_object(object))
500 object = NULL;
501 rcu_read_unlock();
502
503 return object;
504 }
505
506 /*
507 * Look up an object in the object search tree and remove it from both
508 * object_tree_root and object_list. The returned object's use_count should be
509 * at least 1, as initially set by create_object().
510 */
511 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
512 {
513 unsigned long flags;
514 struct kmemleak_object *object;
515
516 write_lock_irqsave(&kmemleak_lock, flags);
517 object = lookup_object(ptr, alias);
518 if (object) {
519 rb_erase(&object->rb_node, &object_tree_root);
520 list_del_rcu(&object->object_list);
521 }
522 write_unlock_irqrestore(&kmemleak_lock, flags);
523
524 return object;
525 }
526
527 /*
528 * Save stack trace to the given array of MAX_TRACE size.
529 */
530 static int __save_stack_trace(unsigned long *trace)
531 {
532 struct stack_trace stack_trace;
533
534 stack_trace.max_entries = MAX_TRACE;
535 stack_trace.nr_entries = 0;
536 stack_trace.entries = trace;
537 stack_trace.skip = 2;
538 save_stack_trace(&stack_trace);
539
540 return stack_trace.nr_entries;
541 }
542
543 /*
544 * Create the metadata (struct kmemleak_object) corresponding to an allocated
545 * memory block and add it to the object_list and object_tree_root.
546 */
547 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
548 int min_count, gfp_t gfp)
549 {
550 unsigned long flags;
551 struct kmemleak_object *object, *parent;
552 struct rb_node **link, *rb_parent;
553
554 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
555 if (!object) {
556 pr_warning("Cannot allocate a kmemleak_object structure\n");
557 kmemleak_disable();
558 return NULL;
559 }
560
561 INIT_LIST_HEAD(&object->object_list);
562 INIT_LIST_HEAD(&object->gray_list);
563 INIT_HLIST_HEAD(&object->area_list);
564 spin_lock_init(&object->lock);
565 atomic_set(&object->use_count, 1);
566 object->flags = OBJECT_ALLOCATED;
567 object->pointer = ptr;
568 object->size = size;
569 object->min_count = min_count;
570 object->count = 0; /* white color initially */
571 object->jiffies = jiffies;
572 object->checksum = 0;
573
574 /* task information */
575 if (in_irq()) {
576 object->pid = 0;
577 strncpy(object->comm, "hardirq", sizeof(object->comm));
578 } else if (in_softirq()) {
579 object->pid = 0;
580 strncpy(object->comm, "softirq", sizeof(object->comm));
581 } else {
582 object->pid = current->pid;
583 /*
584 * There is a small chance of a race with set_task_comm(),
585 * however using get_task_comm() here may cause locking
586 * dependency issues with current->alloc_lock. In the worst
587 * case, the command line is not correct.
588 */
589 strncpy(object->comm, current->comm, sizeof(object->comm));
590 }
591
592 /* kernel backtrace */
593 object->trace_len = __save_stack_trace(object->trace);
594
595 write_lock_irqsave(&kmemleak_lock, flags);
596
597 min_addr = min(min_addr, ptr);
598 max_addr = max(max_addr, ptr + size);
599 link = &object_tree_root.rb_node;
600 rb_parent = NULL;
601 while (*link) {
602 rb_parent = *link;
603 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
604 if (ptr + size <= parent->pointer)
605 link = &parent->rb_node.rb_left;
606 else if (parent->pointer + parent->size <= ptr)
607 link = &parent->rb_node.rb_right;
608 else {
609 kmemleak_stop("Cannot insert 0x%lx into the object "
610 "search tree (overlaps existing)\n",
611 ptr);
612 /*
613 * No need for parent->lock here since "parent" cannot
614 * be freed while the kmemleak_lock is held.
615 */
616 dump_object_info(parent);
617 kmem_cache_free(object_cache, object);
618 object = NULL;
619 goto out;
620 }
621 }
622 rb_link_node(&object->rb_node, rb_parent, link);
623 rb_insert_color(&object->rb_node, &object_tree_root);
624
625 list_add_tail_rcu(&object->object_list, &object_list);
626 out:
627 write_unlock_irqrestore(&kmemleak_lock, flags);
628 return object;
629 }
630
631 /*
632 * Mark the object as not allocated and schedule RCU freeing via put_object().
633 */
634 static void __delete_object(struct kmemleak_object *object)
635 {
636 unsigned long flags;
637
638 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
639 WARN_ON(atomic_read(&object->use_count) < 1);
640
641 /*
642 * Locking here also ensures that the corresponding memory block
643 * cannot be freed when it is being scanned.
644 */
645 spin_lock_irqsave(&object->lock, flags);
646 object->flags &= ~OBJECT_ALLOCATED;
647 spin_unlock_irqrestore(&object->lock, flags);
648 put_object(object);
649 }
650
651 /*
652 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
653 * delete it.
654 */
655 static void delete_object_full(unsigned long ptr)
656 {
657 struct kmemleak_object *object;
658
659 object = find_and_remove_object(ptr, 0);
660 if (!object) {
661 #ifdef DEBUG
662 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
663 ptr);
664 #endif
665 return;
666 }
667 __delete_object(object);
668 }
669
670 /*
671 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
672 * delete it. If the memory block is partially freed, the function may create
673 * additional metadata for the remaining parts of the block.
674 */
675 static void delete_object_part(unsigned long ptr, size_t size)
676 {
677 struct kmemleak_object *object;
678 unsigned long start, end;
679
680 object = find_and_remove_object(ptr, 1);
681 if (!object) {
682 #ifdef DEBUG
683 kmemleak_warn("Partially freeing unknown object at 0x%08lx "
684 "(size %zu)\n", ptr, size);
685 #endif
686 return;
687 }
688
689 /*
690 * Create one or two objects that may result from the memory block
691 * split. Note that partial freeing is only done by free_bootmem() and
692 * this happens before kmemleak_init() is called. The path below is
693 * only executed during early log recording in kmemleak_init(), so
694 * GFP_KERNEL is enough.
695 */
696 start = object->pointer;
697 end = object->pointer + object->size;
698 if (ptr > start)
699 create_object(start, ptr - start, object->min_count,
700 GFP_KERNEL);
701 if (ptr + size < end)
702 create_object(ptr + size, end - ptr - size, object->min_count,
703 GFP_KERNEL);
704
705 __delete_object(object);
706 }
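/*
 * Worked example (hypothetical addresses): for an object covering
 * [0x1000, 0x1400), delete_object_part(0x1100, 0x100) removes the old
 * metadata and re-creates objects for the two surviving ranges
 * [0x1000, 0x1100) and [0x1200, 0x1400), preserving min_count.
 */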
707
708 static void __paint_it(struct kmemleak_object *object, int color)
709 {
710 object->min_count = color;
711 if (color == KMEMLEAK_BLACK)
712 object->flags |= OBJECT_NO_SCAN;
713 }
714
715 static void paint_it(struct kmemleak_object *object, int color)
716 {
717 unsigned long flags;
718
719 spin_lock_irqsave(&object->lock, flags);
720 __paint_it(object, color);
721 spin_unlock_irqrestore(&object->lock, flags);
722 }
723
724 static void paint_ptr(unsigned long ptr, int color)
725 {
726 struct kmemleak_object *object;
727
728 object = find_and_get_object(ptr, 0);
729 if (!object) {
730 kmemleak_warn("Trying to color unknown object "
731 "at 0x%08lx as %s\n", ptr,
732 (color == KMEMLEAK_GREY) ? "Grey" :
733 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
734 return;
735 }
736 paint_it(object, color);
737 put_object(object);
738 }
739
740 /*
741 * Mark an object permanently as gray-colored so that it can no longer be
742 * reported as a leak. This is used in general to mark a false positive.
743 */
744 static void make_gray_object(unsigned long ptr)
745 {
746 paint_ptr(ptr, KMEMLEAK_GREY);
747 }
748
749 /*
750 * Mark the object as black-colored so that it is ignored from scans and
751 * reporting.
752 */
753 static void make_black_object(unsigned long ptr)
754 {
755 paint_ptr(ptr, KMEMLEAK_BLACK);
756 }
757
758 /*
759 * Add a scanning area to the object. If at least one such area is added,
760 * kmemleak will only scan these ranges rather than the whole memory block.
761 */
762 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
763 {
764 unsigned long flags;
765 struct kmemleak_object *object;
766 struct kmemleak_scan_area *area;
767
768 object = find_and_get_object(ptr, 1);
769 if (!object) {
770 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
771 ptr);
772 return;
773 }
774
775 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
776 if (!area) {
777 pr_warning("Cannot allocate a scan area\n");
778 goto out;
779 }
780
781 spin_lock_irqsave(&object->lock, flags);
782 if (size == SIZE_MAX) {
783 size = object->pointer + object->size - ptr;
784 } else if (ptr + size > object->pointer + object->size) {
785 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
786 dump_object_info(object);
787 kmem_cache_free(scan_area_cache, area);
788 goto out_unlock;
789 }
790
791 INIT_HLIST_NODE(&area->node);
792 area->start = ptr;
793 area->size = size;
794
795 hlist_add_head(&area->node, &object->area_list);
796 out_unlock:
797 spin_unlock_irqrestore(&object->lock, flags);
798 out:
799 put_object(object);
800 }
801
802 /*
803  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
804  * pointer. Such an object will not be scanned by kmemleak but references to it
805 * are searched.
806 */
807 static void object_no_scan(unsigned long ptr)
808 {
809 unsigned long flags;
810 struct kmemleak_object *object;
811
812 object = find_and_get_object(ptr, 0);
813 if (!object) {
814 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
815 return;
816 }
817
818 spin_lock_irqsave(&object->lock, flags);
819 object->flags |= OBJECT_NO_SCAN;
820 spin_unlock_irqrestore(&object->lock, flags);
821 put_object(object);
822 }
823
824 /*
825 * Log an early kmemleak_* call to the early_log buffer. These calls will be
826 * processed later once kmemleak is fully initialized.
827 */
828 static void __init log_early(int op_type, const void *ptr, size_t size,
829 int min_count)
830 {
831 unsigned long flags;
832 struct early_log *log;
833
834 if (kmemleak_error) {
835 /* kmemleak stopped recording, just count the requests */
836 crt_early_log++;
837 return;
838 }
839
840 if (crt_early_log >= ARRAY_SIZE(early_log)) {
841 kmemleak_disable();
842 return;
843 }
844
845 /*
846 * There is no need for locking since the kernel is still in UP mode
847 * at this stage. Disabling the IRQs is enough.
848 */
849 local_irq_save(flags);
850 log = &early_log[crt_early_log];
851 log->op_type = op_type;
852 log->ptr = ptr;
853 log->size = size;
854 log->min_count = min_count;
855 log->trace_len = __save_stack_trace(log->trace);
856 crt_early_log++;
857 local_irq_restore(flags);
858 }
859
860 /*
861 * Log an early allocated block and populate the stack trace.
862 */
863 static void early_alloc(struct early_log *log)
864 {
865 struct kmemleak_object *object;
866 unsigned long flags;
867 int i;
868
869 if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
870 return;
871
872 /*
873 * RCU locking needed to ensure object is not freed via put_object().
874 */
875 rcu_read_lock();
876 object = create_object((unsigned long)log->ptr, log->size,
877 log->min_count, GFP_ATOMIC);
878 if (!object)
879 goto out;
880 spin_lock_irqsave(&object->lock, flags);
881 for (i = 0; i < log->trace_len; i++)
882 object->trace[i] = log->trace[i];
883 object->trace_len = log->trace_len;
884 spin_unlock_irqrestore(&object->lock, flags);
885 out:
886 rcu_read_unlock();
887 }
888
889 /*
890  * Log an early allocated __percpu block and populate the stack trace.
891 */
892 static void early_alloc_percpu(struct early_log *log)
893 {
894 unsigned int cpu;
895 const void __percpu *ptr = log->ptr;
896
897 for_each_possible_cpu(cpu) {
898 log->ptr = per_cpu_ptr(ptr, cpu);
899 early_alloc(log);
900 }
901 }
902
903 /**
904 * kmemleak_alloc - register a newly allocated object
905 * @ptr: pointer to beginning of the object
906 * @size: size of the object
907 * @min_count: minimum number of references to this object. If during memory
908 * scanning a number of references less than @min_count is found,
909 * the object is reported as a memory leak. If @min_count is 0,
910 * the object is never reported as a leak. If @min_count is -1,
911 * the object is ignored (not scanned and not reported as a leak)
912 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
913 *
914 * This function is called from the kernel allocators when a new object
915 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
916 */
917 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
918 gfp_t gfp)
919 {
920 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
921
922 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
923 create_object((unsigned long)ptr, size, min_count, gfp);
924 else if (kmemleak_early_log)
925 log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
926 }
927 EXPORT_SYMBOL_GPL(kmemleak_alloc);
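/*
 * Usage sketch for a hypothetical custom allocator (my_raw_alloc() is
 * made up for illustration; the standard allocators already call this
 * hook themselves):
 *
 *	void *buf = my_raw_alloc(size);
 *	if (buf)
 *		kmemleak_alloc(buf, size, 1, GFP_KERNEL);
 *
 * With min_count == 1, the block is reported once no pointer to it can
 * be found during a scan.
 */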
928
929 /**
930 * kmemleak_alloc_percpu - register a newly allocated __percpu object
931 * @ptr: __percpu pointer to beginning of the object
932 * @size: size of the object
933 * @gfp: flags used for kmemleak internal memory allocations
934 *
935 * This function is called from the kernel percpu allocator when a new object
936 * (memory block) is allocated (alloc_percpu).
937 */
938 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
939 gfp_t gfp)
940 {
941 unsigned int cpu;
942
943 pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
944
945 /*
946 * Percpu allocations are only scanned and not reported as leaks
947 * (min_count is set to 0).
948 */
949 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
950 for_each_possible_cpu(cpu)
951 create_object((unsigned long)per_cpu_ptr(ptr, cpu),
952 size, 0, gfp);
953 else if (kmemleak_early_log)
954 log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
955 }
956 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
957
958 /**
959 * kmemleak_free - unregister a previously registered object
960 * @ptr: pointer to beginning of the object
961 *
962 * This function is called from the kernel allocators when an object (memory
963 * block) is freed (kmem_cache_free, kfree, vfree etc.).
964 */
965 void __ref kmemleak_free(const void *ptr)
966 {
967 pr_debug("%s(0x%p)\n", __func__, ptr);
968
969 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
970 delete_object_full((unsigned long)ptr);
971 else if (kmemleak_early_log)
972 log_early(KMEMLEAK_FREE, ptr, 0, 0);
973 }
974 EXPORT_SYMBOL_GPL(kmemleak_free);
975
976 /**
977 * kmemleak_free_part - partially unregister a previously registered object
978 * @ptr: pointer to the beginning or inside the object. This also
979 * represents the start of the range to be freed
980 * @size: size to be unregistered
981 *
982 * This function is called when only a part of a memory block is freed
983 * (usually from the bootmem allocator).
984 */
985 void __ref kmemleak_free_part(const void *ptr, size_t size)
986 {
987 pr_debug("%s(0x%p)\n", __func__, ptr);
988
989 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
990 delete_object_part((unsigned long)ptr, size);
991 else if (kmemleak_early_log)
992 log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
993 }
994 EXPORT_SYMBOL_GPL(kmemleak_free_part);
995
996 /**
997 * kmemleak_free_percpu - unregister a previously registered __percpu object
998 * @ptr: __percpu pointer to beginning of the object
999 *
1000 * This function is called from the kernel percpu allocator when an object
1001 * (memory block) is freed (free_percpu).
1002 */
1003 void __ref kmemleak_free_percpu(const void __percpu *ptr)
1004 {
1005 unsigned int cpu;
1006
1007 pr_debug("%s(0x%p)\n", __func__, ptr);
1008
1009 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1010 for_each_possible_cpu(cpu)
1011 delete_object_full((unsigned long)per_cpu_ptr(ptr,
1012 cpu));
1013 else if (kmemleak_early_log)
1014 log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
1015 }
1016 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
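/*
 * The percpu hooks pair with the percpu allocator. A typical caller
 * (assuming a made-up struct foo) never invokes them directly:
 *
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *	...
 *	free_percpu(p);
 *
 * alloc_percpu() ends up in kmemleak_alloc_percpu() and free_percpu()
 * in kmemleak_free_percpu(), creating/deleting one kmemleak object per
 * possible CPU.
 */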
1017
1018 /**
1019 * kmemleak_update_trace - update object allocation stack trace
1020 * @ptr: pointer to beginning of the object
1021 *
1022 * Override the object allocation stack trace for cases where the actual
1023 * allocation place is not always useful.
1024 */
1025 void __ref kmemleak_update_trace(const void *ptr)
1026 {
1027 struct kmemleak_object *object;
1028 unsigned long flags;
1029
1030 pr_debug("%s(0x%p)\n", __func__, ptr);
1031
1032 if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1033 return;
1034
1035 object = find_and_get_object((unsigned long)ptr, 1);
1036 if (!object) {
1037 #ifdef DEBUG
1038 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1039 ptr);
1040 #endif
1041 return;
1042 }
1043
1044 spin_lock_irqsave(&object->lock, flags);
1045 object->trace_len = __save_stack_trace(object->trace);
1046 spin_unlock_irqrestore(&object->lock, flags);
1047
1048 put_object(object);
1049 }
1050 EXPORT_SYMBOL(kmemleak_update_trace);
1051
1052 /**
1053 * kmemleak_not_leak - mark an allocated object as false positive
1054 * @ptr: pointer to beginning of the object
1055 *
1056 * Calling this function on an object will cause the memory block to no longer
1057  * be reported as a leak and always be scanned.
1058 */
1059 void __ref kmemleak_not_leak(const void *ptr)
1060 {
1061 pr_debug("%s(0x%p)\n", __func__, ptr);
1062
1063 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1064 make_gray_object((unsigned long)ptr);
1065 else if (kmemleak_early_log)
1066 log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1067 }
1068 EXPORT_SYMBOL(kmemleak_not_leak);
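/*
 * Usage sketch (hypothetical driver code): a buffer that stays
 * reachable only through a physical address programmed into hardware
 * looks leaked to the scanner, so the false positive is annotated:
 *
 *	buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	kmemleak_not_leak(buf);
 *	hand_phys_addr_to_hw(virt_to_phys(buf));	(made-up helper)
 */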
1069
1070 /**
1071 * kmemleak_ignore - ignore an allocated object
1072 * @ptr: pointer to beginning of the object
1073 *
1074 * Calling this function on an object will cause the memory block to be
1075 * ignored (not scanned and not reported as a leak). This is usually done when
1076 * it is known that the corresponding block is not a leak and does not contain
1077 * any references to other allocated memory blocks.
1078 */
1079 void __ref kmemleak_ignore(const void *ptr)
1080 {
1081 pr_debug("%s(0x%p)\n", __func__, ptr);
1082
1083 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1084 make_black_object((unsigned long)ptr);
1085 else if (kmemleak_early_log)
1086 log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1087 }
1088 EXPORT_SYMBOL(kmemleak_ignore);
1089
1090 /**
1091 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1092 * @ptr: pointer to beginning or inside the object. This also
1093 * represents the start of the scan area
1094 * @size: size of the scan area
1095 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1096 *
1097 * This function is used when it is known that only certain parts of an object
1098 * contain references to other objects. Kmemleak will only scan these areas
1099  * reducing the number of false negatives.
1100 */
1101 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1102 {
1103 pr_debug("%s(0x%p)\n", __func__, ptr);
1104
1105 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1106 add_scan_area((unsigned long)ptr, size, gfp);
1107 else if (kmemleak_early_log)
1108 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1109 }
1110 EXPORT_SYMBOL(kmemleak_scan_area);
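/*
 * Usage sketch (hypothetical "struct blob" whose hdr field is the only
 * part that can hold pointers):
 *
 *	struct blob *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *	kmemleak_scan_area(&b->hdr, sizeof(b->hdr), GFP_KERNEL);
 *
 * Passing SIZE_MAX as @size extends the area to the end of the object
 * (see add_scan_area() above).
 */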
1111
1112 /**
1113 * kmemleak_no_scan - do not scan an allocated object
1114 * @ptr: pointer to beginning of the object
1115 *
1116 * This function notifies kmemleak not to scan the given memory block. Useful
1117 * in situations where it is known that the given object does not contain any
1118  * references to other objects. Kmemleak will not scan such objects, reducing
1119 * the number of false negatives.
1120 */
1121 void __ref kmemleak_no_scan(const void *ptr)
1122 {
1123 pr_debug("%s(0x%p)\n", __func__, ptr);
1124
1125 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1126 object_no_scan((unsigned long)ptr);
1127 else if (kmemleak_early_log)
1128 log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1129 }
1130 EXPORT_SYMBOL(kmemleak_no_scan);
1131
1132 /*
1133 * Update an object's checksum and return true if it was modified.
1134 */
1135 static bool update_checksum(struct kmemleak_object *object)
1136 {
1137 u32 old_csum = object->checksum;
1138
1139 if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1140 return false;
1141
1142 kasan_disable_current();
1143 object->checksum = crc32(0, (void *)object->pointer, object->size);
1144 kasan_enable_current();
1145
1146 return object->checksum != old_csum;
1147 }
1148
1149 /*
1150  * Memory scanning is a long process and it needs to be interruptible. This
1151  * function checks whether such an interrupt condition has occurred.
1152 */
1153 static int scan_should_stop(void)
1154 {
1155 if (!kmemleak_enabled)
1156 return 1;
1157
1158 /*
1159 * This function may be called from either process or kthread context,
1160 * hence the need to check for both stop conditions.
1161 */
1162 if (current->mm)
1163 return signal_pending(current);
1164 else
1165 return kthread_should_stop();
1166
1167 return 0;
1168 }
1169
1170 /*
1171 * Scan a memory block (exclusive range) for valid pointers and add those
1172 * found to the gray list.
1173 */
1174 static void scan_block(void *_start, void *_end,
1175 struct kmemleak_object *scanned)
1176 {
1177 unsigned long *ptr;
1178 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1179 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1180 unsigned long flags;
1181
1182 read_lock_irqsave(&kmemleak_lock, flags);
1183 for (ptr = start; ptr < end; ptr++) {
1184 struct kmemleak_object *object;
1185 unsigned long pointer;
1186
1187 if (scan_should_stop())
1188 break;
1189
1190 /* don't scan uninitialized memory */
1191 if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1192 BYTES_PER_POINTER))
1193 continue;
1194
1195 kasan_disable_current();
1196 pointer = *ptr;
1197 kasan_enable_current();
1198
1199 if (pointer < min_addr || pointer >= max_addr)
1200 continue;
1201
1202 /*
1203 * No need for get_object() here since we hold kmemleak_lock.
1204 * object->use_count cannot be dropped to 0 while the object
1205 * is still present in object_tree_root and object_list
1206 * (with updates protected by kmemleak_lock).
1207 */
1208 object = lookup_object(pointer, 1);
1209 if (!object)
1210 continue;
1211 if (object == scanned)
1212 /* self referenced, ignore */
1213 continue;
1214
1215 /*
1216 * Avoid the lockdep recursive warning on object->lock being
1217 * previously acquired in scan_object(). These locks are
1218 * enclosed by scan_mutex.
1219 */
1220 spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1221 if (!color_white(object)) {
1222 /* non-orphan, ignored or new */
1223 spin_unlock(&object->lock);
1224 continue;
1225 }
1226
1227 /*
1228 * Increase the object's reference count (number of pointers
1229 * to the memory block). If this count reaches the required
1230 * minimum, the object's color will become gray and it will be
1231 * added to the gray_list.
1232 */
1233 object->count++;
1234 if (color_gray(object)) {
1235 /* put_object() called when removing from gray_list */
1236 WARN_ON(!get_object(object));
1237 list_add_tail(&object->gray_list, &gray_list);
1238 }
1239 spin_unlock(&object->lock);
1240 }
1241 read_unlock_irqrestore(&kmemleak_lock, flags);
1242 }
1243
1244 /*
1245 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1246 */
1247 static void scan_large_block(void *start, void *end)
1248 {
1249 void *next;
1250
1251 while (start < end) {
1252 next = min(start + MAX_SCAN_SIZE, end);
1253 scan_block(start, next, NULL);
1254 start = next;
1255 cond_resched();
1256 }
1257 }
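/*
 * For example (illustrative numbers), a 1 MiB block is covered in
 * 1048576 / MAX_SCAN_SIZE == 256 scan_block() calls, with a
 * cond_resched() between chunks so that scanning does not monopolize
 * the CPU.
 */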
1258
1259 /*
1260 * Scan a memory block corresponding to a kmemleak_object. A condition is
1261 * that object->use_count >= 1.
1262 */
1263 static void scan_object(struct kmemleak_object *object)
1264 {
1265 struct kmemleak_scan_area *area;
1266 unsigned long flags;
1267
1268 /*
1269 * Once the object->lock is acquired, the corresponding memory block
1270 * cannot be freed (the same lock is acquired in delete_object).
1271 */
1272 spin_lock_irqsave(&object->lock, flags);
1273 if (object->flags & OBJECT_NO_SCAN)
1274 goto out;
1275 if (!(object->flags & OBJECT_ALLOCATED))
1276 /* already freed object */
1277 goto out;
1278 if (hlist_empty(&object->area_list)) {
1279 void *start = (void *)object->pointer;
1280 void *end = (void *)(object->pointer + object->size);
1281 void *next;
1282
1283 do {
1284 next = min(start + MAX_SCAN_SIZE, end);
1285 scan_block(start, next, object);
1286
1287 start = next;
1288 if (start >= end)
1289 break;
1290
1291 spin_unlock_irqrestore(&object->lock, flags);
1292 cond_resched();
1293 spin_lock_irqsave(&object->lock, flags);
1294 } while (object->flags & OBJECT_ALLOCATED);
1295 } else
1296 hlist_for_each_entry(area, &object->area_list, node)
1297 scan_block((void *)area->start,
1298 (void *)(area->start + area->size),
1299 object);
1300 out:
1301 spin_unlock_irqrestore(&object->lock, flags);
1302 }
1303
1304 /*
1305 * Scan the objects already referenced (gray objects). More objects will be
1306 * referenced and, if there are no memory leaks, all the objects are scanned.
1307 */
1308 static void scan_gray_list(void)
1309 {
1310 struct kmemleak_object *object, *tmp;
1311
1312 /*
1313 * The list traversal is safe for both tail additions and removals
1314 * from inside the loop. The kmemleak objects cannot be freed from
1315 * outside the loop because their use_count was incremented.
1316 */
1317 object = list_entry(gray_list.next, typeof(*object), gray_list);
1318 while (&object->gray_list != &gray_list) {
1319 cond_resched();
1320
1321 /* may add new objects to the list */
1322 if (!scan_should_stop())
1323 scan_object(object);
1324
1325 tmp = list_entry(object->gray_list.next, typeof(*object),
1326 gray_list);
1327
1328 /* remove the object from the list and release it */
1329 list_del(&object->gray_list);
1330 put_object(object);
1331
1332 object = tmp;
1333 }
1334 WARN_ON(!list_empty(&gray_list));
1335 }
1336
1337 /*
1338 * Scan data sections and all the referenced memory blocks allocated via the
1339 * kernel's standard allocators. This function must be called with the
1340 * scan_mutex held.
1341 */
1342 static void kmemleak_scan(void)
1343 {
1344 unsigned long flags;
1345 struct kmemleak_object *object;
1346 int i;
1347 int new_leaks = 0;
1348
1349 jiffies_last_scan = jiffies;
1350
1351 /* prepare the kmemleak_object's */
1352 rcu_read_lock();
1353 list_for_each_entry_rcu(object, &object_list, object_list) {
1354 spin_lock_irqsave(&object->lock, flags);
1355 #ifdef DEBUG
1356 /*
1357 * With a few exceptions there should be a maximum of
1358 * 1 reference to any object at this point.
1359 */
1360 if (atomic_read(&object->use_count) > 1) {
1361 pr_debug("object->use_count = %d\n",
1362 atomic_read(&object->use_count));
1363 dump_object_info(object);
1364 }
1365 #endif
1366 /* reset the reference count (whiten the object) */
1367 object->count = 0;
1368 if (color_gray(object) && get_object(object))
1369 list_add_tail(&object->gray_list, &gray_list);
1370
1371 spin_unlock_irqrestore(&object->lock, flags);
1372 }
1373 rcu_read_unlock();
1374
1375 /* data/bss scanning */
1376 scan_large_block(_sdata, _edata);
1377 scan_large_block(__bss_start, __bss_stop);
1378
1379 #ifdef CONFIG_SMP
1380 /* per-cpu sections scanning */
1381 for_each_possible_cpu(i)
1382 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1383 __per_cpu_end + per_cpu_offset(i));
1384 #endif
1385
1386 /*
1387 * Struct page scanning for each node.
1388 */
1389 get_online_mems();
1390 for_each_online_node(i) {
1391 unsigned long start_pfn = node_start_pfn(i);
1392 unsigned long end_pfn = node_end_pfn(i);
1393 unsigned long pfn;
1394
1395 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1396 struct page *page;
1397
1398 if (!pfn_valid(pfn))
1399 continue;
1400 page = pfn_to_page(pfn);
1401 /* only scan if page is in use */
1402 if (page_count(page) == 0)
1403 continue;
1404 scan_block(page, page + 1, NULL);
1405 }
1406 }
1407 put_online_mems();
1408
1409 /*
1410 * Scanning the task stacks (may introduce false negatives).
1411 */
1412 if (kmemleak_stack_scan) {
1413 struct task_struct *p, *g;
1414
1415 read_lock(&tasklist_lock);
1416 do_each_thread(g, p) {
1417 scan_block(task_stack_page(p), task_stack_page(p) +
1418 THREAD_SIZE, NULL);
1419 } while_each_thread(g, p);
1420 read_unlock(&tasklist_lock);
1421 }
1422
1423 /*
1424 * Scan the objects already referenced from the sections scanned
1425 * above.
1426 */
1427 scan_gray_list();
1428
1429 /*
1430 * Check for new or unreferenced objects modified since the previous
1431 * scan and color them gray until the next scan.
1432 */
1433 rcu_read_lock();
1434 list_for_each_entry_rcu(object, &object_list, object_list) {
1435 spin_lock_irqsave(&object->lock, flags);
1436 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1437 && update_checksum(object) && get_object(object)) {
1438 /* color it gray temporarily */
1439 object->count = object->min_count;
1440 list_add_tail(&object->gray_list, &gray_list);
1441 }
1442 spin_unlock_irqrestore(&object->lock, flags);
1443 }
1444 rcu_read_unlock();
1445
1446 /*
1447 * Re-scan the gray list for modified unreferenced objects.
1448 */
1449 scan_gray_list();
1450
1451 /*
1452 * If scanning was stopped do not report any new unreferenced objects.
1453 */
1454 if (scan_should_stop())
1455 return;
1456
1457 /*
1458 * Scanning result reporting.
1459 */
1460 rcu_read_lock();
1461 list_for_each_entry_rcu(object, &object_list, object_list) {
1462 spin_lock_irqsave(&object->lock, flags);
1463 if (unreferenced_object(object) &&
1464 !(object->flags & OBJECT_REPORTED)) {
1465 object->flags |= OBJECT_REPORTED;
1466 new_leaks++;
1467 }
1468 spin_unlock_irqrestore(&object->lock, flags);
1469 }
1470 rcu_read_unlock();
1471
1472 if (new_leaks) {
1473 kmemleak_found_leaks = true;
1474
1475 pr_info("%d new suspected memory leaks (see "
1476 "/sys/kernel/debug/kmemleak)\n", new_leaks);
1477 }
1478
1479 }
1480
1481 /*
1482 * Thread function performing automatic memory scanning. Unreferenced objects
1483 * at the end of a memory scan are reported but only the first time.
1484 */
1485 static int kmemleak_scan_thread(void *arg)
1486 {
1487 static int first_run = 1;
1488
1489 pr_info("Automatic memory scanning thread started\n");
1490 set_user_nice(current, 10);
1491
1492 /*
1493 * Wait before the first scan to allow the system to fully initialize.
1494 */
1495 if (first_run) {
1496 first_run = 0;
1497 ssleep(SECS_FIRST_SCAN);
1498 }
1499
1500 while (!kthread_should_stop()) {
1501 signed long timeout = jiffies_scan_wait;
1502
1503 mutex_lock(&scan_mutex);
1504 kmemleak_scan();
1505 mutex_unlock(&scan_mutex);
1506
1507 /* wait before the next scan */
1508 while (timeout && !kthread_should_stop())
1509 timeout = schedule_timeout_interruptible(timeout);
1510 }
1511
1512 pr_info("Automatic memory scanning thread ended\n");
1513
1514 return 0;
1515 }
1516
1517 /*
1518 * Start the automatic memory scanning thread. This function must be called
1519 * with the scan_mutex held.
1520 */
1521 static void start_scan_thread(void)
1522 {
1523 if (scan_thread)
1524 return;
1525 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1526 if (IS_ERR(scan_thread)) {
1527 pr_warning("Failed to create the scan thread\n");
1528 scan_thread = NULL;
1529 }
1530 }
1531
1532 /*
1533 * Stop the automatic memory scanning thread. This function must be called
1534 * with the scan_mutex held.
1535 */
1536 static void stop_scan_thread(void)
1537 {
1538 if (scan_thread) {
1539 kthread_stop(scan_thread);
1540 scan_thread = NULL;
1541 }
1542 }
1543
1544 /*
1545 * Iterate over the object_list and return the first valid object at or after
1546  * the required position with its use_count incremented. The function takes
1547  * the scan_mutex to serialize against the memory scanning.
1548 */
1549 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1550 {
1551 struct kmemleak_object *object;
1552 loff_t n = *pos;
1553 int err;
1554
1555 err = mutex_lock_interruptible(&scan_mutex);
1556 if (err < 0)
1557 return ERR_PTR(err);
1558
1559 rcu_read_lock();
1560 list_for_each_entry_rcu(object, &object_list, object_list) {
1561 if (n-- > 0)
1562 continue;
1563 if (get_object(object))
1564 goto out;
1565 }
1566 object = NULL;
1567 out:
1568 return object;
1569 }
1570
1571 /*
1572 * Return the next object in the object_list. The function decrements the
1573 * use_count of the previous object and increases that of the next one.
1574 */
1575 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1576 {
1577 struct kmemleak_object *prev_obj = v;
1578 struct kmemleak_object *next_obj = NULL;
1579 struct kmemleak_object *obj = prev_obj;
1580
1581 ++(*pos);
1582
1583 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1584 if (get_object(obj)) {
1585 next_obj = obj;
1586 break;
1587 }
1588 }
1589
1590 put_object(prev_obj);
1591 return next_obj;
1592 }
1593
1594 /*
1595 * Decrement the use_count of the last object required, if any.
1596 */
1597 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1598 {
1599 if (!IS_ERR(v)) {
1600 /*
1601 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1602 * waiting was interrupted, so only release it if !IS_ERR.
1603 */
1604 rcu_read_unlock();
1605 mutex_unlock(&scan_mutex);
1606 if (v)
1607 put_object(v);
1608 }
1609 }
1610
1611 /*
1612 * Print the information for an unreferenced object to the seq file.
1613 */
1614 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1615 {
1616 struct kmemleak_object *object = v;
1617 unsigned long flags;
1618
1619 spin_lock_irqsave(&object->lock, flags);
1620 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1621 print_unreferenced(seq, object);
1622 spin_unlock_irqrestore(&object->lock, flags);
1623 return 0;
1624 }
1625
1626 static const struct seq_operations kmemleak_seq_ops = {
1627 .start = kmemleak_seq_start,
1628 .next = kmemleak_seq_next,
1629 .stop = kmemleak_seq_stop,
1630 .show = kmemleak_seq_show,
1631 };
1632
1633 static int kmemleak_open(struct inode *inode, struct file *file)
1634 {
1635 return seq_open(file, &kmemleak_seq_ops);
1636 }
1637
1638 static int dump_str_object_info(const char *str)
1639 {
1640 unsigned long flags;
1641 struct kmemleak_object *object;
1642 unsigned long addr;
1643
1644 if (kstrtoul(str, 0, &addr))
1645 return -EINVAL;
1646 object = find_and_get_object(addr, 0);
1647 if (!object) {
1648 pr_info("Unknown object at 0x%08lx\n", addr);
1649 return -EINVAL;
1650 }
1651
1652 spin_lock_irqsave(&object->lock, flags);
1653 dump_object_info(object);
1654 spin_unlock_irqrestore(&object->lock, flags);
1655
1656 put_object(object);
1657 return 0;
1658 }
1659
1660 /*
1661 * We use grey instead of black to ensure we can do future scans on the same
1662 * objects. If we did not do future scans these black objects could
1663 * potentially contain references to newly allocated objects in the future and
1664 * we'd end up with false positives.
1665 */
1666 static void kmemleak_clear(void)
1667 {
1668 struct kmemleak_object *object;
1669 unsigned long flags;
1670
1671 rcu_read_lock();
1672 list_for_each_entry_rcu(object, &object_list, object_list) {
1673 spin_lock_irqsave(&object->lock, flags);
1674 if ((object->flags & OBJECT_REPORTED) &&
1675 unreferenced_object(object))
1676 __paint_it(object, KMEMLEAK_GREY);
1677 spin_unlock_irqrestore(&object->lock, flags);
1678 }
1679 rcu_read_unlock();
1680
1681 kmemleak_found_leaks = false;
1682 }
1683
1684 static void __kmemleak_do_cleanup(void);
1685
1686 /*
1687 * File write operation to configure kmemleak at run-time. The following
1688 * commands can be written to the /sys/kernel/debug/kmemleak file:
1689 * off - disable kmemleak (irreversible)
1690 * stack=on - enable the task stacks scanning
1691 * stack=off - disable the tasks stacks scanning
1692 * scan=on - start the automatic memory scanning thread
1693 * scan=off - stop the automatic memory scanning thread
1694 * scan=... - set the automatic memory scanning period in seconds (0 to
1695 * disable it)
1696 * scan - trigger a memory scan
1697 * clear - mark all current reported unreferenced kmemleak objects as
1698 * grey to ignore printing them, or free all kmemleak objects
1699 * if kmemleak has been disabled.
1700 * dump=... - dump information about the object found at the given address
1701 */
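/*
 * Example session from the shell (using the commands listed above):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 *	# echo scan=600 > /sys/kernel/debug/kmemleak
 *	# echo clear > /sys/kernel/debug/kmemleak
 */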
1702 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1703 size_t size, loff_t *ppos)
1704 {
1705 char buf[64];
1706 int buf_size;
1707 int ret;
1708
1709 buf_size = min(size, (sizeof(buf) - 1));
1710 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1711 return -EFAULT;
1712 buf[buf_size] = 0;
1713
1714 ret = mutex_lock_interruptible(&scan_mutex);
1715 if (ret < 0)
1716 return ret;
1717
1718 if (strncmp(buf, "clear", 5) == 0) {
1719 if (kmemleak_enabled)
1720 kmemleak_clear();
1721 else
1722 __kmemleak_do_cleanup();
1723 goto out;
1724 }
1725
1726 if (!kmemleak_enabled) {
1727 ret = -EBUSY;
1728 goto out;
1729 }
1730
1731 if (strncmp(buf, "off", 3) == 0)
1732 kmemleak_disable();
1733 else if (strncmp(buf, "stack=on", 8) == 0)
1734 kmemleak_stack_scan = 1;
1735 else if (strncmp(buf, "stack=off", 9) == 0)
1736 kmemleak_stack_scan = 0;
1737 else if (strncmp(buf, "scan=on", 7) == 0)
1738 start_scan_thread();
1739 else if (strncmp(buf, "scan=off", 8) == 0)
1740 stop_scan_thread();
1741 else if (strncmp(buf, "scan=", 5) == 0) {
1742 unsigned long secs;
1743
1744 ret = kstrtoul(buf + 5, 0, &secs);
1745 if (ret < 0)
1746 goto out;
1747 stop_scan_thread();
1748 if (secs) {
1749 jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1750 start_scan_thread();
1751 }
1752 } else if (strncmp(buf, "scan", 4) == 0)
1753 kmemleak_scan();
1754 else if (strncmp(buf, "dump=", 5) == 0)
1755 ret = dump_str_object_info(buf + 5);
1756 else
1757 ret = -EINVAL;
1758
1759 out:
1760 mutex_unlock(&scan_mutex);
1761 if (ret < 0)
1762 return ret;
1763
1764 /* ignore the rest of the buffer, only one command at a time */
1765 *ppos += size;
1766 return size;
1767 }
1768
1769 static const struct file_operations kmemleak_fops = {
1770 .owner = THIS_MODULE,
1771 .open = kmemleak_open,
1772 .read = seq_read,
1773 .write = kmemleak_write,
1774 .llseek = seq_lseek,
1775 .release = seq_release,
1776 };
1777
1778 static void __kmemleak_do_cleanup(void)
1779 {
1780 struct kmemleak_object *object;
1781
1782 rcu_read_lock();
1783 list_for_each_entry_rcu(object, &object_list, object_list)
1784 delete_object_full(object->pointer);
1785 rcu_read_unlock();
1786 }
1787
1788 /*
1789 * Stop the memory scanning thread and free the kmemleak internal objects if
1790  * no leaks were found (otherwise, kmemleak may still have some useful
1791 * information on memory leaks).
1792 */
1793 static void kmemleak_do_cleanup(struct work_struct *work)
1794 {
1795 stop_scan_thread();
1796
1797 /*
1798 * Once the scan thread has stopped, it is safe to no longer track
1799 * object freeing. Ordering of the scan thread stopping and the memory
1800 * accesses below is guaranteed by the kthread_stop() function.
1801 */
1802 kmemleak_free_enabled = 0;
1803
1804 if (!kmemleak_found_leaks)
1805 __kmemleak_do_cleanup();
1806 else
1807 pr_info("Kmemleak disabled without freeing internal data. "
1808 "Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
1809 }
1810
1811 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1812
1813 /*
1814 * Disable kmemleak. No memory allocation/freeing will be traced once this
1815 * function is called. Disabling kmemleak is an irreversible operation.
1816 */
1817 static void kmemleak_disable(void)
1818 {
1819 /* atomically check whether it was already invoked */
1820 if (cmpxchg(&kmemleak_error, 0, 1))
1821 return;
1822
1823 /* stop any memory operation tracing */
1824 kmemleak_enabled = 0;
1825
1826 /* check whether it is too early for a kernel thread */
1827 if (kmemleak_initialized)
1828 schedule_work(&cleanup_work);
1829 else
1830 kmemleak_free_enabled = 0;
1831
1832 pr_info("Kernel memory leak detector disabled\n");
1833 }
1834
1835 /*
1836 * Allow boot-time kmemleak disabling (enabled by default).
1837 */
1838 static int kmemleak_boot_config(char *str)
1839 {
1840 if (!str)
1841 return -EINVAL;
1842 if (strcmp(str, "off") == 0)
1843 kmemleak_disable();
1844 else if (strcmp(str, "on") == 0)
1845 kmemleak_skip_disable = 1;
1846 else
1847 return -EINVAL;
1848 return 0;
1849 }
1850 early_param("kmemleak", kmemleak_boot_config);
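/*
 * E.g. booting with "kmemleak=off" on the kernel command line disables
 * the detector early, while "kmemleak=on" keeps it enabled even when
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set.
 */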
1851
1852 static void __init print_log_trace(struct early_log *log)
1853 {
1854 struct stack_trace trace;
1855
1856 trace.nr_entries = log->trace_len;
1857 trace.entries = log->trace;
1858
1859 pr_notice("Early log backtrace:\n");
1860 print_stack_trace(&trace, 2);
1861 }
1862
1863 /*
1864 * Kmemleak initialization.
1865 */
1866 void __init kmemleak_init(void)
1867 {
1868 int i;
1869 unsigned long flags;
1870
1871 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1872 if (!kmemleak_skip_disable) {
1873 kmemleak_early_log = 0;
1874 kmemleak_disable();
1875 return;
1876 }
1877 #endif
1878
1879 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1880 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1881
1882 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1883 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1884
1885 if (crt_early_log >= ARRAY_SIZE(early_log))
1886 pr_warning("Early log buffer exceeded (%d), please increase "
1887 "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
1888
1889 /* the kernel is still in UP mode, so disabling the IRQs is enough */
1890 local_irq_save(flags);
1891 kmemleak_early_log = 0;
1892 if (kmemleak_error) {
1893 local_irq_restore(flags);
1894 return;
1895 } else {
1896 kmemleak_enabled = 1;
1897 kmemleak_free_enabled = 1;
1898 }
1899 local_irq_restore(flags);
1900
1901 /*
1902 * This is the point where tracking allocations is safe. Automatic
1903 * scanning is started during the late initcall. Add the early logged
1904 * callbacks to the kmemleak infrastructure.
1905 */
1906 for (i = 0; i < crt_early_log; i++) {
1907 struct early_log *log = &early_log[i];
1908
1909 switch (log->op_type) {
1910 case KMEMLEAK_ALLOC:
1911 early_alloc(log);
1912 break;
1913 case KMEMLEAK_ALLOC_PERCPU:
1914 early_alloc_percpu(log);
1915 break;
1916 case KMEMLEAK_FREE:
1917 kmemleak_free(log->ptr);
1918 break;
1919 case KMEMLEAK_FREE_PART:
1920 kmemleak_free_part(log->ptr, log->size);
1921 break;
1922 case KMEMLEAK_FREE_PERCPU:
1923 kmemleak_free_percpu(log->ptr);
1924 break;
1925 case KMEMLEAK_NOT_LEAK:
1926 kmemleak_not_leak(log->ptr);
1927 break;
1928 case KMEMLEAK_IGNORE:
1929 kmemleak_ignore(log->ptr);
1930 break;
1931 case KMEMLEAK_SCAN_AREA:
1932 kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1933 break;
1934 case KMEMLEAK_NO_SCAN:
1935 kmemleak_no_scan(log->ptr);
1936 break;
1937 default:
1938 kmemleak_warn("Unknown early log operation: %d\n",
1939 log->op_type);
1940 }
1941
1942 if (kmemleak_warning) {
1943 print_log_trace(log);
1944 kmemleak_warning = 0;
1945 }
1946 }
1947 }
1948
1949 /*
1950 * Late initialization function.
1951 */
1952 static int __init kmemleak_late_init(void)
1953 {
1954 struct dentry *dentry;
1955
1956 kmemleak_initialized = 1;
1957
1958 if (kmemleak_error) {
1959 /*
1960 * Some error occurred and kmemleak was disabled. There is a
1961 * small chance that kmemleak_disable() was called immediately
1962 * after setting kmemleak_initialized and we may end up with
1963 * two clean-up threads but serialized by scan_mutex.
1964 */
1965 schedule_work(&cleanup_work);
1966 return -ENOMEM;
1967 }
1968
1969 dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1970 &kmemleak_fops);
1971 if (!dentry)
1972 pr_warning("Failed to create the debugfs kmemleak file\n");
1973 mutex_lock(&scan_mutex);
1974 start_scan_thread();
1975 mutex_unlock(&scan_mutex);
1976
1977 pr_info("Kernel memory leak detector initialized\n");
1978
1979 return 0;
1980 }
1981 late_initcall(kmemleak_late_init);