/*
 * mm/slab_common.c
 *
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
        slab_nomerge = 1;
        return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
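
/*
 * Illustrative note (not part of the original file): merging can be
 * turned off from the kernel command line, e.g. by booting with
 *
 *      slab_nomerge
 *
 * "slub_nomerge" is accepted as well, as a legacy spelling, when SLUB
 * is the configured allocator.
 */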

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
        return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
        struct kmem_cache *s = NULL;

        if (!name || in_interrupt() || size < sizeof(void *) ||
            size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }

        list_for_each_entry(s, &slab_caches, list) {
                char tmp;
                int res;

                /*
                 * This happens when the module gets unloaded and doesn't
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module. Print a warning.
                 */
                res = probe_kernel_address(s->name, tmp);
                if (res) {
                        pr_err("Slab cache with size %d has lost its name\n",
                               s->object_size);
                        continue;
                }
        }

        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
        return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
                struct kmem_cache *s, struct kmem_cache *root_cache)
{
        size_t size;

        if (!memcg_kmem_enabled())
                return 0;

        if (!memcg) {
                size = offsetof(struct memcg_cache_params, memcg_caches);
                size += memcg_limited_groups_array_size * sizeof(void *);
        } else
                size = sizeof(struct memcg_cache_params);

        s->memcg_params = kzalloc(size, GFP_KERNEL);
        if (!s->memcg_params)
                return -ENOMEM;

        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
        } else
                s->memcg_params->is_root_cache = true;

        return 0;
}

static void memcg_free_cache_params(struct kmem_cache *s)
{
        kfree(s->memcg_params);
}

static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
{
        int size;
        struct memcg_cache_params *new_params, *cur_params;

        BUG_ON(!is_root_cache(s));

        size = offsetof(struct memcg_cache_params, memcg_caches);
        size += num_memcgs * sizeof(void *);

        new_params = kzalloc(size, GFP_KERNEL);
        if (!new_params)
                return -ENOMEM;

        cur_params = s->memcg_params;
        memcpy(new_params->memcg_caches, cur_params->memcg_caches,
               memcg_limited_groups_array_size * sizeof(void *));

        new_params->is_root_cache = true;

        rcu_assign_pointer(s->memcg_params, new_params);
        if (cur_params)
                kfree_rcu(cur_params, rcu_head);

        return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
        struct kmem_cache *s;
        int ret = 0;
        mutex_lock(&slab_mutex);

        list_for_each_entry(s, &slab_caches, list) {
                if (!is_root_cache(s))
                        continue;

                ret = memcg_update_cache_params(s, num_memcgs);
                /*
                 * Instead of freeing the memory, we'll just leave the caches
                 * up to this point in an updated state.
                 */
                if (ret)
                        goto out;
        }

        memcg_update_array_size(num_memcgs);
out:
        mutex_unlock(&slab_mutex);
        return ret;
}
#else
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
                struct kmem_cache *s, struct kmem_cache *root_cache)
{
        return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
        if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
                return 1;

        if (!is_root_cache(s))
                return 1;

        if (s->ctor)
                return 1;

        /*
         * We may have set a slab to be unmergeable during bootstrap.
         */
        if (s->refcount < 0)
                return 1;

        return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
                unsigned long flags, const char *name, void (*ctor)(void *))
{
        struct kmem_cache *s;

        if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
                return NULL;

        if (ctor)
                return NULL;

        size = ALIGN(size, sizeof(void *));
        align = calculate_alignment(flags, align, size);
        size = ALIGN(size, align);
        flags = kmem_cache_flags(size, flags, name, NULL);

        list_for_each_entry(s, &slab_caches, list) {
                if (slab_unmergeable(s))
                        continue;

                if (size > s->size)
                        continue;

                if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
                        continue;
                /*
                 * Check if alignment is compatible.
                 * Courtesy of Adrian Drzewiecki
                 */
                if ((s->size & ~(align - 1)) != s->size)
                        continue;

                if (s->size - size >= sizeof(void *))
                        continue;

                return s;
        }
        return NULL;
}
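
/*
 * Worked example (illustrative, assuming a 64-bit machine with 8-byte
 * minimum alignment): a request for a 60-byte cache with no ctor and
 * default flags is rounded up to size = 64. An existing mergeable
 * 64-byte cache with matching SLAB_MERGE_SAME bits passes every test:
 * 64 >= 64, the computed alignment divides 64, and the slack
 * 64 - 64 = 0 is below sizeof(void *), so that cache is reused instead
 * of a new one being created.
 */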

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size)
{
        /*
         * If the user wants hardware cache aligned objects then follow that
         * suggestion if the object is sufficiently large.
         *
         * The hardware cache alignment cannot override the specified
         * alignment though. If that is greater, then use it.
         */
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned long ralign = cache_line_size();
                while (size <= ralign / 2)
                        ralign /= 2;
                align = max(align, ralign);
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}
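
/*
 * Worked example (illustrative, assuming a 64-byte cache line and an
 * 8-byte ARCH_SLAB_MINALIGN): for SLAB_HWCACHE_ALIGN with size = 24 and
 * align = 0, ralign is halved from 64 to 32 while size <= ralign / 2,
 * so the result is ALIGN(32, sizeof(void *)) = 32. Objects are then
 * placed on 32-byte boundaries, so two of them share a cache line and
 * no object straddles one.
 */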

static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
                     unsigned long flags, void (*ctor)(void *),
                     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
        struct kmem_cache *s;
        int err;

        err = -ENOMEM;
        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (!s)
                goto out;

        s->name = name;
        s->object_size = object_size;
        s->size = size;
        s->align = align;
        s->ctor = ctor;

        err = memcg_alloc_cache_params(memcg, s, root_cache);
        if (err)
                goto out_free_cache;

        err = __kmem_cache_create(s, flags);
        if (err)
                goto out_free_cache;

        s->refcount = 1;
        list_add(&s->list, &slab_caches);
out:
        if (err)
                return ERR_PTR(err);
        return s;

out_free_cache:
        memcg_free_cache_params(s);
        kfree(s);
        goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
                  unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s;
        char *cache_name;
        int err;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        err = kmem_cache_sanity_check(name, size);
        if (err) {
                s = NULL;       /* suppress uninit var warning */
                goto out_unlock;
        }

        /*
         * Some allocators will constrain the set of valid flags to a subset
         * of all flags. We expect them to define CACHE_CREATE_MASK in this
         * case, and we'll just provide them with a sanitized version of the
         * passed flags.
         */
        flags &= CACHE_CREATE_MASK;

        s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                goto out_unlock;

        cache_name = kstrdup(name, GFP_KERNEL);
        if (!cache_name) {
                err = -ENOMEM;
                goto out_unlock;
        }

        s = do_kmem_cache_create(cache_name, size, size,
                                 calculate_alignment(flags, align, size),
                                 flags, ctor, NULL, NULL);
        if (IS_ERR(s)) {
                err = PTR_ERR(s);
                kfree(cache_name);
        }

out_unlock:
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();

        if (err) {
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                              name, err);
                else {
                        printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
                               name, err);
                        dump_stack();
                }
                return NULL;
        }
        return s;
}
EXPORT_SYMBOL(kmem_cache_create);
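
/*
 * Usage sketch (illustrative only; "foo_cache" and struct foo are
 * made-up names, not part of this file):
 *
 *      static struct kmem_cache *foo_cache;
 *
 *      foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                    SLAB_HWCACHE_ALIGN, NULL);
 *      if (!foo_cache)
 *              return -ENOMEM;
 *
 *      struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(foo_cache, p);
 *      kmem_cache_destroy(foo_cache);
 */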

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 * @memcg_name: The name of the memory cgroup (used for naming the new cache).
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
                                           struct kmem_cache *root_cache,
                                           const char *memcg_name)
{
        struct kmem_cache *s = NULL;
        char *cache_name;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
                               memcg_cache_id(memcg), memcg_name);
        if (!cache_name)
                goto out_unlock;

        s = do_kmem_cache_create(cache_name, root_cache->object_size,
                                 root_cache->size, root_cache->align,
                                 root_cache->flags, root_cache->ctor,
                                 memcg, root_cache);
        if (IS_ERR(s)) {
                kfree(cache_name);
                s = NULL;
        }

out_unlock:
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();

        return s;
}

static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
        int rc;

        if (!s->memcg_params ||
            !s->memcg_params->is_root_cache)
                return 0;

        mutex_unlock(&slab_mutex);
        rc = __memcg_cleanup_cache_params(s);
        mutex_lock(&slab_mutex);

        return rc;
}
#else
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
        return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
        kfree(s->name);
        kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        s->refcount--;
        if (s->refcount)
                goto out_unlock;

        if (memcg_cleanup_cache_params(s) != 0)
                goto out_unlock;

        if (__kmem_cache_shutdown(s) != 0) {
                printk(KERN_ERR "kmem_cache_destroy %s: "
                       "Slab cache still has objects\n", s->name);
                dump_stack();
                goto out_unlock;
        }

        list_del(&s->list);

        mutex_unlock(&slab_mutex);
        if (s->flags & SLAB_DESTROY_BY_RCU)
                rcu_barrier();

        memcg_free_cache_params(s);
#ifdef SLAB_SUPPORTS_SYSFS
        sysfs_slab_remove(s);
#else
        slab_kmem_cache_release(s);
#endif
        goto out;

out_unlock:
        mutex_unlock(&slab_mutex);
out:
        put_online_mems();
        put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
        int ret;

        get_online_cpus();
        get_online_mems();
        ret = __kmem_cache_shrink(cachep);
        put_online_mems();
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int slab_is_available(void)
{
        return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
                unsigned long flags)
{
        int err;

        s->name = name;
        s->size = s->object_size = size;
        s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
        err = __kmem_cache_create(s, flags);

        if (err)
                panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
                      name, size, err);

        s->refcount = -1;       /* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
                                unsigned long flags)
{
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        if (!s)
                panic("Out of memory when creating slab %s\n", name);

        create_boot_cache(s, name, size, flags);
        list_add(&s->list, &slab_caches);
        s->refcount = 1;
        return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table from a small slab size / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static s8 size_index[24] = {
        3,      /* 8 */
        4,      /* 16 */
        5,      /* 24 */
        5,      /* 32 */
        6,      /* 40 */
        6,      /* 48 */
        6,      /* 56 */
        6,      /* 64 */
        1,      /* 72 */
        1,      /* 80 */
        1,      /* 88 */
        1,      /* 96 */
        7,      /* 104 */
        7,      /* 112 */
        7,      /* 120 */
        7,      /* 128 */
        2,      /* 136 */
        2,      /* 144 */
        2,      /* 152 */
        2,      /* 160 */
        2,      /* 168 */
        2,      /* 176 */
        2,      /* 184 */
        2       /* 192 */
};

static inline int size_index_elem(size_t bytes)
{
        return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
        int index;

        if (unlikely(size > KMALLOC_MAX_SIZE)) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }

        if (size <= 192) {
                if (!size)
                        return ZERO_SIZE_PTR;

                index = size_index[size_index_elem(size)];
        } else
                index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
        if (unlikely((flags & GFP_DMA)))
                return kmalloc_dma_caches[index];
#endif
        return kmalloc_caches[index];
}
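
/*
 * Worked example (illustrative): kmalloc_slab(20, GFP_KERNEL) takes the
 * small-size path: size_index_elem(20) = (20 - 1) / 8 = 2, and
 * size_index[2] = 5, so the 32-byte cache kmalloc_caches[5] is used.
 * A larger request such as 500 bytes takes the fls() path:
 * fls(499) = 9, selecting the 512-byte cache.
 */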

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
        int i;

        /*
         * Patch up the size_index table if we have strange large alignment
         * requirements for the kmalloc array. This is only the case for
         * MIPS it seems. The standard arches will not generate any code here.
         *
         * Largest permitted alignment is 256 bytes due to the way we
         * handle the index determination for the smaller caches.
         *
         * Make sure that nothing crazy happens if someone starts tinkering
         * around with ARCH_KMALLOC_MINALIGN.
         */
        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                     (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
                int elem = size_index_elem(i);

                if (elem >= ARRAY_SIZE(size_index))
                        break;
                size_index[elem] = KMALLOC_SHIFT_LOW;
        }

        if (KMALLOC_MIN_SIZE >= 64) {
                /*
                 * The 96 byte sized cache is not used if the alignment
                 * is 64 bytes.
                 */
                for (i = 64 + 8; i <= 96; i += 8)
                        size_index[size_index_elem(i)] = 7;
        }

        if (KMALLOC_MIN_SIZE >= 128) {
                /*
                 * The 192 byte sized cache is not used if the alignment
                 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
                 * instead.
                 */
                for (i = 128 + 8; i <= 192; i += 8)
                        size_index[size_index_elem(i)] = 8;
        }
        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                if (!kmalloc_caches[i]) {
                        kmalloc_caches[i] = create_kmalloc_cache(NULL,
                                                                 1 << i, flags);
                }

                /*
                 * Caches that are not of a power-of-two size.
                 * These have to be created immediately after the
                 * earlier power-of-two caches.
                 */
                if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
                        kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

                if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
                        kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
        }

        /* Kmalloc array is now usable */
        slab_state = UP;

        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
                struct kmem_cache *s = kmalloc_caches[i];
                char *n;

                if (s) {
                        n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

                        BUG_ON(!n);
                        s->name = n;
                }
        }

#ifdef CONFIG_ZONE_DMA
        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
                struct kmem_cache *s = kmalloc_caches[i];

                if (s) {
                        int size = kmalloc_size(i);
                        char *n = kasprintf(GFP_NOWAIT,
                                            "dma-kmalloc-%d", size);

                        BUG_ON(!n);
                        kmalloc_dma_caches[i] = create_kmalloc_cache(n,
                                        size, SLAB_CACHE_DMA | flags);
                }
        }
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
        void *ret;
        struct page *page;

        flags |= __GFP_COMP;
        page = alloc_kmem_pages(flags, order);
        ret = page ? page_address(page) : NULL;
        kmemleak_alloc(ret, size, 1, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        void *ret = kmalloc_order(size, flags, order);
        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

void print_slabinfo_header(struct seq_file *m)
{
        /*
         * Output format version, so at least we can change it
         * without _too_ many complaints.
         */
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
        seq_puts(m, "slabinfo - version: 2.1\n");
#endif
        seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
                 "<objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
                 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
        seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;

        mutex_lock(&slab_mutex);
        if (!n)
                print_slabinfo_header(m);

        return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
        struct kmem_cache *c;
        struct slabinfo sinfo;
        int i;

        if (!is_root_cache(s))
                return;

        for_each_memcg_cache_index(i) {
                c = cache_from_memcg_idx(s, i);
                if (!c)
                        continue;

                memset(&sinfo, 0, sizeof(sinfo));
                get_slabinfo(c, &sinfo);

                info->active_slabs += sinfo.active_slabs;
                info->num_slabs += sinfo.num_slabs;
                info->shared_avail += sinfo.shared_avail;
                info->active_objs += sinfo.active_objs;
                info->num_objs += sinfo.num_objs;
        }
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
        struct slabinfo sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);

        memcg_accumulate_slabinfo(s, &sinfo);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));

        seq_printf(m, " : tunables %4u %4u %4u",
                   sinfo.limit, sinfo.batchcount, sinfo.shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
                   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
        slabinfo_show_stats(m, s);
        seq_putc(m, '\n');
        return 0;
}

static int s_show(struct seq_file *m, void *p)
{
        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

        if (!is_root_cache(s))
                return 0;
        return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
        .start = s_start,
        .next = slab_next,
        .stop = slab_stop,
        .show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
        .open = slabinfo_open,
        .read = seq_read,
        .write = slabinfo_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int __init slab_proc_init(void)
{
        proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
                    &proc_slabinfo_operations);
        return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
{
        void *ret;
        size_t ks = 0;

        if (p)
                ks = ksize(p);

        if (ks >= new_size)
                return (void *)p;

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want the buffer to be freed
 * immediately, for example when it may still be referenced, as with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
        if (unlikely(!new_size))
                return ZERO_SIZE_PTR;

        return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __do_krealloc(p, new_size, flags);
        if (ret && p != ret)
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);
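
/*
 * Usage sketch (illustrative; "buf" and the sizes are made up). Note the
 * classic realloc pattern: keep the old pointer until the new one is
 * known to be valid, since krealloc() leaves @p untouched on failure:
 *
 *      char *buf = kmalloc(64, GFP_KERNEL);
 *      char *tmp;
 *      ...
 *      tmp = krealloc(buf, 128, GFP_KERNEL);
 *      if (!tmp) {
 *              kfree(buf);
 *              return -ENOMEM;
 *      }
 *      buf = tmp;
 */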

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
        memset(mem, 0, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kzfree);
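
/*
 * Usage sketch (illustrative; "key" and its size are made up): kzfree()
 * suits buffers that held sensitive data:
 *
 *      u8 *key = kmalloc(32, GFP_KERNEL);
 *      ...
 *      kzfree(key);            (zeroes the whole allocation, then frees)
 */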

/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);