/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

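/*
 * Shadow encoding recap: each shadow byte covers KASAN_SHADOW_SCALE_SIZE
 * (8) bytes of memory. 0 means all 8 bytes are accessible, 1..7 means
 * only the first N bytes are, and a negative value poisons the whole
 * granule. Illustrative example, assuming p is 8-byte aligned:
 *
 *	kasan_unpoison_shadow(p, 13);
 *	// shadow[0] == 0: first granule fully accessible
 *	// shadow[1] == 5: only the first 5 bytes of the second granule
 */
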
static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

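/*
 * Example of the check above: a shadow value of 5 means only offsets
 * 0..4 within the granule are accessible. An access at offset 5 gives
 * last_accessible_byte == 5 >= shadow_value == 5, so the access to the
 * sixth byte is out of bounds and gets reported.
 */
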
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

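/*
 * Note on the fixed-size fast paths above: an N-byte access that is not
 * granule-aligned can straddle two shadow granules, which is why both
 * the last accessed byte and, when the access crosses a granule
 * boundary, the first shadow byte have to be tested.
 */
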
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

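/*
 * memory_is_zero() scans any unaligned prefix byte by byte, the bulk of
 * the range as 64-bit words, and the remaining tail byte by byte,
 * returning the address of the first non-zero shadow byte (or 0 if the
 * whole range is clean).
 */
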
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

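/*
 * Because memory_is_poisoned() is __always_inline and the size argument
 * in the __asan_loadX/__asan_storeX callers is a compile-time constant,
 * __builtin_constant_p(size) lets the compiler fold the switch away and
 * inline only the matching fixed-size check.
 */
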
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

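/*
 * The interceptors above make the kernel's memset/memmove/memcpy
 * resolve to checked wrappers: the affected ranges are validated first,
 * then the real, unchecked __memset/__memmove/__memcpy implementations
 * provided by the architecture do the work.
 */
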
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

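/*
 * Worked example: a 48-byte object fits in 64 - 16, so it gets a
 * 16-byte redzone; a 100-byte object exceeds 128 - 32 and falls through
 * to the 512 - 64 bucket, so it gets a 64-byte redzone.
 */
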
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

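/*
 * Resulting slab object layout (sizes vary per cache):
 *
 *	| object | alloc meta | free meta (optional) | redzone padding |
 *
 * The free meta is appended out of line only when the object itself
 * cannot hold it after free: RCU caches, caches with constructors, or
 * objects smaller than struct kasan_free_meta.
 */
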
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

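/*
 * save_stack() keeps traces compact and deduplicable: everything below
 * the irq entry point is cut off (it belongs to the interrupted context
 * and would make otherwise identical traces unique), the trailing
 * ULONG_MAX terminator is dropped, and the stack depot returns a small
 * handle instead of storing the full trace per allocation.
 */
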
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

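/*
 * Returning true from kasan_slab_free() tells the slab allocator not to
 * reuse the object yet: it sits poisoned in the quarantine, so a
 * use-after-free in the meantime hits KASAN_KMALLOC_FREE shadow and is
 * reported together with the recorded alloc and free stacks.
 */
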
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

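/*
 * Illustrative example: kmalloc(10) served from the kmalloc-64 cache
 * unpoisons bytes 0-9 (shadow: 0, then 2 for the partial granule) and
 * poisons bytes 16-63 as KASAN_KMALLOC_REDZONE, so an access at offset
 * 12 is reported even though it is still inside the 64-byte slab object.
 */
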
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

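/*
 * For every instrumented N-byte access the compiler emits a call to
 * __asan_loadN()/__asan_storeN() before the access itself (newer GCC
 * versions emit the _noabort variants, which are plain aliases here
 * since the kernel always continues after a report); e.g. a 4-byte read
 * of *p becomes __asan_load4((unsigned long)p) followed by the load.
 */
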
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif