/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

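/* Append an object link to the tail of the queue and account for its size. */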
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

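/* Splice the entire source queue onto the tail of the destination queue. */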
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

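/*
 * Move the head of @from, up to and including @last, to the tail of @to.
 * @size must be the total size in bytes of the objects being moved.
 */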
static void qlist_move(struct qlist_head *from, struct qlist_node *last,
		struct qlist_head *to, size_t size)
{
	if (unlikely(last == from->tail)) {
		qlist_move_all(from, to);
		return;
	}
	if (qlist_empty(to))
		to->head = from->head;
	else
		to->tail->next = from->head;
	to->tail = last;
	from->head = last->next;
	last->next = NULL;
	from->bytes -= size;
	to->bytes += size;
}

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

static struct qlist_head global_quarantine;
static DEFINE_SPINLOCK(quarantine_lock);

/* Maximum size of the global queue. */
static unsigned long quarantine_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

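/*
 * QUARANTINE_LOW_SIZE is the target the global queue is trimmed down to by
 * quarantine_reduce(); QUARANTINE_PERCPU_SIZE caps each per-cpu queue before
 * it is flushed into the global queue.
 */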
#define QUARANTINE_LOW_SIZE	(READ_ONCE(quarantine_size) * 3 / 4)
#define QUARANTINE_PERCPU_SIZE	(1 << 20)

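/* Look up the kmem_cache an object belongs to from its (head) page. */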
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}

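/*
 * Convert a quarantine link back to a pointer to the start of the object.
 * The link lives inside the object's kasan_free_meta area.
 */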
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

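/* Return a single quarantined object to the slab allocator. */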
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
	unsigned long flags;

	local_irq_save(flags);
	alloc_info->state = KASAN_STATE_FREE;
	___cache_free(cache, object, _THIS_IP_);
	local_irq_restore(flags);
}

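/*
 * Free every object on the queue. If @cache is NULL, each object's cache is
 * looked up individually.
 */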
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

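/*
 * Put a just-freed object into quarantine instead of returning it to the
 * allocator right away: append it to this CPU's queue, and once that queue
 * exceeds QUARANTINE_PERCPU_SIZE, move its contents to the global queue
 * under quarantine_lock.
 */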
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
		qlist_move_all(q, &temp);

	local_irq_restore(flags);

	if (unlikely(!qlist_empty(&temp))) {
		spin_lock_irqsave(&quarantine_lock, flags);
		qlist_move_all(&temp, &global_quarantine);
		spin_unlock_irqrestore(&quarantine_lock, flags);
	}
}

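/*
 * Shrink the global quarantine when it outgrows its limit: recompute the
 * limit (memory hotplug may have changed the amount of RAM), detach the
 * oldest objects until the queue drops to QUARANTINE_LOW_SIZE, and free the
 * detached objects outside of quarantine_lock.
 */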
void quarantine_reduce(void)
{
	size_t new_quarantine_size;
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;
	size_t size_to_free = 0;
	struct qlist_node *last;

	if (likely(READ_ONCE(global_quarantine.bytes) <=
		   READ_ONCE(quarantine_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus();
	WRITE_ONCE(quarantine_size, new_quarantine_size);

	last = global_quarantine.head;
	while (last) {
		struct kmem_cache *cache = qlink_to_cache(last);

		size_to_free += cache->size;
		if (!last->next || size_to_free >
		    global_quarantine.bytes - QUARANTINE_LOW_SIZE)
			break;
		last = last->next;
	}
	qlist_move(&global_quarantine, last, &to_free, size_to_free);

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}

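/*
 * Move every object belonging to @cache from @from to @to; objects from
 * other caches are re-queued on @from in their original order.
 */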
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

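/*
 * Flush this CPU's quarantined objects that belong to the cache being
 * removed; invoked on every CPU via on_each_cpu() below.
 */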
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

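/*
 * Free every quarantined object belonging to @cache, from both the per-cpu
 * queues and the global queue, so that no object of the cache remains
 * quarantined (e.g. when the cache is about to be destroyed).
 */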
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;

	on_each_cpu(per_cpu_remove_cache, cache, 1);

	spin_lock_irqsave(&quarantine_lock, flags);
	qlist_move_cache(&global_quarantine, &to_free, cache);
	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);
}