Commit | Line | Data |
---|---|---|
06e0ffa6 MH |
1 | /* |
2 | * Copyright(c) 2016 Intel Corporation. | |
3 | * | |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
5 | * redistributing this file, you may do so under either license. | |
6 | * | |
7 | * GPL LICENSE SUMMARY | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of version 2 of the GNU General Public License as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * General Public License for more details. | |
17 | * | |
18 | * BSD LICENSE | |
19 | * | |
20 | * Redistribution and use in source and binary forms, with or without | |
21 | * modification, are permitted provided that the following conditions | |
22 | * are met: | |
23 | * | |
24 | * - Redistributions of source code must retain the above copyright | |
25 | * notice, this list of conditions and the following disclaimer. | |
26 | * - Redistributions in binary form must reproduce the above copyright | |
27 | * notice, this list of conditions and the following disclaimer in | |
28 | * the documentation and/or other materials provided with the | |
29 | * distribution. | |
30 | * - Neither the name of Intel Corporation nor the names of its | |
31 | * contributors may be used to endorse or promote products derived | |
32 | * from this software without specific prior written permission. | |
33 | * | |
34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
45 | * | |
46 | */ | |
47 | #include <linux/list.h> | |
48 | #include <linux/mmu_notifier.h> | |
49 | #include <linux/rbtree.h> | |
50 | ||
51 | #include "mmu_rb.h" | |
52 | #include "trace.h" | |
53 | ||
/*
 * Per-RB-tree bookkeeping: binds one rb_root to its MMU notifier and the
 * client-supplied callback ops.  One handler exists per registered tree.
 */
struct mmu_rb_handler {
	struct list_head list;		/* entry on global mmu_rb_handlers */
	struct mmu_notifier mn;		/* notifier hooked into the owning mm */
	struct rb_root *root;		/* the tree this handler manages */
	spinlock_t lock;		/* protect the RB tree */
	struct mmu_rb_ops *ops;		/* client callbacks: compare/insert/remove/invalidate */
};
61 | ||
62 | static LIST_HEAD(mmu_rb_handlers); | |
63 | static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */ | |
64 | ||
65 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *); | |
66 | static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, | |
67 | unsigned long); | |
68 | static inline void mmu_notifier_range_start(struct mmu_notifier *, | |
69 | struct mm_struct *, | |
70 | unsigned long, unsigned long); | |
71 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *, | |
72 | unsigned long, unsigned long); | |
73 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, | |
74 | unsigned long, unsigned long); | |
75 | ||
/*
 * MMU notifier callbacks: both single-page and range invalidations funnel
 * into mmu_notifier_mem_invalidate().
 */
static struct mmu_notifier_ops mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};
80 | ||
81 | int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) | |
82 | { | |
83 | struct mmu_rb_handler *handlr; | |
c81e1f64 | 84 | unsigned long flags; |
06e0ffa6 MH |
85 | |
86 | if (!ops->compare || !ops->invalidate) | |
87 | return -EINVAL; | |
88 | ||
89 | handlr = kmalloc(sizeof(*handlr), GFP_KERNEL); | |
90 | if (!handlr) | |
91 | return -ENOMEM; | |
92 | ||
93 | handlr->root = root; | |
94 | handlr->ops = ops; | |
95 | INIT_HLIST_NODE(&handlr->mn.hlist); | |
96 | spin_lock_init(&handlr->lock); | |
97 | handlr->mn.ops = &mn_opts; | |
c81e1f64 | 98 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 | 99 | list_add_tail(&handlr->list, &mmu_rb_handlers); |
c81e1f64 | 100 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
101 | |
102 | return mmu_notifier_register(&handlr->mn, current->mm); | |
103 | } | |
104 | ||
105 | void hfi1_mmu_rb_unregister(struct rb_root *root) | |
106 | { | |
107 | struct mmu_rb_handler *handler = find_mmu_handler(root); | |
c81e1f64 | 108 | unsigned long flags; |
06e0ffa6 | 109 | |
c81e1f64 | 110 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 | 111 | list_del(&handler->list); |
c81e1f64 | 112 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
113 | |
114 | if (!RB_EMPTY_ROOT(root)) { | |
115 | struct rb_node *node; | |
116 | struct mmu_rb_node *rbnode; | |
117 | ||
118 | while ((node = rb_first(root))) { | |
119 | rbnode = rb_entry(node, struct mmu_rb_node, node); | |
120 | if (handler->ops->remove) | |
121 | handler->ops->remove(root, rbnode); | |
122 | rb_erase(node, root); | |
123 | kfree(rbnode); | |
124 | } | |
125 | } | |
126 | ||
127 | if (current->mm) | |
128 | mmu_notifier_unregister(&handler->mn, current->mm); | |
129 | kfree(handler); | |
130 | } | |
131 | ||
132 | int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) | |
133 | { | |
134 | struct rb_node **new, *parent = NULL; | |
135 | struct mmu_rb_handler *handler = find_mmu_handler(root); | |
136 | struct mmu_rb_node *this; | |
c81e1f64 | 137 | unsigned long flags; |
06e0ffa6 MH |
138 | int res, ret = 0; |
139 | ||
140 | if (!handler) | |
141 | return -EINVAL; | |
142 | ||
143 | new = &handler->root->rb_node; | |
c81e1f64 | 144 | spin_lock_irqsave(&handler->lock, flags); |
06e0ffa6 MH |
145 | while (*new) { |
146 | this = container_of(*new, struct mmu_rb_node, node); | |
147 | res = handler->ops->compare(this, mnode->addr, mnode->len); | |
148 | parent = *new; | |
149 | ||
150 | if (res < 0) { | |
151 | new = &((*new)->rb_left); | |
152 | } else if (res > 0) { | |
153 | new = &((*new)->rb_right); | |
154 | } else { | |
155 | ret = 1; | |
156 | goto unlock; | |
157 | } | |
158 | } | |
159 | ||
160 | if (handler->ops->insert) { | |
161 | ret = handler->ops->insert(root, mnode); | |
162 | if (ret) | |
163 | goto unlock; | |
164 | } | |
165 | ||
166 | rb_link_node(&mnode->node, parent, new); | |
167 | rb_insert_color(&mnode->node, root); | |
168 | unlock: | |
c81e1f64 | 169 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 MH |
170 | return ret; |
171 | } | |
172 | ||
173 | /* Caller must host handler lock */ | |
174 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, | |
175 | unsigned long addr, | |
176 | unsigned long len) | |
177 | { | |
178 | struct rb_node *node = handler->root->rb_node; | |
179 | struct mmu_rb_node *mnode; | |
180 | int res; | |
181 | ||
182 | while (node) { | |
183 | mnode = container_of(node, struct mmu_rb_node, node); | |
184 | res = handler->ops->compare(mnode, addr, len); | |
185 | ||
186 | if (res < 0) | |
187 | node = node->rb_left; | |
188 | else if (res > 0) | |
189 | node = node->rb_right; | |
190 | else | |
191 | return mnode; | |
192 | } | |
193 | return NULL; | |
194 | } | |
195 | ||
196 | static void __mmu_rb_remove(struct mmu_rb_handler *handler, | |
197 | struct mmu_rb_node *node) | |
198 | { | |
199 | /* Validity of handler and node pointers has been checked by caller. */ | |
200 | if (handler->ops->remove) | |
201 | handler->ops->remove(handler->root, node); | |
202 | rb_erase(&node->node, handler->root); | |
203 | } | |
204 | ||
205 | struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, | |
206 | unsigned long len) | |
207 | { | |
208 | struct mmu_rb_handler *handler = find_mmu_handler(root); | |
209 | struct mmu_rb_node *node; | |
c81e1f64 | 210 | unsigned long flags; |
06e0ffa6 MH |
211 | |
212 | if (!handler) | |
213 | return ERR_PTR(-EINVAL); | |
214 | ||
c81e1f64 | 215 | spin_lock_irqsave(&handler->lock, flags); |
06e0ffa6 | 216 | node = __mmu_rb_search(handler, addr, len); |
c81e1f64 | 217 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 MH |
218 | |
219 | return node; | |
220 | } | |
221 | ||
222 | void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) | |
223 | { | |
224 | struct mmu_rb_handler *handler = find_mmu_handler(root); | |
c81e1f64 | 225 | unsigned long flags; |
06e0ffa6 MH |
226 | |
227 | if (!handler || !node) | |
228 | return; | |
229 | ||
c81e1f64 | 230 | spin_lock_irqsave(&handler->lock, flags); |
06e0ffa6 | 231 | __mmu_rb_remove(handler, node); |
c81e1f64 | 232 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 MH |
233 | } |
234 | ||
235 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) | |
236 | { | |
237 | struct mmu_rb_handler *handler; | |
c81e1f64 | 238 | unsigned long flags; |
06e0ffa6 | 239 | |
c81e1f64 | 240 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 MH |
241 | list_for_each_entry(handler, &mmu_rb_handlers, list) { |
242 | if (handler->root == root) | |
243 | goto unlock; | |
244 | } | |
245 | handler = NULL; | |
246 | unlock: | |
c81e1f64 | 247 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
248 | return handler; |
249 | } | |
250 | ||
/* Single-page invalidation callback: forward as a one-page range. */
static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
}
256 | ||
/* Range invalidation callback: forward [start, end) to the common path. */
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, start, end);
}
264 | ||
265 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, | |
266 | unsigned long start, unsigned long end) | |
267 | { | |
268 | struct mmu_rb_handler *handler = | |
269 | container_of(mn, struct mmu_rb_handler, mn); | |
270 | struct rb_root *root = handler->root; | |
271 | struct mmu_rb_node *node; | |
c81e1f64 | 272 | unsigned long addr = start, flags; |
06e0ffa6 | 273 | |
c81e1f64 | 274 | spin_lock_irqsave(&handler->lock, flags); |
06e0ffa6 MH |
275 | while (addr < end) { |
276 | /* | |
277 | * There is no good way to provide a reasonable length to the | |
278 | * search function at this point. Using the remaining length in | |
279 | * the invalidation range is not the right thing to do. | |
280 | * We have to rely on the fact that the insertion algorithm | |
281 | * takes care of any overlap or length restrictions by using the | |
282 | * actual size of each node. Therefore, we can use a page as an | |
283 | * arbitrary, non-zero value. | |
284 | */ | |
285 | node = __mmu_rb_search(handler, addr, PAGE_SIZE); | |
286 | ||
287 | if (!node) { | |
288 | /* | |
289 | * Didn't find a node at this address. However, the | |
290 | * range could be bigger than what we have registered | |
291 | * so we have to keep looking. | |
292 | */ | |
293 | addr += PAGE_SIZE; | |
294 | continue; | |
295 | } | |
296 | if (handler->ops->invalidate(root, node)) | |
297 | __mmu_rb_remove(handler, node); | |
298 | ||
299 | /* | |
300 | * The next address to be looked up is computed based | |
301 | * on the node's starting address. This is due to the | |
302 | * fact that the range where we start might be in the | |
303 | * middle of the node's buffer so simply incrementing | |
304 | * the address by the node's size would result is a | |
305 | * bad address. | |
306 | */ | |
307 | addr = node->addr + node->len; | |
308 | } | |
c81e1f64 | 309 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 | 310 | } |