Commit | Line | Data |
---|---|---|
06e0ffa6 MH |
1 | /* |
2 | * Copyright(c) 2016 Intel Corporation. | |
3 | * | |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
5 | * redistributing this file, you may do so under either license. | |
6 | * | |
7 | * GPL LICENSE SUMMARY | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of version 2 of the GNU General Public License as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * General Public License for more details. | |
17 | * | |
18 | * BSD LICENSE | |
19 | * | |
20 | * Redistribution and use in source and binary forms, with or without | |
21 | * modification, are permitted provided that the following conditions | |
22 | * are met: | |
23 | * | |
24 | * - Redistributions of source code must retain the above copyright | |
25 | * notice, this list of conditions and the following disclaimer. | |
26 | * - Redistributions in binary form must reproduce the above copyright | |
27 | * notice, this list of conditions and the following disclaimer in | |
28 | * the documentation and/or other materials provided with the | |
29 | * distribution. | |
30 | * - Neither the name of Intel Corporation nor the names of its | |
31 | * contributors may be used to endorse or promote products derived | |
32 | * from this software without specific prior written permission. | |
33 | * | |
34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
45 | * | |
46 | */ | |
47 | #include <linux/list.h> | |
48 | #include <linux/mmu_notifier.h> | |
df5a00f8 | 49 | #include <linux/interval_tree_generic.h> |
06e0ffa6 MH |
50 | |
51 | #include "mmu_rb.h" | |
52 | #include "trace.h" | |
53 | ||
/*
 * Per-registration state: one handler is created for each RB tree root
 * registered through hfi1_mmu_rb_register() and looked up by root pointer.
 */
struct mmu_rb_handler {
	struct list_head list;	/* entry on the global mmu_rb_handlers list */
	struct mmu_notifier mn;	/* notifier registered against current->mm */
	struct rb_root *root;	/* caller-owned interval tree root */
	spinlock_t lock;	/* protect the RB tree */
	struct mmu_rb_ops *ops;	/* caller callbacks (insert/remove/invalidate/compare) */
};
61 | ||
/* All registered handlers, one per RB tree root. */
static LIST_HEAD(mmu_rb_handlers);
static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
64 | ||
/* Interval endpoint helpers consumed by INTERVAL_TREE_DEFINE() below. */
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
/* Map an RB tree root back to its registered handler, or NULL. */
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
/* MMU notifier callbacks; both funnel into mmu_notifier_mem_invalidate(). */
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
				     unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					unsigned long, unsigned long);
/* Caller must hold handler->lock. */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
77 | ||
/* Only single-page and range-start invalidations are handled here. */
static struct mmu_notifier_ops mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};
82 | ||
/*
 * Generate the static interval-tree helpers (__mmu_int_rb_insert/remove/
 * iter_first/iter_next) over struct mmu_rb_node. Note the generated API
 * treats the "last" endpoint as inclusive.
 */
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);
85 | ||
/* Interval start: the page-aligned base of the node's range. */
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}
90 | ||
91 | static unsigned long mmu_node_last(struct mmu_rb_node *node) | |
92 | { | |
93 | return ((node->addr & PAGE_MASK) + node->len); | |
94 | } | |
95 | ||
06e0ffa6 MH |
96 | int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) |
97 | { | |
98 | struct mmu_rb_handler *handlr; | |
c81e1f64 | 99 | unsigned long flags; |
06e0ffa6 MH |
100 | |
101 | if (!ops->compare || !ops->invalidate) | |
102 | return -EINVAL; | |
103 | ||
104 | handlr = kmalloc(sizeof(*handlr), GFP_KERNEL); | |
105 | if (!handlr) | |
106 | return -ENOMEM; | |
107 | ||
108 | handlr->root = root; | |
109 | handlr->ops = ops; | |
110 | INIT_HLIST_NODE(&handlr->mn.hlist); | |
111 | spin_lock_init(&handlr->lock); | |
112 | handlr->mn.ops = &mn_opts; | |
c81e1f64 | 113 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 | 114 | list_add_tail(&handlr->list, &mmu_rb_handlers); |
c81e1f64 | 115 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
116 | |
117 | return mmu_notifier_register(&handlr->mn, current->mm); | |
118 | } | |
119 | ||
/*
 * Tear down the handler registered for @root: unlink it from the global
 * list, flush every remaining node (invoking the owner's ->remove op),
 * unregister the MMU notifier, and free the handler. Unknown roots are
 * silently ignored.
 */
void hfi1_mmu_rb_unregister(struct rb_root *root)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	unsigned long flags;

	if (!handler)
		return;

	spin_lock_irqsave(&mmu_rb_lock, flags);
	list_del(&handler->list);
	spin_unlock_irqrestore(&mmu_rb_lock, flags);

	if (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node;
		struct mmu_rb_node *rbnode;

		/* Drain the tree; "false" = not the notifier path. */
		while ((node = rb_first(root))) {
			rbnode = rb_entry(node, struct mmu_rb_node, node);
			rb_erase(node, root);
			if (handler->ops->remove)
				handler->ops->remove(root, rbnode, false);
		}
	}

	/* Skip notifier unregister if the mm has already gone away. */
	if (current->mm)
		mmu_notifier_unregister(&handler->mn, current->mm);
	kfree(handler);
}
148 | ||
/*
 * Insert @mnode into the interval tree registered for @root.
 *
 * Return: 0 on success; -EINVAL if no handler is registered for @root or
 * an overlapping node already exists; otherwise the error returned by the
 * owner's ->insert op (in which case the node is backed out of the tree).
 */
int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	if (!handler)
		return -EINVAL;

	spin_lock_irqsave(&handler->lock, flags);
	hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
		  mnode->len);
	/* Reject overlapping registrations. */
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, root);

	if (handler->ops->insert) {
		ret = handler->ops->insert(root, mnode);
		if (ret)
			/* Owner vetoed the insert: back the node out. */
			__mmu_int_rb_remove(mnode, root);
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}
178 | ||
179 | /* Caller must host handler lock */ | |
180 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, | |
181 | unsigned long addr, | |
182 | unsigned long len) | |
183 | { | |
df5a00f8 MH |
184 | struct mmu_rb_node *node; |
185 | ||
353b71c7 | 186 | hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len); |
df5a00f8 | 187 | node = __mmu_int_rb_iter_first(handler->root, addr, len); |
353b71c7 MH |
188 | if (node) |
189 | hfi1_cdbg(MMU, "Found node addr 0x%llx, len %u", node->addr, | |
190 | node->len); | |
df5a00f8 | 191 | return node; |
06e0ffa6 MH |
192 | } |
193 | ||
/*
 * Unlink @node from the interval tree and notify the owner via ->remove.
 * @arg is forwarded to ->remove; it is true only on the MMU-notifier
 * invalidation path (see mmu_notifier_mem_invalidate()).
 * Caller must hold handler->lock.
 */
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
			    struct mmu_rb_node *node, bool arg)
{
	/* Validity of handler and node pointers has been checked by caller. */
	hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
		  node->len);
	__mmu_int_rb_remove(node, handler->root);
	if (handler->ops->remove)
		handler->ops->remove(handler->root, node, arg);
}
204 | ||
/*
 * Look up a node overlapping [addr, addr + len) in the tree for @root.
 *
 * Return: the overlapping node; NULL if none overlaps; or
 * ERR_PTR(-EINVAL) when no handler is registered for @root. Callers must
 * therefore check both IS_ERR() and NULL.
 */
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
				       unsigned long len)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	struct mmu_rb_node *node;
	unsigned long flags;

	if (!handler)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}
221 | ||
/*
 * Explicitly remove @node from the tree registered for @root (not the
 * notifier path, hence the false argument to __mmu_rb_remove()).
 * A missing handler or NULL @node is silently ignored.
 */
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	unsigned long flags;

	if (!handler || !node)
		return;

	spin_lock_irqsave(&handler->lock, flags);
	__mmu_rb_remove(handler, node, false);
	spin_unlock_irqrestore(&handler->lock, flags);
}
234 | ||
235 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) | |
236 | { | |
237 | struct mmu_rb_handler *handler; | |
c81e1f64 | 238 | unsigned long flags; |
06e0ffa6 | 239 | |
c81e1f64 | 240 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 MH |
241 | list_for_each_entry(handler, &mmu_rb_handlers, list) { |
242 | if (handler->root == root) | |
243 | goto unlock; | |
244 | } | |
245 | handler = NULL; | |
246 | unlock: | |
c81e1f64 | 247 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
248 | return handler; |
249 | } | |
250 | ||
/* Single-page invalidation callback: forward as a one-page range. */
static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
}
256 | ||
/* Range-invalidation callback; [start, end) with end exclusive. */
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, start, end);
}
264 | ||
265 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, | |
266 | unsigned long start, unsigned long end) | |
267 | { | |
268 | struct mmu_rb_handler *handler = | |
269 | container_of(mn, struct mmu_rb_handler, mn); | |
270 | struct rb_root *root = handler->root; | |
271 | struct mmu_rb_node *node; | |
df5a00f8 | 272 | unsigned long flags; |
06e0ffa6 | 273 | |
c81e1f64 | 274 | spin_lock_irqsave(&handler->lock, flags); |
df5a00f8 MH |
275 | for (node = __mmu_int_rb_iter_first(root, start, end); node; |
276 | node = __mmu_int_rb_iter_next(node, start, end)) { | |
353b71c7 MH |
277 | hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", |
278 | node->addr, node->len); | |
06e0ffa6 | 279 | if (handler->ops->invalidate(root, node)) |
909e2cd0 | 280 | __mmu_rb_remove(handler, node, true); |
06e0ffa6 | 281 | } |
c81e1f64 | 282 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 | 283 | } |