Commit | Line | Data |
---|---|---|
f8de50eb | 1 | /* |
a15a519e | 2 | * Copyright © 2006-2009, Intel Corporation. |
f8de50eb | 3 | * |
a15a519e DW | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | |
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | |
f8de50eb | 16 | * |
98bcef56 | 17 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
f8de50eb KA | 18 | */ |
19 | ||
38717946 | 20 | #include <linux/iova.h> |
85b45456 RM | 21 | #include <linux/slab.h> |
22 | ||
23 | static struct kmem_cache *iommu_iova_cache; | |
24 | ||
25 | int iommu_iova_cache_init(void) | |
26 | { | |
27 | int ret = 0; | |
28 | ||
29 | iommu_iova_cache = kmem_cache_create("iommu_iova", | |
30 | sizeof(struct iova), | |
31 | 0, | |
32 | SLAB_HWCACHE_ALIGN, | |
33 | NULL); | |
34 | if (!iommu_iova_cache) { | |
35 | pr_err("Couldn't create iova cache\n"); | |
36 | ret = -ENOMEM; | |
37 | } | |
38 | ||
39 | return ret; | |
40 | } | |
41 | ||
42 | void iommu_iova_cache_destroy(void) | |
43 | { | |
44 | kmem_cache_destroy(iommu_iova_cache); | |
45 | } | |
46 | ||
47 | struct iova *alloc_iova_mem(void) | |
48 | { | |
49 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | |
50 | } | |
51 | ||
52 | void free_iova_mem(struct iova *iova) | |
53 | { | |
54 | kmem_cache_free(iommu_iova_cache, iova); | |
55 | } | |
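Every struct iova above comes from this cache, so whichever IOMMU driver uses the allocator must create the cache once before any domain exists and tear it down only after the last one is gone. A minimal, hypothetical setup hook (the function name is made up; the matching teardown is sketched after put_iova_domain() further down):

```c
#include <linux/iova.h>

/* Hypothetical early-init hook of a driver built on this allocator. */
static int my_iommu_setup(void)
{
	int ret;

	ret = iommu_iova_cache_init();	/* must precede any alloc_iova_mem() */
	if (ret)
		return ret;		/* -ENOMEM: no cache, no iovas */

	/* ... probe hardware, call init_iova_domain() per unit, etc. ... */
	return 0;
}
```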
f8de50eb KA | 56 | |
57 | void |
0fb5fe87 RM | 58 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
59 | unsigned long start_pfn, unsigned long pfn_32bit) |
f8de50eb | 60 | { |
0fb5fe87 RM | 61 | /* |
62 | * IOVA granularity will normally be equal to the smallest | |
63 | * supported IOMMU page size; both *must* be capable of | |
64 | * representing individual CPU pages exactly. | |
65 | */ | |
66 | BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule)); | |
67 | ||
f8de50eb KA | 68 | spin_lock_init(&iovad->iova_rbtree_lock); |
69 | iovad->rbroot = RB_ROOT; | |
70 | iovad->cached32_node = NULL; | |
0fb5fe87 | 71 | iovad->granule = granule; |
1b722500 | 72 | iovad->start_pfn = start_pfn; |
f661197e | 73 | iovad->dma_32bit_pfn = pfn_32bit; |
f8de50eb KA | 74 | } |
75 | ||
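Callers typically pick the CPU page size as the granule (which satisfies the BUG_ON() above) and cap 32-bit DMA at the 4 GiB boundary. A sketch of that pattern; the defines and the my_iovad/my_domain_setup names are illustrative, not part of this file:

```c
#include <linux/iova.h>
#include <linux/dma-mapping.h>

/* Illustrative values, modelled on typical users of this allocator. */
#define IOVA_START_PFN	(1)				/* never hand out pfn 0 */
#define IOVA_PFN(addr)	((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN	IOVA_PFN(DMA_BIT_MASK(32))

static struct iova_domain my_iovad;	/* hypothetical domain */

static void my_domain_setup(void)
{
	/* granule == PAGE_SIZE, allocations start at pfn 1, 32-bit cap at 4 GiB */
	init_iova_domain(&my_iovad, PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN);
}
```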
76 | static struct rb_node * | |
77 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) | |
78 | { | |
f661197e | 79 | if ((*limit_pfn != iovad->dma_32bit_pfn) || |
f8de50eb KA | 80 | (iovad->cached32_node == NULL)) |
81 | return rb_last(&iovad->rbroot); | |
82 | else { | |
83 | struct rb_node *prev_node = rb_prev(iovad->cached32_node); | |
84 | struct iova *curr_iova = | |
85 | container_of(iovad->cached32_node, struct iova, node); | |
86 | *limit_pfn = curr_iova->pfn_lo - 1; | |
87 | return prev_node; | |
88 | } | |
89 | } | |
90 | ||
91 | static void | |
92 | __cached_rbnode_insert_update(struct iova_domain *iovad, | |
93 | unsigned long limit_pfn, struct iova *new) | |
94 | { | |
f661197e | 95 | if (limit_pfn != iovad->dma_32bit_pfn) |
f8de50eb KA | 96 | return; |
97 | iovad->cached32_node = &new->node; | |
98 | } | |
99 | ||
100 | static void | |
101 | __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |
102 | { | |
103 | struct iova *cached_iova; | |
104 | struct rb_node *curr; | |
105 | ||
106 | if (!iovad->cached32_node) | |
107 | return; | |
108 | curr = iovad->cached32_node; | |
109 | cached_iova = container_of(curr, struct iova, node); | |
110 | ||
1c9fc3d1 CW | 111 | if (free->pfn_lo >= cached_iova->pfn_lo) { |
112 | struct rb_node *node = rb_next(&free->node); | |
113 | struct iova *iova = container_of(node, struct iova, node); | |
114 | ||
115 | /* only cache if it's below 32bit pfn */ | |
116 | if (node && iova->pfn_lo < iovad->dma_32bit_pfn) | |
117 | iovad->cached32_node = node; | |
118 | else | |
119 | iovad->cached32_node = NULL; | |
120 | } | |
f8de50eb KA | 121 | } |
122 | ||
f76aec76 KA | 123 | /* Computes the padding size required to make the |
124 | * start address naturally aligned on its size |
125 | */ | |
126 | static int | |
127 | iova_get_pad_size(int size, unsigned int limit_pfn) | |
128 | { | |
129 | unsigned int pad_size = 0; | |
130 | unsigned int order = ilog2(size); | |
131 | ||
132 | if (order) | |
133 | pad_size = (limit_pfn + 1) % (1 << order); | |
134 | ||
135 | return pad_size; | |
136 | } | |
137 | ||
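For a size-aligned request the pad is simply the distance from limit_pfn + 1 down to the previous multiple of the (power-of-two) size. For example, with size = 8 and limit_pfn = 0x103, pad_size = (0x103 + 1) % 8 = 4, so __alloc_and_insert_iova_range() below would place pfn_lo at 0x103 - (8 + 4) + 1 = 0xf8, which is 8-aligned. A small userspace re-creation of the arithmetic, for illustration only:

```c
#include <stdio.h>

/* Userspace re-creation of iova_get_pad_size(); size is a power of two here,
 * as alloc_iova() rounds it up before this computation is ever reached. */
static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
{
	return size > 1 ? (limit_pfn + 1) % size : 0;
}

int main(void)
{
	unsigned int size = 8, limit_pfn = 0x103;
	unsigned int pad = pad_size(size, limit_pfn);
	unsigned int pfn_lo = limit_pfn - (size + pad) + 1;

	/* Prints: pad=4 pfn_lo=0xf8 pfn_hi=0xff */
	printf("pad=%u pfn_lo=%#x pfn_hi=%#x\n", pad, pfn_lo, pfn_lo + size - 1);
	return 0;
}
```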
ddf02886 | 138 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, |
139 | unsigned long size, unsigned long limit_pfn, | |
140 | struct iova *new, bool size_aligned) | |
f8de50eb | 141 | { |
ddf02886 | 142 | struct rb_node *prev, *curr = NULL; |
f8de50eb KA | 143 | unsigned long flags; |
144 | unsigned long saved_pfn; | |
f76aec76 | 145 | unsigned int pad_size = 0; |
f8de50eb KA | 146 | |
147 | /* Walk the tree backwards */ | |
148 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | |
149 | saved_pfn = limit_pfn; | |
150 | curr = __get_cached_rbnode(iovad, &limit_pfn); | |
ddf02886 | 151 | prev = curr; |
f8de50eb KA | 152 | while (curr) { |
153 | struct iova *curr_iova = container_of(curr, struct iova, node); | |
ddf02886 | 154 | |
f8de50eb KA | 155 | if (limit_pfn < curr_iova->pfn_lo) |
156 | goto move_left; | |
f76aec76 | 157 | else if (limit_pfn < curr_iova->pfn_hi) |
f8de50eb | 158 | goto adjust_limit_pfn; |
f76aec76 KA | 159 | else { |
160 | if (size_aligned) | |
161 | pad_size = iova_get_pad_size(size, limit_pfn); | |
162 | if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) | |
163 | break; /* found a free slot */ | |
164 | } | |
f8de50eb KA | 165 | adjust_limit_pfn: |
166 | limit_pfn = curr_iova->pfn_lo - 1; | |
167 | move_left: | |
ddf02886 | 168 | prev = curr; |
f8de50eb KA | 169 | curr = rb_prev(curr); |
170 | } | |
171 | ||
f76aec76 KA | 172 | if (!curr) { |
173 | if (size_aligned) | |
174 | pad_size = iova_get_pad_size(size, limit_pfn); | |
1b722500 | 175 | if ((iovad->start_pfn + size + pad_size) > limit_pfn) { |
f76aec76 KA | 176 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
177 | return -ENOMEM; | |
178 | } | |
f8de50eb | 179 | } |
f76aec76 KA | 180 | |
181 | /* pfn_lo will point to size aligned address if size_aligned is set */ | |
182 | new->pfn_lo = limit_pfn - (size + pad_size) + 1; | |
183 | new->pfn_hi = new->pfn_lo + size - 1; | |
f8de50eb | 184 | |
ddf02886 | 185 | /* Insert the new_iova into domain rbtree by holding writer lock */ |
186 | /* Add new node and rebalance tree. */ | |
187 | { | |
a15a519e DW | 188 | struct rb_node **entry, *parent = NULL; |
189 | ||
190 | /* If we have 'prev', it's a valid place to start the | |
191 | insertion. Otherwise, start from the root. */ | |
192 | if (prev) | |
193 | entry = &prev; | |
194 | else | |
195 | entry = &iovad->rbroot.rb_node; | |
196 | ||
ddf02886 | 197 | /* Figure out where to put new node */ |
198 | while (*entry) { | |
199 | struct iova *this = container_of(*entry, | |
200 | struct iova, node); | |
201 | parent = *entry; | |
202 | ||
203 | if (new->pfn_lo < this->pfn_lo) | |
204 | entry = &((*entry)->rb_left); | |
205 | else if (new->pfn_lo > this->pfn_lo) | |
206 | entry = &((*entry)->rb_right); | |
207 | else | |
208 | BUG(); /* this should not happen */ | |
209 | } | |
210 | ||
211 | /* Add new node and rebalance tree. */ | |
212 | rb_link_node(&new->node, parent, entry); | |
213 | rb_insert_color(&new->node, &iovad->rbroot); | |
214 | } | |
215 | __cached_rbnode_insert_update(iovad, saved_pfn, new); | |
216 | ||
f8de50eb | 217 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
ddf02886 | 218 | |
219 | ||
f8de50eb KA | 220 | return 0; |
221 | } | |
222 | ||
223 | static void | |
224 | iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |
225 | { | |
226 | struct rb_node **new = &(root->rb_node), *parent = NULL; | |
227 | /* Figure out where to put new node */ | |
228 | while (*new) { | |
229 | struct iova *this = container_of(*new, struct iova, node); | |
733cac2a | 230 | |
f8de50eb KA | 231 | parent = *new; |
232 | ||
233 | if (iova->pfn_lo < this->pfn_lo) | |
234 | new = &((*new)->rb_left); | |
235 | else if (iova->pfn_lo > this->pfn_lo) | |
236 | new = &((*new)->rb_right); | |
237 | else | |
238 | BUG(); /* this should not happen */ | |
239 | } | |
240 | /* Add new node and rebalance tree. */ | |
241 | rb_link_node(&iova->node, parent, new); | |
242 | rb_insert_color(&iova->node, root); | |
243 | } | |
244 | ||
245 | /** | |
246 | * alloc_iova - allocates an iova | |
07db0409 MI | 247 | * @iovad: - iova domain in question |
248 | * @size: - size of page frames to allocate | |
249 | * @limit_pfn: - max limit address | |
250 | * @size_aligned: - set if size_aligned address range is required | |
1b722500 RM | 251 | * This function allocates an iova in the range iovad->start_pfn to limit_pfn, |
252 | * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned |
f76aec76 KA | 253 | * flag is set then the allocated address iova->pfn_lo will be naturally |
254 | * aligned on roundup_power_of_two(size). |
f8de50eb KA | 255 | */ |
256 | struct iova * | |
257 | alloc_iova(struct iova_domain *iovad, unsigned long size, | |
f76aec76 KA | 258 | unsigned long limit_pfn, |
259 | bool size_aligned) | |
f8de50eb | 260 | { |
f8de50eb KA | 261 | struct iova *new_iova; |
262 | int ret; | |
263 | ||
264 | new_iova = alloc_iova_mem(); | |
265 | if (!new_iova) | |
266 | return NULL; | |
267 | ||
f76aec76 KA | 268 | /* If size aligned is set then round the size |
269 | * to the next power of two. |
270 | */ | |
271 | if (size_aligned) | |
272 | size = __roundup_pow_of_two(size); | |
273 | ||
ddf02886 | 274 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, |
275 | new_iova, size_aligned); | |
f8de50eb KA | 276 | |
277 | if (ret) { | |
f8de50eb KA | 278 | free_iova_mem(new_iova); |
279 | return NULL; | |
280 | } | |
281 | ||
f8de50eb KA | 282 | return new_iova; |
283 | } | |
284 | ||
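A typical user rounds its byte length up to pages, asks for a size-aligned range below 4 GiB, and turns the returned pfn_lo back into a bus address. Sketch only: my_map_range() is a made-up name, and it assumes the domain was initialized with granule == PAGE_SIZE as in the earlier example:

```c
#include <linux/iova.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: grab 'nrpages' of IOVA space below 4 GiB. */
static dma_addr_t my_map_range(struct iova_domain *iovad, unsigned long nrpages)
{
	struct iova *iova;

	/* size-aligned, so pfn_lo is a multiple of roundup_pow_of_two(nrpages) */
	iova = alloc_iova(iovad, nrpages,
			  DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
	if (!iova)
		return 0;	/* space below the limit is exhausted */

	/* granule == PAGE_SIZE assumed at init_iova_domain() time */
	return (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
}
```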
285 | /** | |
286 | * find_iova - finds an iova for a given pfn |
07db0409 MI | 287 | * @iovad: - iova domain in question. |
288 | * @pfn: - page frame number | |
f8de50eb KA | 289 | * This function finds and returns an iova belonging to the |
290 | * given domain which matches the given pfn. |
291 | */ | |
292 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | |
293 | { | |
294 | unsigned long flags; | |
295 | struct rb_node *node; | |
296 | ||
297 | /* Take the lock so that no other thread is manipulating the rbtree */ | |
298 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | |
299 | node = iovad->rbroot.rb_node; | |
300 | while (node) { | |
301 | struct iova *iova = container_of(node, struct iova, node); | |
302 | ||
303 | /* If pfn falls within iova's range, return iova */ | |
304 | if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { | |
305 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | |
306 | /* We are not holding the lock while this iova | |
307 | * is referenced by the caller as the same thread | |
308 | * which called this function also calls __free_iova() | |
07db0409 | 309 | * and it is by design that only one thread can possibly |
f8de50eb KA | 310 | * reference a particular iova and hence no conflict. |
311 | */ | |
312 | return iova; | |
313 | } | |
314 | ||
315 | if (pfn < iova->pfn_lo) | |
316 | node = node->rb_left; | |
317 | else if (pfn > iova->pfn_lo) | |
318 | node = node->rb_right; | |
319 | } | |
320 | ||
321 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | |
322 | return NULL; | |
323 | } | |
324 | ||
325 | /** | |
326 | * __free_iova - frees the given iova | |
327 | * @iovad: iova domain in question. | |
328 | * @iova: iova in question. | |
329 | * Frees the given iova belonging to the given domain |
330 | */ | |
331 | void | |
332 | __free_iova(struct iova_domain *iovad, struct iova *iova) | |
333 | { | |
334 | unsigned long flags; | |
335 | ||
336 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | |
337 | __cached_rbnode_delete_update(iovad, iova); | |
338 | rb_erase(&iova->node, &iovad->rbroot); | |
339 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | |
340 | free_iova_mem(iova); | |
341 | } | |
342 | ||
343 | /** | |
344 | * free_iova - finds and frees the iova for a given pfn | |
345 | * @iovad: - iova domain in question. | |
346 | * @pfn: - pfn that was allocated previously |
347 | * This function finds an iova for a given pfn and then |
348 | * frees the iova from that domain. | |
349 | */ | |
350 | void | |
351 | free_iova(struct iova_domain *iovad, unsigned long pfn) | |
352 | { | |
353 | struct iova *iova = find_iova(iovad, pfn); | |
733cac2a | 354 | |
f8de50eb KA | 355 | if (iova) |
356 | __free_iova(iovad, iova); | |
357 | ||
358 | } | |
359 | ||
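Freeing usually happens by pfn: the unmap path recovers the pfn from the bus address it handed out and lets free_iova() do the lookup plus release in one step (find_iova() followed by __free_iova() is the explicit two-step form). Continuing the hypothetical sketch from alloc_iova() above:

```c
/* Hypothetical counterpart to my_map_range() above. */
static void my_unmap_range(struct iova_domain *iovad, dma_addr_t dma_addr)
{
	/* granule == PAGE_SIZE assumed, so bus address >> PAGE_SHIFT is the pfn */
	free_iova(iovad, dma_addr >> PAGE_SHIFT);
}
```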
360 | /** | |
361 | * put_iova_domain - destroys the iova domain |
362 | * @iovad: - iova domain in question. |
363 | * All the iovas in that domain are destroyed. |
364 | */ | |
365 | void put_iova_domain(struct iova_domain *iovad) | |
366 | { | |
367 | struct rb_node *node; | |
368 | unsigned long flags; | |
369 | ||
370 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | |
371 | node = rb_first(&iovad->rbroot); | |
372 | while (node) { | |
373 | struct iova *iova = container_of(node, struct iova, node); | |
733cac2a | 374 | |
f8de50eb KA | 375 | rb_erase(node, &iovad->rbroot); |
376 | free_iova_mem(iova); | |
377 | node = rb_first(&iovad->rbroot); | |
378 | } | |
379 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | |
380 | } | |
381 | ||
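Teardown runs the other way around from setup: put_iova_domain() already frees every node still in the tree, and the backing kmem cache may only be destroyed once no iova_domain is left anywhere. A hedged sketch reusing the hypothetical names from the earlier examples:

```c
/* Hypothetical shutdown path, mirroring my_iommu_setup()/my_domain_setup(). */
static void my_iommu_teardown(void)
{
	put_iova_domain(&my_iovad);	/* releases all remaining iovas */

	/* only safe once every iova_domain in the driver is gone */
	iommu_iova_cache_destroy();
}
```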
382 | static int | |
383 | __is_range_overlap(struct rb_node *node, | |
384 | unsigned long pfn_lo, unsigned long pfn_hi) | |
385 | { | |
386 | struct iova *iova = container_of(node, struct iova, node); | |
387 | ||
388 | if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) | |
389 | return 1; | |
390 | return 0; | |
391 | } | |
392 | ||
75f05569 JL | 393 | static inline struct iova * |
394 | alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi) | |
395 | { | |
396 | struct iova *iova; | |
397 | ||
398 | iova = alloc_iova_mem(); | |
399 | if (iova) { | |
400 | iova->pfn_lo = pfn_lo; | |
401 | iova->pfn_hi = pfn_hi; | |
402 | } | |
403 | ||
404 | return iova; | |
405 | } | |
406 | ||
f8de50eb KA | 407 | static struct iova * |
408 | __insert_new_range(struct iova_domain *iovad, | |
409 | unsigned long pfn_lo, unsigned long pfn_hi) | |
410 | { | |
411 | struct iova *iova; | |
412 | ||
75f05569 JL | 413 | iova = alloc_and_init_iova(pfn_lo, pfn_hi); |
414 | if (iova) | |
415 | iova_insert_rbtree(&iovad->rbroot, iova); | |
f8de50eb | 416 | |
f8de50eb KA | 417 | return iova; |
418 | } | |
419 | ||
420 | static void | |
421 | __adjust_overlap_range(struct iova *iova, | |
422 | unsigned long *pfn_lo, unsigned long *pfn_hi) | |
423 | { | |
424 | if (*pfn_lo < iova->pfn_lo) | |
425 | iova->pfn_lo = *pfn_lo; | |
426 | if (*pfn_hi > iova->pfn_hi) | |
427 | *pfn_lo = iova->pfn_hi + 1; | |
428 | } | |
429 | ||
430 | /** | |
431 | * reserve_iova - reserves an iova in the given range | |
432 | * @iovad: - iova domain pointer | |
433 | * @pfn_lo: - lower page frame address | |
434 | * @pfn_hi: - higher pfn address |
435 | * This function reserves the address range from pfn_lo to pfn_hi so |
436 | * that this range is not dished out as part of alloc_iova. |
437 | */ | |
438 | struct iova * | |
439 | reserve_iova(struct iova_domain *iovad, | |
440 | unsigned long pfn_lo, unsigned long pfn_hi) | |
441 | { | |
442 | struct rb_node *node; | |
443 | unsigned long flags; | |
444 | struct iova *iova; | |
445 | unsigned int overlap = 0; | |
446 | ||
3d39cecc | 447 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); |
f8de50eb KA | 448 | for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { |
449 | if (__is_range_overlap(node, pfn_lo, pfn_hi)) { | |
450 | iova = container_of(node, struct iova, node); | |
451 | __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); | |
452 | if ((pfn_lo >= iova->pfn_lo) && | |
453 | (pfn_hi <= iova->pfn_hi)) | |
454 | goto finish; | |
455 | overlap = 1; | |
456 | ||
457 | } else if (overlap) | |
458 | break; | |
459 | } | |
460 | ||
25985edc | 461 | /* We are here either because this is the first reserved node |
f8de50eb KA | 462 | * or we need to insert the remaining non-overlapping addr range |
463 | */ |
464 | iova = __insert_new_range(iovad, pfn_lo, pfn_hi); | |
465 | finish: | |
466 | ||
3d39cecc | 467 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
f8de50eb KA | 468 | return iova; |
469 | } | |
470 | ||
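Reservation is how callers punch holes in the allocatable space so that alloc_iova() can never hand those pfns out, e.g. for address windows the hardware decodes specially (the VT-d driver reserves the 0xfee00000 interrupt window this way). A sketch with illustrative addresses and a made-up function name:

```c
#include <linux/iova.h>
#include <linux/errno.h>

static int my_reserve_ranges(struct iova_domain *iovad)
{
	/* Illustrative: keep 0xfee00000-0xfeefffff out of the allocator. */
	unsigned long lo = 0xfee00000UL >> PAGE_SHIFT;
	unsigned long hi = 0xfeefffffUL >> PAGE_SHIFT;

	if (!reserve_iova(iovad, lo, hi))
		return -ENODEV;	/* could not pin down the range */
	return 0;
}
```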
471 | /** | |
472 | * copy_reserved_iova - copies the reserved iovas between domains |
473 | * @from: - source domain from where to copy |
474 | * @to: - destination domain where to copy |
475 | * This function copies reserved iovas from one domain to |
476 | * the other. |
477 | */ | |
478 | void | |
479 | copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) | |
480 | { | |
481 | unsigned long flags; | |
482 | struct rb_node *node; | |
483 | ||
3d39cecc | 484 | spin_lock_irqsave(&from->iova_rbtree_lock, flags); |
f8de50eb KA | 485 | for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { |
486 | struct iova *iova = container_of(node, struct iova, node); | |
487 | struct iova *new_iova; | |
733cac2a | 488 | |
f8de50eb KA | 489 | new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); |
490 | if (!new_iova) |
491 | printk(KERN_ERR "Reserve iova range %lx-%lx failed\n", |
492 | iova->pfn_lo, iova->pfn_hi); |
493 | } | |
3d39cecc | 494 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); |
f8de50eb | 495 | } |
75f05569 JL | 496 | |
497 | struct iova * | |
498 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, | |
499 | unsigned long pfn_lo, unsigned long pfn_hi) | |
500 | { | |
501 | unsigned long flags; | |
502 | struct iova *prev = NULL, *next = NULL; | |
503 | ||
504 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | |
505 | if (iova->pfn_lo < pfn_lo) { | |
506 | prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1); | |
507 | if (prev == NULL) | |
508 | goto error; | |
509 | } | |
510 | if (iova->pfn_hi > pfn_hi) { | |
511 | next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi); | |
512 | if (next == NULL) | |
513 | goto error; | |
514 | } | |
515 | ||
516 | __cached_rbnode_delete_update(iovad, iova); | |
517 | rb_erase(&iova->node, &iovad->rbroot); | |
518 | ||
519 | if (prev) { | |
520 | iova_insert_rbtree(&iovad->rbroot, prev); | |
521 | iova->pfn_lo = pfn_lo; | |
522 | } | |
523 | if (next) { | |
524 | iova_insert_rbtree(&iovad->rbroot, next); | |
525 | iova->pfn_hi = pfn_hi; | |
526 | } | |
527 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | |
528 | ||
529 | return iova; | |
530 | ||
531 | error: | |
532 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | |
533 | if (prev) | |
534 | free_iova_mem(prev); | |
535 | return NULL; | |
536 | } |
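split_and_remove_iova() carves [pfn_lo, pfn_hi] out of an existing node, reinserting whatever is left on either side, and returns the removed middle, which is no longer in the tree. A hedged sketch of a caller releasing the middle of a previously reserved range; the wrapper name is made up, and it assumes [lo, hi] lies entirely inside one existing node:

```c
/* Hypothetical: stop reserving [lo, hi] inside an existing reservation. */
static int my_unreserve_middle(struct iova_domain *iovad,
			       unsigned long lo, unsigned long hi)
{
	struct iova *iova, *middle;

	iova = find_iova(iovad, lo);		/* node covering the range */
	if (!iova)
		return -ENOENT;

	middle = split_and_remove_iova(iovad, iova, lo, hi);
	if (!middle)
		return -ENOMEM;	/* could not allocate the leftover pieces */

	free_iova_mem(middle);	/* [lo, hi] is now allocatable again */
	return 0;
}
```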