/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct task_struct *task;		/* Task bound to this PASID */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;

static void free_pasid_states(struct device_state *dev_state);

static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
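
/*
 * Example (added commentary, not part of the original file): a device
 * at PCI address 01:00.2 has bus->number == 0x01 and
 * devfn == PCI_DEVFN(0, 2) == 0x02, so device_id() returns
 * (0x01 << 8) | 0x02 == 0x0102.
 */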

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

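/*
 * Note (added commentary): the waiter enqueues itself on dev_state->wq
 * *before* dropping its own reference, so a wake_up() from a concurrent
 * put_device_state() cannot be missed between the atomic decrement and
 * the schedule().
 */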
static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
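
/*
 * Worked example (added commentary): with pasid_levels == 1 and
 * pasid == 0x12345, the first iteration (level 1) picks index
 * (0x12345 >> 9) & 0x1ff == 0x91 in the root table and the second
 * (level 0) picks index 0x12345 & 0x1ff == 0x145 in the leaf table.
 * Each table holds 512 (2^9) pointers, i.e. exactly one zeroed page.
 */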

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -EBUSY;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count)) {
		put_device_state(pasid_state->device_state);
		wake_up(&pasid_state->wq);
	}
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (atomic_dec_and_test(&pasid_state->count))
		put_device_state(pasid_state->device_state);
	else
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	mmput(pasid_state->mm);
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
	clear_pasid_state(pasid_state->device_state, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address)
{
	__mn_flush_page(mn, address);

	return 0;
}

static void mn_change_pte(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address,
			  pte_t pte)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

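	/*
	 * Added commentary: while a range invalidation is in progress
	 * the PASID is pointed at empty_page_table, so device accesses
	 * fault (and take the PRI path) instead of translating through
	 * page-table entries the core MM is about to tear down.
	 */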
	spin_lock_irqsave(&pasid_state->lock, flags);
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(empty_page_table));
	}
	pasid_state->mmu_notifier_count += 1;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->mmu_notifier_count -= 1;
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(pasid_state->mm->pgd));
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	might_sleep();

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	if (pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

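/*
 * Added commentary: these callbacks keep the IOMMU's view of the
 * address space coherent with the CPU's - every point where the core
 * MM flushes a CPU TLB entry for this mm is mirrored by an IOTLB
 * flush for the bound PASID.
 */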
static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.change_pte		= mn_change_pte,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	int npages, write;
	struct page *page;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	down_read(&fault->state->mm->mmap_sem);
	npages = get_user_pages(fault->state->task, fault->state->mm,
				fault->address, 1, write, 0, &page, NULL);
	up_read(&fault->state->mm->mmap_sem);

	if (npages == 1) {
		put_page(page);
	} else if (fault->dev_state->inv_ppr_cb) {
		int status;

		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
						      fault->pasid,
						      fault->address,
						      fault->flags);
		switch (status) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;
		default:
			BUG();
		}
	} else {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
	}

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
555 | static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) | |
556 | { | |
557 | struct amd_iommu_fault *iommu_fault; | |
558 | struct pasid_state *pasid_state; | |
559 | struct device_state *dev_state; | |
560 | unsigned long flags; | |
561 | struct fault *fault; | |
562 | bool finish; | |
563 | u16 tag; | |
564 | int ret; | |
565 | ||
566 | iommu_fault = data; | |
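	/*
	 * Added commentary: in the PPR log entry the low 9 bits of ->tag
	 * carry the PRI tag and bit 9 is the "last request in group"
	 * flag, which tells us whether a response must be sent once all
	 * requests with this tag have completed.
	 */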
	tag = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	pasid_state->task = task;
	pasid_state->mm = get_task_mm(task);
	pasid_state->device_state = dev_state;
	pasid_state->pasid = pasid;
	pasid_state->mn.ops = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, pasid_state->mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
	mmput(pasid_state->mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop the reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* This will call the mn_release function and unbind the PASID */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
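
	/*
	 * Worked example (added commentary): pasids == 512 leaves
	 * pasid_levels == 0 (a single 512-entry table), while
	 * pasids == 65536 gives pasid_levels == 1 (a root table whose
	 * entries point to 512-entry leaf tables).
	 */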

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
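
/*
 * Usage sketch (added commentary, not part of this driver): a device
 * driver wanting to use PASIDs would follow roughly this sequence;
 * the PASID value and count below are arbitrary examples and error
 * handling is elided:
 *
 *	ret = amd_iommu_init_device(pdev, 16);
 *	ret = amd_iommu_bind_pasid(pdev, 1, current);
 *	... device issues PASID-tagged DMA against current->mm ...
 *	amd_iommu_unbind_pasid(pdev, 1);
 *	amd_iommu_free_device(pdev);
 */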

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
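
/*
 * Sketch of a callback a driver might register (added commentary; the
 * function name is hypothetical). It is called from do_fault() when
 * get_user_pages() cannot resolve a page request, and its return value
 * selects the PRI response sent back to the device:
 *
 *	static int my_invalid_ppr(struct pci_dev *pdev, int pasid,
 *				  unsigned long address, u16 flags)
 *	{
 *		return AMD_IOMMU_INV_PRI_RSP_INVALID;
 *	}
 */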

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);