/* drivers/iommu/amd_iommu_v2.c */
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;	/* Number of in-flight faults for this tag */
	bool finish;		/* Device expects a response for this tag */
	int status;		/* PPR status to report back when done */
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

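/*
 * The IOMMU identifies a PCI device by a 16-bit id: the bus number in
 * the high byte and devfn (slot/function) in the low byte.
 */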
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

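/*
 * PASID states live in a radix-tree-like table: every level is a single
 * page holding 512 pointers, and each level consumes 9 bits of the
 * PASID. dev_state->pasid_levels is the number of levels above the leaf
 * level needed to cover dev_state->max_pasids entries.
 */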
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

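	/*
	 * If start and end - 1 agree in all bits above the page offset,
	 * the range lies within a single page and one page flush is
	 * enough; otherwise flush the whole TLB for this PASID.
	 */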
	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

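/*
 * Drop one in-flight fault for this PRI tag. When the last fault in the
 * tag group completes and the device asked for a response, report the
 * accumulated status back through the IOMMU and reset the tag state.
 */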
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u64 address;
	int ret, write;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	mm = fault->state->mm;
	address = fault->address;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start) {
		/* failed to get a vma in the right range */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	ret = handle_mm_fault(mm, vma, address, write);
	if (ret & VM_FAULT_ERROR) {
		/* failed to service fault */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	up_read(&mm->mmap_sem);

out:
	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
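	/*
	 * The hardware tag encodes the PRI tag in its low nine bits; bit 9
	 * ("finish") signals that a PPR completion must be sent once all
	 * in-flight faults for this tag have been handled.
	 */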
	tag    = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	if (mm)		/* mm is NULL when get_task_mm() failed above */
		mmput(mm);
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

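	/*
	 * Each table level resolves 9 bits of a PASID; count how many
	 * levels beyond the leaf level are needed to cover 'pasids'
	 * entries.
	 */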
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = -EINVAL;	/* don't return the 0 left over from enable_v2 */
	group = iommu_group_get(&pdev->dev);
	if (!group)
		goto out_free_domain;

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
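
/*
 * A minimal usage sketch (hypothetical caller, error handling trimmed):
 * a device driver enables PASID support for its device once, then binds
 * a process address space to a PASID of its choosing:
 *
 *	ret = amd_iommu_init_device(pdev, 16);	(allow up to 16 PASIDs)
 *	if (!ret)
 *		ret = amd_iommu_bind_pasid(pdev, 1, current);
 *	...
 *	amd_iommu_unbind_pasid(pdev, 1);
 *	amd_iommu_free_device(pdev);
 */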

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

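		/* A device still registered at module exit is a driver bug */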
		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);