Commit | Line | Data |
---|---|---|
30edc14b KRW |
1 | /* |
2 | * PCI Backend - Provides restricted access to the real PCI bus topology | |
3 | * to the frontend | |
4 | * | |
5 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | |
6 | */ | |
7 | ||
8 | #include <linux/list.h> | |
9 | #include <linux/pci.h> | |
10 | #include <linux/spinlock.h> | |
11 | #include "pciback.h" | |
12 | ||
/*
 * Per-pdev state for "passthrough" mode: a flat list of the real PCI
 * devices exported to this frontend, each visible at its real
 * (domain, bus, devfn) address.
 */
struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	spinlock_t lock;
};
a92336a1 KRW |
19 | struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, |
20 | unsigned int domain, unsigned int bus, | |
21 | unsigned int devfn) | |
30edc14b KRW |
22 | { |
23 | struct passthrough_dev_data *dev_data = pdev->pci_dev_data; | |
24 | struct pci_dev_entry *dev_entry; | |
25 | struct pci_dev *dev = NULL; | |
26 | unsigned long flags; | |
27 | ||
28 | spin_lock_irqsave(&dev_data->lock, flags); | |
29 | ||
30 | list_for_each_entry(dev_entry, &dev_data->dev_list, list) { | |
31 | if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus) | |
32 | && bus == (unsigned int)dev_entry->dev->bus->number | |
33 | && devfn == dev_entry->dev->devfn) { | |
34 | dev = dev_entry->dev; | |
35 | break; | |
36 | } | |
37 | } | |
38 | ||
39 | spin_unlock_irqrestore(&dev_data->lock, flags); | |
40 | ||
41 | return dev; | |
42 | } | |
43 | ||
a92336a1 KRW |
44 | int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev, |
45 | int devid, publish_pci_dev_cb publish_cb) | |
30edc14b KRW |
46 | { |
47 | struct passthrough_dev_data *dev_data = pdev->pci_dev_data; | |
48 | struct pci_dev_entry *dev_entry; | |
49 | unsigned long flags; | |
50 | unsigned int domain, bus, devfn; | |
51 | int err; | |
52 | ||
53 | dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL); | |
54 | if (!dev_entry) | |
55 | return -ENOMEM; | |
56 | dev_entry->dev = dev; | |
57 | ||
58 | spin_lock_irqsave(&dev_data->lock, flags); | |
59 | list_add_tail(&dev_entry->list, &dev_data->dev_list); | |
60 | spin_unlock_irqrestore(&dev_data->lock, flags); | |
61 | ||
62 | /* Publish this device. */ | |
63 | domain = (unsigned int)pci_domain_nr(dev->bus); | |
64 | bus = (unsigned int)dev->bus->number; | |
65 | devfn = dev->devfn; | |
66 | err = publish_cb(pdev, domain, bus, devfn, devid); | |
67 | ||
68 | return err; | |
69 | } | |
70 | ||
a92336a1 KRW |
71 | void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, |
72 | struct pci_dev *dev) | |
30edc14b KRW |
73 | { |
74 | struct passthrough_dev_data *dev_data = pdev->pci_dev_data; | |
75 | struct pci_dev_entry *dev_entry, *t; | |
76 | struct pci_dev *found_dev = NULL; | |
77 | unsigned long flags; | |
78 | ||
79 | spin_lock_irqsave(&dev_data->lock, flags); | |
80 | ||
81 | list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) { | |
82 | if (dev_entry->dev == dev) { | |
83 | list_del(&dev_entry->list); | |
84 | found_dev = dev_entry->dev; | |
85 | kfree(dev_entry); | |
86 | } | |
87 | } | |
88 | ||
89 | spin_unlock_irqrestore(&dev_data->lock, flags); | |
90 | ||
91 | if (found_dev) | |
92 | pcistub_put_pci_dev(found_dev); | |
93 | } | |
94 | ||
a92336a1 | 95 | int xen_pcibk_init_devices(struct xen_pcibk_device *pdev) |
30edc14b KRW |
96 | { |
97 | struct passthrough_dev_data *dev_data; | |
98 | ||
99 | dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL); | |
100 | if (!dev_data) | |
101 | return -ENOMEM; | |
102 | ||
103 | spin_lock_init(&dev_data->lock); | |
104 | ||
105 | INIT_LIST_HEAD(&dev_data->dev_list); | |
106 | ||
107 | pdev->pci_dev_data = dev_data; | |
108 | ||
109 | return 0; | |
110 | } | |
111 | ||
/*
 * Publish every PCI root bus that carries at least one exported device
 * whose parent bridges are NOT themselves exported.  Stops and returns
 * the first error from @publish_root_cb; returns 0 on success.
 *
 * Locking is deliberately asymmetric: the lock is dropped around each
 * publish_root_cb() call (presumably because the callback may sleep,
 * e.g. writing xenstore — confirm) and reacquired afterwards, and on a
 * callback error we break out with the lock already released, hence
 * the conditional unlock at the end.
 */
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
				publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	/* _safe iteration: the list could change while the lock is dropped
	 * around the callback — NOTE(review): _safe only shields against
	 * removal of the current entry, not arbitrary concurrent edits.
	 */
	struct pci_dev_entry *dev_entry, *e, *tmp;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	spin_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		/* Walk up the bridge chain (bus->self is the upstream
		 * bridge device) looking for an exported ancestor. */
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			/* Drop the lock for the (possibly sleeping) callback. */
			spin_unlock(&dev_data->lock);
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
			spin_lock(&dev_data->lock);
		}
	}

	/* On error we broke out with the lock already dropped. */
	if (!err)
		spin_unlock(&dev_data->lock);

	return err;
}
156 | ||
a92336a1 | 157 | void xen_pcibk_release_devices(struct xen_pcibk_device *pdev) |
30edc14b KRW |
158 | { |
159 | struct passthrough_dev_data *dev_data = pdev->pci_dev_data; | |
160 | struct pci_dev_entry *dev_entry, *t; | |
161 | ||
162 | list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) { | |
163 | list_del(&dev_entry->list); | |
164 | pcistub_put_pci_dev(dev_entry->dev); | |
165 | kfree(dev_entry); | |
166 | } | |
167 | ||
168 | kfree(dev_data); | |
169 | pdev->pci_dev_data = NULL; | |
170 | } | |
171 | ||
a92336a1 KRW |
172 | int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev, |
173 | struct xen_pcibk_device *pdev, | |
174 | unsigned int *domain, unsigned int *bus, | |
175 | unsigned int *devfn) | |
30edc14b KRW |
176 | { |
177 | *domain = pci_domain_nr(pcidev->bus); | |
178 | *bus = pcidev->bus->number; | |
179 | *devfn = pcidev->devfn; | |
180 | return 1; | |
181 | } |