xen-pciback: notify hypervisor about devices intended to be assigned to guests
drivers/xen/xen-pciback/pci_stub.c
1 /*
2 * PCI Stub Driver - Grabs devices in backend to be exported later
3 *
4 * Ryan Wilson <hap9@epoch.ncsc.mil>
5 * Chris Bookholt <hap10@epoch.ncsc.mil>
6 */
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/rwsem.h>
10 #include <linux/list.h>
11 #include <linux/spinlock.h>
12 #include <linux/kref.h>
13 #include <linux/pci.h>
14 #include <linux/wait.h>
15 #include <linux/sched.h>
16 #include <linux/atomic.h>
17 #include <xen/events.h>
18 #include <asm/xen/pci.h>
19 #include <asm/xen/hypervisor.h>
20 #include <xen/interface/physdev.h>
21 #include "pciback.h"
22 #include "conf_space.h"
23 #include "conf_space_quirks.h"
24
25 static char *pci_devs_to_hide;
26 wait_queue_head_t xen_pcibk_aer_wait_queue;
27 /* Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure ops;
28 * we want to avoid a xen_pcibk device being removed in the middle of an AER operation.
29 */
30 static DECLARE_RWSEM(pcistub_sem);
31 module_param_named(hide, pci_devs_to_hide, charp, 0444);
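/* The "hide" parameter takes a list of slot specifications, each in
 * parentheses, with an optional domain and '*' allowed as a wildcard for the
 * slot or function (all fields in hex). For example, on the kernel command
 * line:
 *   xen-pciback.hide=(0000:03:00.0)(04:00.*)
 * The BDFs above are illustrative only; see the parser in pcistub_init().
 */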
32
33 struct pcistub_device_id {
34 struct list_head slot_list;
35 int domain;
36 unsigned char bus;
37 unsigned int devfn;
38 };
39 static LIST_HEAD(pcistub_device_ids);
40 static DEFINE_SPINLOCK(device_ids_lock);
41
42 struct pcistub_device {
43 struct kref kref;
44 struct list_head dev_list;
45 spinlock_t lock;
46
47 struct pci_dev *dev;
48 struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
49 };
50
51 /* Access to pcistub_devices & seized_devices lists and the initialize_devices
52 * flag must be locked with pcistub_devices_lock
53 */
54 static DEFINE_SPINLOCK(pcistub_devices_lock);
55 static LIST_HEAD(pcistub_devices);
56
57 /* wait for device_initcall before initializing our devices
58 * (see pcistub_init_devices_late)
59 */
60 static int initialize_devices;
61 static LIST_HEAD(seized_devices);
62
63 static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
64 {
65 struct pcistub_device *psdev;
66
67 dev_dbg(&dev->dev, "pcistub_device_alloc\n");
68
69 psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
70 if (!psdev)
71 return NULL;
72
73 psdev->dev = pci_dev_get(dev);
74 if (!psdev->dev) {
75 kfree(psdev);
76 return NULL;
77 }
78
79 kref_init(&psdev->kref);
80 spin_lock_init(&psdev->lock);
81
82 return psdev;
83 }
84
85 /* Don't call this directly as it's called by pcistub_device_put */
86 static void pcistub_device_release(struct kref *kref)
87 {
88 struct pcistub_device *psdev;
89 struct pci_dev *dev;
90 struct xen_pcibk_dev_data *dev_data;
91
92 psdev = container_of(kref, struct pcistub_device, kref);
93 dev = psdev->dev;
94 dev_data = pci_get_drvdata(dev);
95
96 dev_dbg(&dev->dev, "pcistub_device_release\n");
97
98 xen_unregister_device_domain_owner(dev);
99
100 /* Call the reset function that does not take the device lock, as this
101 * is called from "unbind", which already holds the device_lock mutex.
102 */
103 __pci_reset_function_locked(dev);
104 if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
105 dev_dbg(&dev->dev, "Could not reload PCI state\n");
106 else
107 pci_restore_state(dev);
108
109 if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
110 struct physdev_pci_device ppdev = {
111 .seg = pci_domain_nr(dev->bus),
112 .bus = dev->bus->number,
113 .devfn = dev->devfn
114 };
115 int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
116 &ppdev);
117
118 if (err)
119 dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
120 err);
121 }
122
123 /* Disable the device */
124 xen_pcibk_reset_device(dev);
125
126 kfree(dev_data);
127 pci_set_drvdata(dev, NULL);
128
129 /* Clean-up the device */
130 xen_pcibk_config_free_dyn_fields(dev);
131 xen_pcibk_config_free_dev(dev);
132
133 dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
134 pci_dev_put(dev);
135
136 kfree(psdev);
137 }
138
139 static inline void pcistub_device_get(struct pcistub_device *psdev)
140 {
141 kref_get(&psdev->kref);
142 }
143
144 static inline void pcistub_device_put(struct pcistub_device *psdev)
145 {
146 kref_put(&psdev->kref, pcistub_device_release);
147 }
148
149 static struct pcistub_device *pcistub_device_find(int domain, int bus,
150 int slot, int func)
151 {
152 struct pcistub_device *psdev = NULL;
153 unsigned long flags;
154
155 spin_lock_irqsave(&pcistub_devices_lock, flags);
156
157 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
158 if (psdev->dev != NULL
159 && domain == pci_domain_nr(psdev->dev->bus)
160 && bus == psdev->dev->bus->number
161 && slot == PCI_SLOT(psdev->dev->devfn)
162 && func == PCI_FUNC(psdev->dev->devfn)) {
163 pcistub_device_get(psdev);
164 goto out;
165 }
166 }
167
168 /* didn't find it */
169 psdev = NULL;
170
171 out:
172 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
173 return psdev;
174 }
175
176 static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
177 struct pcistub_device *psdev)
178 {
179 struct pci_dev *pci_dev = NULL;
180 unsigned long flags;
181
182 pcistub_device_get(psdev);
183
184 spin_lock_irqsave(&psdev->lock, flags);
185 if (!psdev->pdev) {
186 psdev->pdev = pdev;
187 pci_dev = psdev->dev;
188 }
189 spin_unlock_irqrestore(&psdev->lock, flags);
190
191 if (!pci_dev)
192 pcistub_device_put(psdev);
193
194 return pci_dev;
195 }
196
197 struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
198 int domain, int bus,
199 int slot, int func)
200 {
201 struct pcistub_device *psdev;
202 struct pci_dev *found_dev = NULL;
203 unsigned long flags;
204
205 spin_lock_irqsave(&pcistub_devices_lock, flags);
206
207 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
208 if (psdev->dev != NULL
209 && domain == pci_domain_nr(psdev->dev->bus)
210 && bus == psdev->dev->bus->number
211 && slot == PCI_SLOT(psdev->dev->devfn)
212 && func == PCI_FUNC(psdev->dev->devfn)) {
213 found_dev = pcistub_device_get_pci_dev(pdev, psdev);
214 break;
215 }
216 }
217
218 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
219 return found_dev;
220 }
221
222 struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
223 struct pci_dev *dev)
224 {
225 struct pcistub_device *psdev;
226 struct pci_dev *found_dev = NULL;
227 unsigned long flags;
228
229 spin_lock_irqsave(&pcistub_devices_lock, flags);
230
231 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
232 if (psdev->dev == dev) {
233 found_dev = pcistub_device_get_pci_dev(pdev, psdev);
234 break;
235 }
236 }
237
238 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
239 return found_dev;
240 }
241
242 void pcistub_put_pci_dev(struct pci_dev *dev)
243 {
244 struct pcistub_device *psdev, *found_psdev = NULL;
245 unsigned long flags;
246
247 spin_lock_irqsave(&pcistub_devices_lock, flags);
248
249 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
250 if (psdev->dev == dev) {
251 found_psdev = psdev;
252 break;
253 }
254 }
255
256 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
257 if (WARN_ON(!found_psdev))
258 return;
259
260 /* Hold this lock to avoid breaking the link between
261 * pcistub and xen_pcibk while AER handling is in progress.
262 */
263 down_write(&pcistub_sem);
264 /* Clean up our device
265 * (so it's ready for the next domain)
266 */
267
268 /* This is OK - we are running from workqueue context
269 * and want to inhibit the user from fiddling with 'reset'
270 */
271 pci_reset_function(dev);
272 pci_restore_state(psdev->dev);
273
274 /* This disables the device. */
275 xen_pcibk_reset_device(found_psdev->dev);
276
277 /* And clean up our emulated fields. */
278 xen_pcibk_config_free_dyn_fields(found_psdev->dev);
279 xen_pcibk_config_reset_dev(found_psdev->dev);
280
281 xen_unregister_device_domain_owner(found_psdev->dev);
282
283 spin_lock_irqsave(&found_psdev->lock, flags);
284 found_psdev->pdev = NULL;
285 spin_unlock_irqrestore(&found_psdev->lock, flags);
286
287 pcistub_device_put(found_psdev);
288 up_write(&pcistub_sem);
289 }
290
291 static int pcistub_match_one(struct pci_dev *dev,
292 struct pcistub_device_id *pdev_id)
293 {
294 /* Match the specified device by domain, bus, slot, func and also if
295 * any of the device's parent bridges match.
296 */
297 for (; dev != NULL; dev = dev->bus->self) {
298 if (pci_domain_nr(dev->bus) == pdev_id->domain
299 && dev->bus->number == pdev_id->bus
300 && dev->devfn == pdev_id->devfn)
301 return 1;
302
303 /* Sometimes topmost bridge links to itself. */
304 if (dev == dev->bus->self)
305 break;
306 }
307
308 return 0;
309 }
310
311 static int pcistub_match(struct pci_dev *dev)
312 {
313 struct pcistub_device_id *pdev_id;
314 unsigned long flags;
315 int found = 0;
316
317 spin_lock_irqsave(&device_ids_lock, flags);
318 list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
319 if (pcistub_match_one(dev, pdev_id)) {
320 found = 1;
321 break;
322 }
323 }
324 spin_unlock_irqrestore(&device_ids_lock, flags);
325
326 return found;
327 }
328
329 static int pcistub_init_device(struct pci_dev *dev)
330 {
331 struct xen_pcibk_dev_data *dev_data;
332 int err = 0;
333
334 dev_dbg(&dev->dev, "initializing...\n");
335
336 /* The PCI backend is not intended to be a module (or to work with
337 * removable PCI devices) yet. If it were, xen_pcibk_config_free()
338 * would need to be called somewhere to free the memory allocated
339 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
340 */
341 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
342 + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
343 if (!dev_data) {
344 err = -ENOMEM;
345 goto out;
346 }
347 pci_set_drvdata(dev, dev_data);
348
349 /*
350 * Setup name for fake IRQ handler. It will only be enabled
351 * once the device is turned on by the guest.
352 */
353 sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
354
355 dev_dbg(&dev->dev, "initializing config\n");
356
357 init_waitqueue_head(&xen_pcibk_aer_wait_queue);
358 err = xen_pcibk_config_init_dev(dev);
359 if (err)
360 goto out;
361
362 /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
363 * must do this here because pcibios_enable_device may specify
364 * the pci device's true irq (and possibly its other resources)
365 * if they differ from what's in the configuration space.
366 * This makes the assumption that the device's resources won't
367 * change after this point (otherwise this code may break!)
368 */
369 dev_dbg(&dev->dev, "enabling device\n");
370 err = pci_enable_device(dev);
371 if (err)
372 goto config_release;
373
374 if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
375 struct physdev_pci_device ppdev = {
376 .seg = pci_domain_nr(dev->bus),
377 .bus = dev->bus->number,
378 .devfn = dev->devfn
379 };
380
381 err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
382 if (err)
383 dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
384 err);
385 }
386
387 /* We need the device active to save the state. */
388 dev_dbg(&dev->dev, "save state of device\n");
389 pci_save_state(dev);
390 dev_data->pci_saved_state = pci_store_saved_state(dev);
391 if (!dev_data->pci_saved_state)
392 dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
393 else {
394 dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
395 __pci_reset_function_locked(dev);
396 pci_restore_state(dev);
397 }
398 /* Now disable the device (this also ensures some private device
399 * data is set up before we export)
400 */
401 dev_dbg(&dev->dev, "reset device\n");
402 xen_pcibk_reset_device(dev);
403
404 dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
405 return 0;
406
407 config_release:
408 xen_pcibk_config_free_dev(dev);
409
410 out:
411 pci_set_drvdata(dev, NULL);
412 kfree(dev_data);
413 return err;
414 }
415
416 /*
417 * Because some initialization still happens on
418 * devices during fs_initcall, we need to defer
419 * full initialization of our devices until
420 * device_initcall.
421 */
422 static int __init pcistub_init_devices_late(void)
423 {
424 struct pcistub_device *psdev;
425 unsigned long flags;
426 int err = 0;
427
428 pr_debug(DRV_NAME ": pcistub_init_devices_late\n");
429
430 spin_lock_irqsave(&pcistub_devices_lock, flags);
431
432 while (!list_empty(&seized_devices)) {
433 psdev = container_of(seized_devices.next,
434 struct pcistub_device, dev_list);
435 list_del(&psdev->dev_list);
436
437 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
438
439 err = pcistub_init_device(psdev->dev);
440 if (err) {
441 dev_err(&psdev->dev->dev,
442 "error %d initializing device\n", err);
443 kfree(psdev);
444 psdev = NULL;
445 }
446
447 spin_lock_irqsave(&pcistub_devices_lock, flags);
448
449 if (psdev)
450 list_add_tail(&psdev->dev_list, &pcistub_devices);
451 }
452
453 initialize_devices = 1;
454
455 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
456
457 return 0;
458 }
459
460 static int pcistub_seize(struct pci_dev *dev)
461 {
462 struct pcistub_device *psdev;
463 unsigned long flags;
464 int err = 0;
465
466 psdev = pcistub_device_alloc(dev);
467 if (!psdev)
468 return -ENOMEM;
469
470 spin_lock_irqsave(&pcistub_devices_lock, flags);
471
472 if (initialize_devices) {
473 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
474
475 /* don't want irqs disabled when calling pcistub_init_device */
476 err = pcistub_init_device(psdev->dev);
477
478 spin_lock_irqsave(&pcistub_devices_lock, flags);
479
480 if (!err)
481 list_add(&psdev->dev_list, &pcistub_devices);
482 } else {
483 dev_dbg(&dev->dev, "deferring initialization\n");
484 list_add(&psdev->dev_list, &seized_devices);
485 }
486
487 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
488
489 if (err)
490 pcistub_device_put(psdev);
491
492 return err;
493 }
494
495 static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
496 {
497 int err = 0;
498
499 dev_dbg(&dev->dev, "probing...\n");
500
501 if (pcistub_match(dev)) {
502
503 if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
504 && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
505 dev_err(&dev->dev, "can't export pci devices that "
506 "don't have a normal (0) or bridge (1) "
507 "header type!\n");
508 err = -ENODEV;
509 goto out;
510 }
511
512 dev_info(&dev->dev, "seizing device\n");
513 err = pcistub_seize(dev);
514 } else
515 /* Didn't find the device */
516 err = -ENODEV;
517
518 out:
519 return err;
520 }
521
522 static void pcistub_remove(struct pci_dev *dev)
523 {
524 struct pcistub_device *psdev, *found_psdev = NULL;
525 unsigned long flags;
526
527 dev_dbg(&dev->dev, "removing\n");
528
529 spin_lock_irqsave(&pcistub_devices_lock, flags);
530
531 xen_pcibk_config_quirk_release(dev);
532
533 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
534 if (psdev->dev == dev) {
535 found_psdev = psdev;
536 break;
537 }
538 }
539
540 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
541
542 if (found_psdev) {
543 dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
544 found_psdev->pdev);
545
546 if (found_psdev->pdev) {
547 printk(KERN_WARNING DRV_NAME ": ****** removing device "
548 "%s while still in-use! ******\n",
549 pci_name(found_psdev->dev));
550 printk(KERN_WARNING DRV_NAME ": ****** driver domain may"
551 " still access this device's i/o resources!\n");
552 printk(KERN_WARNING DRV_NAME ": ****** shutdown driver "
553 "domain before binding device\n");
554 printk(KERN_WARNING DRV_NAME ": ****** to other drivers "
555 "or domains\n");
556
557 xen_pcibk_release_pci_dev(found_psdev->pdev,
558 found_psdev->dev);
559 }
560
561 spin_lock_irqsave(&pcistub_devices_lock, flags);
562 list_del(&found_psdev->dev_list);
563 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
564
565 /* the final put for releasing from the list */
566 pcistub_device_put(found_psdev);
567 }
568 }
569
570 static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = {
571 {
572 .vendor = PCI_ANY_ID,
573 .device = PCI_ANY_ID,
574 .subvendor = PCI_ANY_ID,
575 .subdevice = PCI_ANY_ID,
576 },
577 {0,},
578 };
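/* The single wildcard entry above allows this driver to be offered any PCI
 * device; pcistub_probe() still rejects devices that do not match an entry
 * added via the "hide" parameter or the new_slot attribute (see
 * pcistub_match() above).
 */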
579
580 #define PCI_NODENAME_MAX 40
581 static void kill_domain_by_device(struct pcistub_device *psdev)
582 {
583 struct xenbus_transaction xbt;
584 int err;
585 char nodename[PCI_NODENAME_MAX];
586
587 BUG_ON(!psdev);
588 snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
589 psdev->pdev->xdev->otherend_id);
590
591 again:
592 err = xenbus_transaction_start(&xbt);
593 if (err) {
594 dev_err(&psdev->dev->dev,
595 "error %d when start xenbus transaction\n", err);
596 return;
597 }
598 /* PV AER handlers will set this flag */
599 xenbus_printf(xbt, nodename, "aerState", "aerfail");
600 err = xenbus_transaction_end(xbt, 0);
601 if (err) {
602 if (err == -EAGAIN)
603 goto again;
604 dev_err(&psdev->dev->dev,
605 "error %d when end xenbus transaction\n", err);
606 return;
607 }
608 }
609
610 /* For each AER recovery step (error_detected, mmio_enabled, etc.), the frontend
611 * and backend need to cooperate. In xen_pcibk, each of those steps does a similar
612 * job: send a service request and wait for the frontend's response.
613 */
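/* Roughly, common_process() fills sh_info->aer_op with the command and the
 * frontend's BDF, sets _PCIB_op_pending and _XEN_PCIB_active, notifies the
 * frontend over the event channel, and then waits (up to 300 seconds) for
 * the frontend to clear _XEN_PCIB_active; the frontend's verdict is read
 * back from aer_op->err.
 */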
614 static pci_ers_result_t common_process(struct pcistub_device *psdev,
615 pci_channel_state_t state, int aer_cmd,
616 pci_ers_result_t result)
617 {
618 pci_ers_result_t res = result;
619 struct xen_pcie_aer_op *aer_op;
620 int ret;
621
622 /* with PV AER drivers */
623 aer_op = &(psdev->pdev->sh_info->aer_op);
624 aer_op->cmd = aer_cmd;
625 /* useful for the error_detected callback */
626 aer_op->err = state;
627 /* pcifront BDF */
628 ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
629 &aer_op->domain, &aer_op->bus, &aer_op->devfn);
630 if (!ret) {
631 dev_err(&psdev->dev->dev,
632 DRV_NAME ": failed to get pcifront device\n");
633 return PCI_ERS_RESULT_NONE;
634 }
635 wmb();
636
637 dev_dbg(&psdev->dev->dev,
638 DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
639 aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
640 /* Local flag to mark that an AER request is pending; the xen_pcibk callback
641 * uses this flag to decide whether it needs to check for pcifront's AER
642 * service ack signal.
643 */
644 set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
645
646 /* It is possible that a pcifront conf_read_write op request invokes
647 * the callback, which causes a spurious execution of wake_up.
648 * Yet this is harmless and better than a spinlock here.
649 */
650 set_bit(_XEN_PCIB_active,
651 (unsigned long *)&psdev->pdev->sh_info->flags);
652 wmb();
653 notify_remote_via_irq(psdev->pdev->evtchn_irq);
654
655 ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
656 !(test_bit(_XEN_PCIB_active, (unsigned long *)
657 &psdev->pdev->sh_info->flags)), 300*HZ);
658
659 if (!ret) {
660 if (test_bit(_XEN_PCIB_active,
661 (unsigned long *)&psdev->pdev->sh_info->flags)) {
662 dev_err(&psdev->dev->dev,
663 "pcifront aer process not responding!\n");
664 clear_bit(_XEN_PCIB_active,
665 (unsigned long *)&psdev->pdev->sh_info->flags);
666 aer_op->err = PCI_ERS_RESULT_NONE;
667 return res;
668 }
669 }
670 clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
671
672 if (test_bit(_XEN_PCIF_active,
673 (unsigned long *)&psdev->pdev->sh_info->flags)) {
674 dev_dbg(&psdev->dev->dev,
675 "schedule pci_conf service in " DRV_NAME "\n");
676 xen_pcibk_test_and_schedule_op(psdev->pdev);
677 }
678
679 res = (pci_ers_result_t)aer_op->err;
680 return res;
681 }
682
683 /*
684 * xen_pcibk_slot_reset: send the slot_reset request to pcifront, in case the
685 * device driver can provide this service, and then wait for the pcifront
686 * ack.
687 * @dev: pointer to the PCI device
688 * The return value is used by the AER core's do_recovery policy.
689 */
690 static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
691 {
692 struct pcistub_device *psdev;
693 pci_ers_result_t result;
694
695 result = PCI_ERS_RESULT_RECOVERED;
696 dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
697 dev->bus->number, dev->devfn);
698
699 down_write(&pcistub_sem);
700 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
701 dev->bus->number,
702 PCI_SLOT(dev->devfn),
703 PCI_FUNC(dev->devfn));
704
705 if (!psdev || !psdev->pdev) {
706 dev_err(&dev->dev,
707 DRV_NAME " device is not found/assigned\n");
708 goto end;
709 }
710
711 if (!psdev->pdev->sh_info) {
712 dev_err(&dev->dev, DRV_NAME " device is not connected or is owned"
713 " by an HVM guest, killing it\n");
714 kill_domain_by_device(psdev);
715 goto end;
716 }
717
718 if (!test_bit(_XEN_PCIB_AERHANDLER,
719 (unsigned long *)&psdev->pdev->sh_info->flags)) {
720 dev_err(&dev->dev,
721 "guest with no AER driver should have been killed\n");
722 goto end;
723 }
724 result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
725
726 if (result == PCI_ERS_RESULT_NONE ||
727 result == PCI_ERS_RESULT_DISCONNECT) {
728 dev_dbg(&dev->dev,
729 "No AER slot_reset service or disconnected!\n");
730 kill_domain_by_device(psdev);
731 }
732 end:
733 if (psdev)
734 pcistub_device_put(psdev);
735 up_write(&pcistub_sem);
736 return result;
737
738 }
739
740
741 /* xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront,
742 * in case the device driver can provide this service, and then wait
743 * for the pcifront ack.
744 * @dev: pointer to the PCI device
745 * The return value is used by the AER core's do_recovery policy.
746 */
747
748 static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
749 {
750 struct pcistub_device *psdev;
751 pci_ers_result_t result;
752
753 result = PCI_ERS_RESULT_RECOVERED;
754 dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
755 dev->bus->number, dev->devfn);
756
757 down_write(&pcistub_sem);
758 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
759 dev->bus->number,
760 PCI_SLOT(dev->devfn),
761 PCI_FUNC(dev->devfn));
762
763 if (!psdev || !psdev->pdev) {
764 dev_err(&dev->dev,
765 DRV_NAME " device is not found/assigned\n");
766 goto end;
767 }
768
769 if (!psdev->pdev->sh_info) {
770 dev_err(&dev->dev, DRV_NAME " device is not connected or is owned"
771 " by an HVM guest, killing it\n");
772 kill_domain_by_device(psdev);
773 goto end;
774 }
775
776 if (!test_bit(_XEN_PCIB_AERHANDLER,
777 (unsigned long *)&psdev->pdev->sh_info->flags)) {
778 dev_err(&dev->dev,
779 "guest with no AER driver should have been killed\n");
780 goto end;
781 }
782 result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
783
784 if (result == PCI_ERS_RESULT_NONE ||
785 result == PCI_ERS_RESULT_DISCONNECT) {
786 dev_dbg(&dev->dev,
787 "No AER mmio_enabled service or disconnected!\n");
788 kill_domain_by_device(psdev);
789 }
790 end:
791 if (psdev)
792 pcistub_device_put(psdev);
793 up_write(&pcistub_sem);
794 return result;
795 }
796
797 /* xen_pcibk_error_detected: send the error_detected request to pcifront,
798 * in case the device driver can provide this service, and then wait
799 * for the pcifront ack.
800 * @dev: pointer to the PCI device
801 * @error: the current PCI connection state
802 * The return value is used by the AER core's do_recovery policy.
803 */
804
805 static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
806 pci_channel_state_t error)
807 {
808 struct pcistub_device *psdev;
809 pci_ers_result_t result;
810
811 result = PCI_ERS_RESULT_CAN_RECOVER;
812 dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
813 dev->bus->number, dev->devfn);
814
815 down_write(&pcistub_sem);
816 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
817 dev->bus->number,
818 PCI_SLOT(dev->devfn),
819 PCI_FUNC(dev->devfn));
820
821 if (!psdev || !psdev->pdev) {
822 dev_err(&dev->dev,
823 DRV_NAME " device is not found/assigned\n");
824 goto end;
825 }
826
827 if (!psdev->pdev->sh_info) {
828 dev_err(&dev->dev, DRV_NAME " device is not connected or is owned"
829 " by an HVM guest, killing it\n");
830 kill_domain_by_device(psdev);
831 goto end;
832 }
833
834 /* Guest owns the device but no AER handler is registered; kill the guest */
835 if (!test_bit(_XEN_PCIB_AERHANDLER,
836 (unsigned long *)&psdev->pdev->sh_info->flags)) {
837 dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
838 kill_domain_by_device(psdev);
839 goto end;
840 }
841 result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
842
843 if (result == PCI_ERS_RESULT_NONE ||
844 result == PCI_ERS_RESULT_DISCONNECT) {
845 dev_dbg(&dev->dev,
846 "No AER error_detected service or disconnected!\n");
847 kill_domain_by_device(psdev);
848 }
849 end:
850 if (psdev)
851 pcistub_device_put(psdev);
852 up_write(&pcistub_sem);
853 return result;
854 }
855
856 /* xen_pcibk_error_resume: send the error_resume request to pcifront,
857 * in case the device driver can provide this service, and then wait
858 * for the pcifront ack.
859 * @dev: pointer to the PCI device
860 */
861
862 static void xen_pcibk_error_resume(struct pci_dev *dev)
863 {
864 struct pcistub_device *psdev;
865
866 dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
867 dev->bus->number, dev->devfn);
868
869 down_write(&pcistub_sem);
870 psdev = pcistub_device_find(pci_domain_nr(dev->bus),
871 dev->bus->number,
872 PCI_SLOT(dev->devfn),
873 PCI_FUNC(dev->devfn));
874
875 if (!psdev || !psdev->pdev) {
876 dev_err(&dev->dev,
877 DRV_NAME " device is not found/assigned\n");
878 goto end;
879 }
880
881 if (!psdev->pdev->sh_info) {
882 dev_err(&dev->dev, DRV_NAME " device is not connected or is owned"
883 " by an HVM guest, killing it\n");
884 kill_domain_by_device(psdev);
885 goto end;
886 }
887
888 if (!test_bit(_XEN_PCIB_AERHANDLER,
889 (unsigned long *)&psdev->pdev->sh_info->flags)) {
890 dev_err(&dev->dev,
891 "guest with no AER driver should have been killed\n");
892 kill_domain_by_device(psdev);
893 goto end;
894 }
895 common_process(psdev, 1, XEN_PCI_OP_aer_resume,
896 PCI_ERS_RESULT_RECOVERED);
897 end:
898 if (psdev)
899 pcistub_device_put(psdev);
900 up_write(&pcistub_sem);
901 return;
902 }
903
904 /* add xen_pcibk AER handling */
905 static const struct pci_error_handlers xen_pcibk_error_handler = {
906 .error_detected = xen_pcibk_error_detected,
907 .mmio_enabled = xen_pcibk_mmio_enabled,
908 .slot_reset = xen_pcibk_slot_reset,
909 .resume = xen_pcibk_error_resume,
910 };
911
912 /*
913 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
914 * for a normal device. I don't want it to be loaded automatically.
915 */
916
917 static struct pci_driver xen_pcibk_pci_driver = {
918 /* The name should be xen_pciback, but until the tools are updated
919 * we will keep it as pciback. */
920 .name = "pciback",
921 .id_table = pcistub_ids,
922 .probe = pcistub_probe,
923 .remove = pcistub_remove,
924 .err_handler = &xen_pcibk_error_handler,
925 };
926
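/* str_to_slot() accepts "domain:bus:slot.func" in hex, with the domain
 * optional and '*' allowed as a wildcard for the slot and/or function,
 * e.g. "0000:03:00.0", "0000:03:00.*", "03:*.*" (example BDFs only).
 * A wildcard is returned as -1 in the corresponding output parameter.
 */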
927 static inline int str_to_slot(const char *buf, int *domain, int *bus,
928 int *slot, int *func)
929 {
930 int parsed = 0;
931
932 switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
933 &parsed)) {
934 case 3:
935 *func = -1;
936 sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
937 break;
938 case 2:
939 *slot = *func = -1;
940 sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
941 break;
942 }
943 if (parsed && !buf[parsed])
944 return 0;
945
946 /* try again without domain */
947 *domain = 0;
948 switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
949 case 2:
950 *func = -1;
951 sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
952 break;
953 case 1:
954 *slot = *func = -1;
955 sscanf(buf, " %x:*.* %n", bus, &parsed);
956 break;
957 }
958 if (parsed && !buf[parsed])
959 return 0;
960
961 return -EINVAL;
962 }
963
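/* str_to_quirk() parses "domain:bus:slot.func-reg:size:mask" in hex, with
 * the domain optional, e.g. "0000:03:00.0-e0:2:ff00" (example values only).
 */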
964 static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
965 *slot, int *func, int *reg, int *size, int *mask)
966 {
967 int parsed = 0;
968
969 sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
970 reg, size, mask, &parsed);
971 if (parsed && !buf[parsed])
972 return 0;
973
974 /* try again without domain */
975 *domain = 0;
976 sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
977 mask, &parsed);
978 if (parsed && !buf[parsed])
979 return 0;
980
981 return -EINVAL;
982 }
983
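/* A negative slot or func (the result of a '*' wildcard) makes this function
 * recurse over all 32 slots or all 8 functions, respectively.
 */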
984 static int pcistub_device_id_add(int domain, int bus, int slot, int func)
985 {
986 struct pcistub_device_id *pci_dev_id;
987 unsigned long flags;
988 int rc = 0, devfn = PCI_DEVFN(slot, func);
989
990 if (slot < 0) {
991 for (slot = 0; !rc && slot < 32; ++slot)
992 rc = pcistub_device_id_add(domain, bus, slot, func);
993 return rc;
994 }
995
996 if (func < 0) {
997 for (func = 0; !rc && func < 8; ++func)
998 rc = pcistub_device_id_add(domain, bus, slot, func);
999 return rc;
1000 }
1001
1002 if ((
1003 #if !defined(MODULE) /* pci_domains_supported is not being exported */ \
1004 || !defined(CONFIG_PCI_DOMAINS)
1005 !pci_domains_supported ? domain :
1006 #endif
1007 domain < 0 || domain > 0xffff)
1008 || bus < 0 || bus > 0xff
1009 || PCI_SLOT(devfn) != slot
1010 || PCI_FUNC(devfn) != func)
1011 return -EINVAL;
1012
1013 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
1014 if (!pci_dev_id)
1015 return -ENOMEM;
1016
1017 pci_dev_id->domain = domain;
1018 pci_dev_id->bus = bus;
1019 pci_dev_id->devfn = devfn;
1020
1021 pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n",
1022 domain, bus, slot, func);
1023
1024 spin_lock_irqsave(&device_ids_lock, flags);
1025 list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
1026 spin_unlock_irqrestore(&device_ids_lock, flags);
1027
1028 return 0;
1029 }
1030
1031 static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
1032 {
1033 struct pcistub_device_id *pci_dev_id, *t;
1034 int err = -ENOENT;
1035 unsigned long flags;
1036
1037 spin_lock_irqsave(&device_ids_lock, flags);
1038 list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
1039 slot_list) {
1040 if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
1041 && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
1042 && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
1043 /* Don't break; here because it's possible the same
1044 * slot could be in the list more than once
1045 */
1046 list_del(&pci_dev_id->slot_list);
1047 kfree(pci_dev_id);
1048
1049 err = 0;
1050
1051 pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%d from "
1052 "seize list\n", domain, bus, slot, func);
1053 }
1054 }
1055 spin_unlock_irqrestore(&device_ids_lock, flags);
1056
1057 return err;
1058 }
1059
1060 static int pcistub_reg_add(int domain, int bus, int slot, int func,
1061 unsigned int reg, unsigned int size,
1062 unsigned int mask)
1063 {
1064 int err = 0;
1065 struct pcistub_device *psdev;
1066 struct pci_dev *dev;
1067 struct config_field *field;
1068
1069 if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
1070 return -EINVAL;
1071
1072 psdev = pcistub_device_find(domain, bus, slot, func);
1073 if (!psdev) {
1074 err = -ENODEV;
1075 goto out;
1076 }
1077 dev = psdev->dev;
1078
1079 field = kzalloc(sizeof(*field), GFP_ATOMIC);
1080 if (!field) {
1081 err = -ENOMEM;
1082 goto out;
1083 }
1084
1085 field->offset = reg;
1086 field->size = size;
1087 field->mask = mask;
1088 field->init = NULL;
1089 field->reset = NULL;
1090 field->release = NULL;
1091 field->clean = xen_pcibk_config_field_free;
1092
1093 err = xen_pcibk_config_quirks_add_field(dev, field);
1094 if (err)
1095 kfree(field);
1096 out:
1097 if (psdev)
1098 pcistub_device_put(psdev);
1099 return err;
1100 }
1101
1102 static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
1103 size_t count)
1104 {
1105 int domain, bus, slot, func;
1106 int err;
1107
1108 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1109 if (err)
1110 goto out;
1111
1112 err = pcistub_device_id_add(domain, bus, slot, func);
1113
1114 out:
1115 if (!err)
1116 err = count;
1117 return err;
1118 }
1119 static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
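/* new_slot and the other driver attributes in this file live in the driver's
 * sysfs directory, normally /sys/bus/pci/drivers/pciback/. For example, a
 * slot can be queued for seizing with (illustrative BDF):
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/new_slot
 * The written string is parsed by str_to_slot() above.
 */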
1120
1121 static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
1122 size_t count)
1123 {
1124 int domain, bus, slot, func;
1125 int err;
1126
1127 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1128 if (err)
1129 goto out;
1130
1131 err = pcistub_device_id_remove(domain, bus, slot, func);
1132
1133 out:
1134 if (!err)
1135 err = count;
1136 return err;
1137 }
1138 static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
1139
1140 static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
1141 {
1142 struct pcistub_device_id *pci_dev_id;
1143 size_t count = 0;
1144 unsigned long flags;
1145
1146 spin_lock_irqsave(&device_ids_lock, flags);
1147 list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
1148 if (count >= PAGE_SIZE)
1149 break;
1150
1151 count += scnprintf(buf + count, PAGE_SIZE - count,
1152 "%04x:%02x:%02x.%d\n",
1153 pci_dev_id->domain, pci_dev_id->bus,
1154 PCI_SLOT(pci_dev_id->devfn),
1155 PCI_FUNC(pci_dev_id->devfn));
1156 }
1157 spin_unlock_irqrestore(&device_ids_lock, flags);
1158
1159 return count;
1160 }
1161 static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
1162
1163 static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
1164 {
1165 struct pcistub_device *psdev;
1166 struct xen_pcibk_dev_data *dev_data;
1167 size_t count = 0;
1168 unsigned long flags;
1169
1170 spin_lock_irqsave(&pcistub_devices_lock, flags);
1171 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
1172 if (count >= PAGE_SIZE)
1173 break;
1174 if (!psdev->dev)
1175 continue;
1176 dev_data = pci_get_drvdata(psdev->dev);
1177 if (!dev_data)
1178 continue;
1179 count +=
1180 scnprintf(buf + count, PAGE_SIZE - count,
1181 "%s:%s:%sing:%ld\n",
1182 pci_name(psdev->dev),
1183 dev_data->isr_on ? "on" : "off",
1184 dev_data->ack_intr ? "ack" : "not ack",
1185 dev_data->handled);
1186 }
1187 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
1188 return count;
1189 }
1190 static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
1191
1192 static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
1193 const char *buf,
1194 size_t count)
1195 {
1196 struct pcistub_device *psdev;
1197 struct xen_pcibk_dev_data *dev_data;
1198 int domain, bus, slot, func;
1199 int err = -ENOENT;
1200
1201 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1202 if (err)
1203 return err;
1204
1205 psdev = pcistub_device_find(domain, bus, slot, func);
1206 if (!psdev)
1207 goto out;
1208
1209 dev_data = pci_get_drvdata(psdev->dev);
1210 if (!dev_data)
1211 goto out;
1212
1213 dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
1214 dev_data->irq_name, dev_data->isr_on,
1215 !dev_data->isr_on);
1216
1217 dev_data->isr_on = !(dev_data->isr_on);
1218 if (dev_data->isr_on)
1219 dev_data->ack_intr = 1;
1220 out:
1221 if (psdev)
1222 pcistub_device_put(psdev);
1223 if (!err)
1224 err = count;
1225 return err;
1226 }
1227 static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
1228 pcistub_irq_handler_switch);
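/* Writing a slot string toggles the fake IRQ handler state for that device,
 * e.g. (illustrative BDF):
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/irq_handler_state
 * The current state of each device can be read from the irq_handlers
 * attribute above.
 */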
1229
1230 static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
1231 size_t count)
1232 {
1233 int domain, bus, slot, func, reg, size, mask;
1234 int err;
1235
1236 err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
1237 &mask);
1238 if (err)
1239 goto out;
1240
1241 err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
1242
1243 out:
1244 if (!err)
1245 err = count;
1246 return err;
1247 }
1248
1249 static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
1250 {
1251 int count = 0;
1252 unsigned long flags;
1253 struct xen_pcibk_config_quirk *quirk;
1254 struct xen_pcibk_dev_data *dev_data;
1255 const struct config_field *field;
1256 const struct config_field_entry *cfg_entry;
1257
1258 spin_lock_irqsave(&device_ids_lock, flags);
1259 list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
1260 if (count >= PAGE_SIZE)
1261 goto out;
1262
1263 count += scnprintf(buf + count, PAGE_SIZE - count,
1264 "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
1265 quirk->pdev->bus->number,
1266 PCI_SLOT(quirk->pdev->devfn),
1267 PCI_FUNC(quirk->pdev->devfn),
1268 quirk->devid.vendor, quirk->devid.device,
1269 quirk->devid.subvendor,
1270 quirk->devid.subdevice);
1271
1272 dev_data = pci_get_drvdata(quirk->pdev);
1273
1274 list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
1275 field = cfg_entry->field;
1276 if (count >= PAGE_SIZE)
1277 goto out;
1278
1279 count += scnprintf(buf + count, PAGE_SIZE - count,
1280 "\t\t%08x:%01x:%08x\n",
1281 cfg_entry->base_offset +
1282 field->offset, field->size,
1283 field->mask);
1284 }
1285 }
1286
1287 out:
1288 spin_unlock_irqrestore(&device_ids_lock, flags);
1289
1290 return count;
1291 }
1292 static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
1293 pcistub_quirk_add);
1294
1295 static ssize_t permissive_add(struct device_driver *drv, const char *buf,
1296 size_t count)
1297 {
1298 int domain, bus, slot, func;
1299 int err;
1300 struct pcistub_device *psdev;
1301 struct xen_pcibk_dev_data *dev_data;
1302
1303 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1304 if (err)
1305 goto out;
1306
1307 psdev = pcistub_device_find(domain, bus, slot, func);
1308 if (!psdev) {
1309 err = -ENODEV;
1310 goto out;
1311 }
1312
1313 dev_data = pci_get_drvdata(psdev->dev);
1314 /* the driver data for a device should never be null at this point */
1315 if (!dev_data) {
1316 err = -ENXIO;
1317 goto release;
1318 }
1319 if (!dev_data->permissive) {
1320 dev_data->permissive = 1;
1321 /* Let user know that what they're doing could be unsafe */
1322 dev_warn(&psdev->dev->dev, "enabling permissive mode "
1323 "configuration space accesses!\n");
1324 dev_warn(&psdev->dev->dev,
1325 "permissive mode is potentially unsafe!\n");
1326 }
1327 release:
1328 pcistub_device_put(psdev);
1329 out:
1330 if (!err)
1331 err = count;
1332 return err;
1333 }
1334
1335 static ssize_t permissive_show(struct device_driver *drv, char *buf)
1336 {
1337 struct pcistub_device *psdev;
1338 struct xen_pcibk_dev_data *dev_data;
1339 size_t count = 0;
1340 unsigned long flags;
1341 spin_lock_irqsave(&pcistub_devices_lock, flags);
1342 list_for_each_entry(psdev, &pcistub_devices, dev_list) {
1343 if (count >= PAGE_SIZE)
1344 break;
1345 if (!psdev->dev)
1346 continue;
1347 dev_data = pci_get_drvdata(psdev->dev);
1348 if (!dev_data || !dev_data->permissive)
1349 continue;
1350 count +=
1351 scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
1352 pci_name(psdev->dev));
1353 }
1354 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
1355 return count;
1356 }
1357 static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
1358 permissive_add);
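/* Writing a slot string enables permissive config-space access for that
 * already-seized device, e.g. (illustrative BDF):
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/permissive
 * Reading the attribute lists the devices currently in permissive mode.
 */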
1359
1360 static void pcistub_exit(void)
1361 {
1362 driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
1363 driver_remove_file(&xen_pcibk_pci_driver.driver,
1364 &driver_attr_remove_slot);
1365 driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
1366 driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
1367 driver_remove_file(&xen_pcibk_pci_driver.driver,
1368 &driver_attr_permissive);
1369 driver_remove_file(&xen_pcibk_pci_driver.driver,
1370 &driver_attr_irq_handlers);
1371 driver_remove_file(&xen_pcibk_pci_driver.driver,
1372 &driver_attr_irq_handler_state);
1373 pci_unregister_driver(&xen_pcibk_pci_driver);
1374 }
1375
1376 static int __init pcistub_init(void)
1377 {
1378 int pos = 0;
1379 int err = 0;
1380 int domain, bus, slot, func;
1381 int parsed;
1382
1383 if (pci_devs_to_hide && *pci_devs_to_hide) {
1384 do {
1385 parsed = 0;
1386
1387 err = sscanf(pci_devs_to_hide + pos,
1388 " (%x:%x:%x.%x) %n",
1389 &domain, &bus, &slot, &func, &parsed);
1390 switch (err) {
1391 case 3:
1392 func = -1;
1393 sscanf(pci_devs_to_hide + pos,
1394 " (%x:%x:%x.*) %n",
1395 &domain, &bus, &slot, &parsed);
1396 break;
1397 case 2:
1398 slot = func = -1;
1399 sscanf(pci_devs_to_hide + pos,
1400 " (%x:%x:*.*) %n",
1401 &domain, &bus, &parsed);
1402 break;
1403 }
1404
1405 if (!parsed) {
1406 domain = 0;
1407 err = sscanf(pci_devs_to_hide + pos,
1408 " (%x:%x.%x) %n",
1409 &bus, &slot, &func, &parsed);
1410 switch (err) {
1411 case 2:
1412 func = -1;
1413 sscanf(pci_devs_to_hide + pos,
1414 " (%x:%x.*) %n",
1415 &bus, &slot, &parsed);
1416 break;
1417 case 1:
1418 slot = func = -1;
1419 sscanf(pci_devs_to_hide + pos,
1420 " (%x:*.*) %n",
1421 &bus, &parsed);
1422 break;
1423 }
1424 }
1425
1426 if (parsed <= 0)
1427 goto parse_error;
1428
1429 err = pcistub_device_id_add(domain, bus, slot, func);
1430 if (err)
1431 goto out;
1432
1433 pos += parsed;
1434 } while (pci_devs_to_hide[pos]);
1435 }
1436
1437 /* If we're the first PCI Device Driver to register, we're the
1438 * first one to get offered PCI devices as they become
1439 * available (and thus we can be the first to grab them)
1440 */
1441 err = pci_register_driver(&xen_pcibk_pci_driver);
1442 if (err < 0)
1443 goto out;
1444
1445 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1446 &driver_attr_new_slot);
1447 if (!err)
1448 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1449 &driver_attr_remove_slot);
1450 if (!err)
1451 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1452 &driver_attr_slots);
1453 if (!err)
1454 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1455 &driver_attr_quirks);
1456 if (!err)
1457 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1458 &driver_attr_permissive);
1459
1460 if (!err)
1461 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1462 &driver_attr_irq_handlers);
1463 if (!err)
1464 err = driver_create_file(&xen_pcibk_pci_driver.driver,
1465 &driver_attr_irq_handler_state);
1466 if (err)
1467 pcistub_exit();
1468
1469 out:
1470 return err;
1471
1472 parse_error:
1473 printk(KERN_ERR DRV_NAME ": Error parsing pci_devs_to_hide at \"%s\"\n",
1474 pci_devs_to_hide + pos);
1475 return -EINVAL;
1476 }
1477
1478 #ifndef MODULE
1479 /*
1480 * fs_initcall happens before device_initcall
1481 * so xen_pcibk *should* get called first (b/c we
1482 * want to suck up any device before other drivers
1483 * get a chance by being the first pci device
1484 * driver to register)
1485 */
1486 fs_initcall(pcistub_init);
1487 #endif
1488
1489 static int __init xen_pcibk_init(void)
1490 {
1491 int err;
1492
1493 if (!xen_initial_domain())
1494 return -ENODEV;
1495
1496 err = xen_pcibk_config_init();
1497 if (err)
1498 return err;
1499
1500 #ifdef MODULE
1501 err = pcistub_init();
1502 if (err < 0)
1503 return err;
1504 #endif
1505
1506 pcistub_init_devices_late();
1507 err = xen_pcibk_xenbus_register();
1508 if (err)
1509 pcistub_exit();
1510
1511 return err;
1512 }
1513
1514 static void __exit xen_pcibk_cleanup(void)
1515 {
1516 xen_pcibk_xenbus_unregister();
1517 pcistub_exit();
1518 }
1519
1520 module_init(xen_pcibk_init);
1521 module_exit(xen_pcibk_cleanup);
1522
1523 MODULE_LICENSE("Dual BSD/GPL");
1524 MODULE_ALIAS("xen-backend:pci");