Commit | Line | Data |
---|---|---|
30edc14b KRW |
1 | /* |
2 | * PCI Backend Operations - respond to PCI requests from Frontend | |
3 | * | |
4 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | |
5 | */ | |
6 | #include <linux/module.h> | |
7 | #include <linux/wait.h> | |
8 | #include <linux/bitops.h> | |
9 | #include <xen/events.h> | |
10 | #include <linux/sched.h> | |
11 | #include "pciback.h" | |
12 | ||
13 | int verbose_request; | |
14 | module_param(verbose_request, int, 0644); | |
15 | ||
16 | /* Ensure a device is "turned off" and ready to be exported. | |
17 | * (Also see pciback_config_reset to ensure virtual configuration space is | |
18 | * ready to be re-exported) | |
19 | */ | |
20 | void pciback_reset_device(struct pci_dev *dev) | |
21 | { | |
22 | u16 cmd; | |
23 | ||
24 | /* Disable devices (but not bridges) */ | |
25 | if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) { | |
a2be65fd KRW |
26 | #ifdef CONFIG_PCI_MSI |
27 | /* The guest could have been abruptly killed without | |
28 | * disabling MSI/MSI-X interrupts.*/ | |
29 | if (dev->msix_enabled) | |
30 | pci_disable_msix(dev); | |
31 | if (dev->msi_enabled) | |
32 | pci_disable_msi(dev); | |
33 | #endif | |
30edc14b KRW |
34 | pci_disable_device(dev); |
35 | ||
36 | pci_write_config_word(dev, PCI_COMMAND, 0); | |
37 | ||
38 | dev->is_busmaster = 0; | |
39 | } else { | |
40 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | |
41 | if (cmd & (PCI_COMMAND_INVALIDATE)) { | |
42 | cmd &= ~(PCI_COMMAND_INVALIDATE); | |
43 | pci_write_config_word(dev, PCI_COMMAND, cmd); | |
44 | ||
45 | dev->is_busmaster = 0; | |
46 | } | |
47 | } | |
48 | } | |
/*
 * The same event channel is used both for pcifront conf_read_write requests
 * and for PCIe AER frontend acks.  A dedicated work queue (pciback_wq) is
 * used to schedule the conf_read_write service so it cannot conflict with
 * the aer_core do_recovery job, which runs on the system default work queue.
 */
void test_and_schedule_op(struct pciback_device *pdev)
{
	/* Check that the frontend is requesting an operation and that we are
	 * not already processing a request.  test_and_set_bit() makes the
	 * "claim the op" step atomic, so at most one worker is queued even if
	 * this races with the interrupt handler. */
	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
		queue_work(pciback_wq, &pdev->op_work);
	}
	/* _XEN_PCIB_active should have been cleared by pcifront; also make
	 * sure pciback is actually waiting for an AER ack (checked via
	 * _PCIB_op_pending) before waking the waiter. */
	if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
	    && test_bit(_PCIB_op_pending, &pdev->flags)) {
		wake_up(&aer_wait_queue);
	}
}
70 | ||
71 | /* Performing the configuration space reads/writes must not be done in atomic | |
72 | * context because some of the pci_* functions can sleep (mostly due to ACPI | |
73 | * use of semaphores). This function is intended to be called from a work | |
74 | * queue in process context taking a struct pciback_device as a parameter */ | |
75 | ||
76 | void pciback_do_op(struct work_struct *data) | |
77 | { | |
78 | struct pciback_device *pdev = | |
79 | container_of(data, struct pciback_device, op_work); | |
80 | struct pci_dev *dev; | |
81 | struct xen_pci_op *op = &pdev->sh_info->op; | |
82 | ||
83 | dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn); | |
84 | ||
85 | if (dev == NULL) | |
86 | op->err = XEN_PCI_ERR_dev_not_found; | |
87 | else { | |
88 | switch (op->cmd) { | |
89 | case XEN_PCI_OP_conf_read: | |
90 | op->err = pciback_config_read(dev, | |
91 | op->offset, op->size, &op->value); | |
92 | break; | |
93 | case XEN_PCI_OP_conf_write: | |
94 | op->err = pciback_config_write(dev, | |
95 | op->offset, op->size, op->value); | |
96 | break; | |
97 | #ifdef CONFIG_PCI_MSI | |
98 | case XEN_PCI_OP_enable_msi: | |
99 | op->err = pciback_enable_msi(pdev, dev, op); | |
100 | break; | |
101 | case XEN_PCI_OP_disable_msi: | |
102 | op->err = pciback_disable_msi(pdev, dev, op); | |
103 | break; | |
104 | case XEN_PCI_OP_enable_msix: | |
105 | op->err = pciback_enable_msix(pdev, dev, op); | |
106 | break; | |
107 | case XEN_PCI_OP_disable_msix: | |
108 | op->err = pciback_disable_msix(pdev, dev, op); | |
109 | break; | |
110 | #endif | |
111 | default: | |
112 | op->err = XEN_PCI_ERR_not_implemented; | |
113 | break; | |
114 | } | |
115 | } | |
116 | /* Tell the driver domain that we're done. */ | |
117 | wmb(); | |
118 | clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); | |
119 | notify_remote_via_irq(pdev->evtchn_irq); | |
120 | ||
121 | /* Mark that we're done. */ | |
122 | smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */ | |
123 | clear_bit(_PDEVF_op_active, &pdev->flags); | |
124 | smp_mb__after_clear_bit(); /* /before/ final check for work */ | |
125 | ||
126 | /* Check to see if the driver domain tried to start another request in | |
127 | * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. | |
128 | */ | |
129 | test_and_schedule_op(pdev); | |
130 | } | |
131 | ||
132 | irqreturn_t pciback_handle_event(int irq, void *dev_id) | |
133 | { | |
134 | struct pciback_device *pdev = dev_id; | |
135 | ||
136 | test_and_schedule_op(pdev); | |
137 | ||
138 | return IRQ_HANDLED; | |
139 | } |