/*
 * arch/powerpc/platforms/powernv/pci-cxl.c
 *
 * Copyright 2014-2016 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <asm/pci-bridge.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <misc/cxl.h>

#include "pci.h"

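/*
 * Return the device tree node of the PHB that this device sits under. A
 * reference is taken on the node, which the caller must drop with
 * of_node_put() when done with it.
 */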
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

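/*
 * Ask OPAL to switch the PHB associated with this device into the requested
 * cxl mode. The mode is applied to the PE that the device belongs to.
 */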
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	pe_info(pe, "Switching PHB to CXL\n");

	rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
	if (rc == OPAL_UNSUPPORTED)
		dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
	else if (rc)
		dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);

	return rc;
}
EXPORT_SYMBOL(pnv_phb_to_cxl_mode);

/*
 * Find the PHB for the cxl device and allocate MSI hwirqs.
 * Returns the absolute hardware IRQ number.
 */
int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);

	if (hwirq < 0) {
		dev_warn(&dev->dev, "Failed to find a free MSI\n");
		return -ENOSPC;
	}

	return phb->msi_base + hwirq;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);

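/*
 * Release hwirqs previously allocated with pnv_cxl_alloc_hwirqs(). The hwirq
 * argument is the absolute hardware IRQ number returned by the allocator, so
 * it is converted back to a bitmap offset before being freed. Typical usage:
 *
 *	hwirq = pnv_cxl_alloc_hwirqs(dev, 1);
 *	...
 *	pnv_cxl_release_hwirqs(dev, hwirq, 1);
 */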
void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
}
EXPORT_SYMBOL(pnv_cxl_release_hwirqs);

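/*
 * Free every allocated range in the given cxl_irq_ranges structure. Range 0
 * is skipped, matching pnv_cxl_alloc_hwirq_ranges() below, since it is
 * reserved for the multiplexed PSL DSI interrupt.
 */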
void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
				  struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq;

	for (i = 1; i < CXL_IRQ_RANGES; i++) {
		if (!irqs->range[i])
			continue;
		pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
			 i, irqs->offset[i], irqs->range[i]);
		hwirq = irqs->offset[i] - phb->msi_base;
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
				       irqs->range[i]);
	}
}
EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);

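/*
 * Allocate 'num' MSI hwirqs, possibly split across several ranges, and record
 * them in the given cxl_irq_ranges structure. Range 0 is reserved for the
 * multiplexed PSL DSI interrupt, so allocation starts at range 1. When a
 * contiguous block of the remaining size is unavailable, the request is
 * halved until an allocation succeeds; any partial allocation is released
 * again on failure.
 */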
int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq, try;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	/* 0 is reserved for the multiplexed PSL DSI interrupt */
	for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
			if (hwirq >= 0)
				break;
			try /= 2;
		}
		if (!try)
			goto fail;

		irqs->offset[i] = phb->msi_base + hwirq;
		irqs->range[i] = try;
		pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
			 i, irqs->offset[i], irqs->range[i]);
		num -= try;
	}
	if (num)
		goto fail;

	return 0;
fail:
	pnv_cxl_release_hwirq_ranges(irqs, dev);
	return -ENOSPC;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);

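/* Return the total number of MSI hwirqs managed by the device's PHB. */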
int pnv_cxl_get_irq_count(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return phb->msi_bmp.irq_count;
}
EXPORT_SYMBOL(pnv_cxl_get_irq_count);

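/*
 * Wire an allocated hwirq up to the device's PE: program the XIVE via OPAL,
 * then attach the PowerNV MSI irq chip to the Linux virq.
 */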
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
			   unsigned int virq)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	unsigned int xive_num = hwirq - phb->msi_base;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x hwirq 0x%x XIVE 0x%x PE\n",
			pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
		return -EIO;
	}
	pnv_set_msi_irq_chip(phb, virq);

	return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);

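/*
 * If the cxl driver is built as a module, take a reference on it so that it
 * cannot be unloaded while a PHB is in cxl mode. When cxl is built into the
 * kernel there is nothing to pin, so this is a no-op.
 */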
#if IS_MODULE(CONFIG_CXL)
static inline int get_cxl_module(void)
{
	struct module *cxl_module;

	mutex_lock(&module_mutex);

	cxl_module = find_module("cxl");
	if (cxl_module)
		__module_get(cxl_module);

	mutex_unlock(&module_mutex);

	if (!cxl_module)
		return -ENODEV;

	return 0;
}
#else
static inline int get_cxl_module(void) { return 0; }
#endif

/*
 * Sets flags and switches the controller ops to enable the cxl kernel API.
 * Originally the cxl kernel API operated on a virtual PHB, but certain cards
 * such as the Mellanox CX4 use a peer model instead, and for these cards the
 * cxl kernel API operates on the real PHB.
 */
int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
{
	struct pnv_phb *phb = hose->private_data;
	int rc;

	if (!enable) {
		/*
		 * Once cxl mode is enabled on the PHB, there is currently no
		 * known safe method to disable it again, and trying risks a
		 * checkstop. If we can find a way to safely disable cxl mode
		 * in the future we can revisit this, but for now the only sane
		 * thing to do is to refuse to disable cxl mode:
		 */
		return -EPERM;
	}

	/*
	 * Hold a reference to the cxl module since several PHB operations now
	 * depend on it, and it would be insane to allow it to be removed so
	 * long as we are in this mode (and since we can't safely disable this
	 * mode once enabled...).
	 */
	rc = get_cxl_module();
	if (rc)
		return rc;

	phb->flags |= PNV_PHB_FLAG_CXL;
	hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);

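/* Report whether this device sits under a PHB that has cxl mode enabled. */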
bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return !!(phb->flags & PNV_PHB_FLAG_CXL);
}
EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);

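/* Return the peer AFU registered against this PHB, if any. */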
struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	return (struct cxl_afu *)phb->cxl_afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);

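/* Record the peer AFU for the PHB that this device sits under. */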
void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	phb->cxl_afu = afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);

/*
 * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
 * by other functions on the device for memory access and interrupts. When the
 * other functions are enabled we explicitly take a reference on the cxl
 * function since they will use it, and allocate a default context associated
 * with that function just like the vPHB model of the cxl kernel API.
 */
bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	if (!pnv_pci_enable_device_hook(dev))
		return false;

	/* No special handling for the cxl function, which is always PF 0 */
	if (PCI_FUNC(dev->devfn) == 0)
		return true;

	if (!afu) {
		dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
		return false;
	}

	dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");

	/* Make sure the peer AFU can't go away while this device is active */
	cxl_afu_get(afu);

	return cxl_pci_associate_default_context(dev, afu);
}

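/*
 * Counterpart to pnv_cxl_enable_device_hook(): tear down the default context
 * of a non-cxl function and drop the reference taken on the peer AFU.
 */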
void pnv_cxl_disable_device(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	/* No special handling for the cxl function: */
	if (PCI_FUNC(dev->devfn) == 0)
		return;

	cxl_pci_disable_device(dev);
	cxl_afu_put(afu);
}

/*
 * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
 * function handles setting up the IVTE entries for the XSL to use.
 *
 * We are currently not filling out the MSI-X table, since the only currently
 * supported adapter (CX4) uses a custom MSI-X table format in cxl mode and it
 * is up to its driver to fill that out. In the future we may fill out the
 * MSI-X table (and change the IVTE entries to be an index into the MSI-X
 * table) for adapters implementing the Full MSI-X mode described in the CAIA.
 */
int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct cxl_context *ctx = NULL;
	unsigned int virq;
	int hwirq;
	int afu_irq = 0;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
	if (rc)
		return rc;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}

		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (WARN_ON(hwirq <= 0))
			return (hwirq ? hwirq : -ENOMEM);

		virq = irq_create_mapping(NULL, hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
				pci_name(pdev));
			return -ENOMEM;
		}

		rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
		if (rc) {
			pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			return rc;
		}

		irq_set_msi_desc(virq, entry);
	}

	return 0;
}

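/*
 * Undo the MSI setup performed above: dispose of the Linux irq mappings and
 * then hand the cxl contexts and hwirqs back via cxl_cx4_teardown_msi_irqs().
 */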
void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (entry->irq == NO_IRQ)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}

	cxl_cx4_teardown_msi_irqs(pdev);
}