Merge remote-tracking branch 'vfio/next'
[deliverable/linux.git] / drivers / pci / host / pcie-qcom.c
1 /*
2 * Qualcomm PCIe root complex driver
3 *
4 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
5 * Copyright 2015 Linaro Limited.
6 *
7 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 and
11 * only version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/gpio.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/iopoll.h>
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/of_device.h>
28 #include <linux/of_gpio.h>
29 #include <linux/pci.h>
30 #include <linux/platform_device.h>
31 #include <linux/phy/phy.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/reset.h>
34 #include <linux/slab.h>
35 #include <linux/types.h>
36
37 #include "pcie-designware.h"
38
/* PARF (PCIe wrapper) register offsets, relative to the "parf" MMIO region */
#define PCIE20_PARF_PHY_CTRL			0x40	/* bit 0: PHY test power-down */
#define PCIE20_PARF_PHY_REFCLK			0x4C	/* bit 16: use external refclk */
#define PCIE20_PARF_DBI_BASE_ADDR		0x168	/* DBI base seen by the core */
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178	/* bit 31: MSI write halt */

/* ELBI (external local bus interface) register offsets */
#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)	/* start link training */

/* Offset of the PCIe capability structure in config space */
#define PCIE20_CAP				0x70

/* Settle time (us) around PERST# toggling */
#define PERST_DELAY_US				1000
51
/*
 * Clocks, resets and regulators used by the v0 core revision
 * (IPQ8064 / APQ8064 SoCs).
 */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;			/* AHB register interface clock */
	struct clk *core_clk;			/* PCIe core clock */
	struct clk *phy_clk;			/* PHY clock */
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;			/* core supply */
	struct regulator *vdda_phy;		/* PHY supply */
	struct regulator *vdda_refclk;		/* reference clock supply */
};
65
/*
 * Clocks, reset and regulator used by the v1 core revision (APQ8084 SoC).
 */
struct qcom_pcie_resources_v1 {
	struct clk *iface;			/* AHB register interface clock */
	struct clk *aux;			/* auxiliary clock */
	struct clk *master_bus;			/* AXI master bus clock */
	struct clk *slave_bus;			/* AXI slave bus clock */
	struct reset_control *core;
	struct regulator *vdda;			/* core supply */
};
74
/* Only one variant is ever active, selected by the OF match data (ops). */
union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
};
79
struct qcom_pcie;

/* Per-core-revision hooks, selected via the OF match table .data pointer. */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);	/* acquire clks/resets/regulators */
	int (*init)(struct qcom_pcie *pcie);		/* power up / bring out of reset */
	void (*deinit)(struct qcom_pcie *pcie);		/* undo init */
};
87
/* Driver-private state, one per root complex instance. */
struct qcom_pcie {
	struct pcie_port pp;		/* DesignWare host; must allow container_of */
	struct device *dev;
	union qcom_pcie_resources res;	/* v0 or v1 resources, per ops */
	void __iomem *parf;		/* PARF wrapper registers */
	void __iomem *dbi;		/* DesignWare DBI registers */
	void __iomem *elbi;		/* ELBI registers */
	struct phy *phy;		/* optional "pciephy" */
	struct gpio_desc *reset;	/* optional PERST# GPIO, active high */
	struct qcom_pcie_ops *ops;
};

/* Recover the driver state from the embedded DesignWare pcie_port. */
#define to_qcom_pcie(x)		container_of(x, struct qcom_pcie, pp)
101
/* Assert PERST# towards the endpoint and wait for it to take effect. */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
107
/* Release PERST# and give the endpoint time to come out of reset. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
113
114 static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
115 {
116 struct pcie_port *pp = arg;
117
118 return dw_handle_msi_irq(pp);
119 }
120
121 static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
122 {
123 u32 val;
124
125 if (dw_pcie_link_up(&pcie->pp))
126 return 0;
127
128 /* enable link training */
129 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
130 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
131 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
132
133 return dw_pcie_wait_for_link(&pcie->pp);
134 }
135
/*
 * Acquire all v0 resources (regulators, clocks, resets) via devm_* so
 * nothing needs explicit release on probe failure.  Returns 0 or the
 * first PTR_ERR encountered.
 */
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	return 0;
}
187
/*
 * Acquire all v1 resources (regulator, clocks, reset) via devm_*.
 * Returns 0 or the first PTR_ERR encountered.
 */
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	if (IS_ERR(res->core))
		return PTR_ERR(res->core);

	return 0;
}
219
220 static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
221 {
222 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
223
224 reset_control_assert(res->pci_reset);
225 reset_control_assert(res->axi_reset);
226 reset_control_assert(res->ahb_reset);
227 reset_control_assert(res->por_reset);
228 reset_control_assert(res->pci_reset);
229 clk_disable_unprepare(res->iface_clk);
230 clk_disable_unprepare(res->core_clk);
231 clk_disable_unprepare(res->phy_clk);
232 regulator_disable(res->vdda);
233 regulator_disable(res->vdda_phy);
234 regulator_disable(res->vdda_refclk);
235 }
236
237 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
238 {
239 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
240 struct device *dev = pcie->dev;
241 u32 val;
242 int ret;
243
244 ret = regulator_enable(res->vdda);
245 if (ret) {
246 dev_err(dev, "cannot enable vdda regulator\n");
247 return ret;
248 }
249
250 ret = regulator_enable(res->vdda_refclk);
251 if (ret) {
252 dev_err(dev, "cannot enable vdda_refclk regulator\n");
253 goto err_refclk;
254 }
255
256 ret = regulator_enable(res->vdda_phy);
257 if (ret) {
258 dev_err(dev, "cannot enable vdda_phy regulator\n");
259 goto err_vdda_phy;
260 }
261
262 ret = reset_control_assert(res->ahb_reset);
263 if (ret) {
264 dev_err(dev, "cannot assert ahb reset\n");
265 goto err_assert_ahb;
266 }
267
268 ret = clk_prepare_enable(res->iface_clk);
269 if (ret) {
270 dev_err(dev, "cannot prepare/enable iface clock\n");
271 goto err_assert_ahb;
272 }
273
274 ret = clk_prepare_enable(res->phy_clk);
275 if (ret) {
276 dev_err(dev, "cannot prepare/enable phy clock\n");
277 goto err_clk_phy;
278 }
279
280 ret = clk_prepare_enable(res->core_clk);
281 if (ret) {
282 dev_err(dev, "cannot prepare/enable core clock\n");
283 goto err_clk_core;
284 }
285
286 ret = reset_control_deassert(res->ahb_reset);
287 if (ret) {
288 dev_err(dev, "cannot deassert ahb reset\n");
289 goto err_deassert_ahb;
290 }
291
292 /* enable PCIe clocks and resets */
293 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
294 val &= ~BIT(0);
295 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
296
297 /* enable external reference clock */
298 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
299 val |= BIT(16);
300 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
301
302 ret = reset_control_deassert(res->phy_reset);
303 if (ret) {
304 dev_err(dev, "cannot deassert phy reset\n");
305 return ret;
306 }
307
308 ret = reset_control_deassert(res->pci_reset);
309 if (ret) {
310 dev_err(dev, "cannot deassert pci reset\n");
311 return ret;
312 }
313
314 ret = reset_control_deassert(res->por_reset);
315 if (ret) {
316 dev_err(dev, "cannot deassert por reset\n");
317 return ret;
318 }
319
320 ret = reset_control_deassert(res->axi_reset);
321 if (ret) {
322 dev_err(dev, "cannot deassert axi reset\n");
323 return ret;
324 }
325
326 /* wait for clock acquisition */
327 usleep_range(1000, 1500);
328
329 return 0;
330
331 err_deassert_ahb:
332 clk_disable_unprepare(res->core_clk);
333 err_clk_core:
334 clk_disable_unprepare(res->phy_clk);
335 err_clk_phy:
336 clk_disable_unprepare(res->iface_clk);
337 err_assert_ahb:
338 regulator_disable(res->vdda_phy);
339 err_vdda_phy:
340 regulator_disable(res->vdda_refclk);
341 err_refclk:
342 regulator_disable(res->vdda);
343
344 return ret;
345 }
346
/*
 * Power down the v1 core: assert the core reset, stop the clocks and
 * cut the supply, in reverse order of qcom_pcie_init_v1().
 */
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
358
/*
 * Power up the v1 core: release the core reset, enable the clocks and
 * supply, then program the PARF wrapper (DBI base, MSI write halt).
 *
 * Returns 0 on success, a negative errno on failure; on failure every
 * resource enabled up to that point is released via the goto ladder.
 */
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* halt AXI master writes so MSIs are not lost */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
425
426 static int qcom_pcie_link_up(struct pcie_port *pp)
427 {
428 struct qcom_pcie *pcie = to_qcom_pcie(pp);
429 u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
430
431 return !!(val & PCI_EXP_LNKSTA_DLLLA);
432 }
433
434 static void qcom_pcie_host_init(struct pcie_port *pp)
435 {
436 struct qcom_pcie *pcie = to_qcom_pcie(pp);
437 int ret;
438
439 qcom_ep_reset_assert(pcie);
440
441 ret = pcie->ops->init(pcie);
442 if (ret)
443 goto err_deinit;
444
445 ret = phy_power_on(pcie->phy);
446 if (ret)
447 goto err_deinit;
448
449 dw_pcie_setup_rc(pp);
450
451 if (IS_ENABLED(CONFIG_PCI_MSI))
452 dw_pcie_msi_init(pp);
453
454 qcom_ep_reset_deassert(pcie);
455
456 ret = qcom_pcie_establish_link(pcie);
457 if (ret)
458 goto err;
459
460 return;
461 err:
462 qcom_ep_reset_assert(pcie);
463 phy_power_off(pcie->phy);
464 err_deinit:
465 pcie->ops->deinit(pcie);
466 }
467
468 static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
469 u32 *val)
470 {
471 /* the device class is not reported correctly from the register */
472 if (where == PCI_CLASS_REVISION && size == 4) {
473 *val = readl(pp->dbi_base + PCI_CLASS_REVISION);
474 *val &= 0xff; /* keep revision id */
475 *val |= PCI_CLASS_BRIDGE_PCI << 16;
476 return PCIBIOS_SUCCESSFUL;
477 }
478
479 return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
480 }
481
/* Hooks handed to the DesignWare host core. */
static struct pcie_host_ops qcom_pcie_dw_ops = {
	.link_up = qcom_pcie_link_up,
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

/* IPQ8064 / APQ8064 core revision */
static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
};

/* APQ8084 core revision */
static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
};
499
500 static int qcom_pcie_probe(struct platform_device *pdev)
501 {
502 struct device *dev = &pdev->dev;
503 struct resource *res;
504 struct qcom_pcie *pcie;
505 struct pcie_port *pp;
506 int ret;
507
508 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
509 if (!pcie)
510 return -ENOMEM;
511
512 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
513 pcie->dev = dev;
514
515 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
516 if (IS_ERR(pcie->reset))
517 return PTR_ERR(pcie->reset);
518
519 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
520 pcie->parf = devm_ioremap_resource(dev, res);
521 if (IS_ERR(pcie->parf))
522 return PTR_ERR(pcie->parf);
523
524 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
525 pcie->dbi = devm_ioremap_resource(dev, res);
526 if (IS_ERR(pcie->dbi))
527 return PTR_ERR(pcie->dbi);
528
529 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
530 pcie->elbi = devm_ioremap_resource(dev, res);
531 if (IS_ERR(pcie->elbi))
532 return PTR_ERR(pcie->elbi);
533
534 pcie->phy = devm_phy_optional_get(dev, "pciephy");
535 if (IS_ERR(pcie->phy))
536 return PTR_ERR(pcie->phy);
537
538 ret = pcie->ops->get_resources(pcie);
539 if (ret)
540 return ret;
541
542 pp = &pcie->pp;
543 pp->dev = dev;
544 pp->dbi_base = pcie->dbi;
545 pp->root_bus_nr = -1;
546 pp->ops = &qcom_pcie_dw_ops;
547
548 if (IS_ENABLED(CONFIG_PCI_MSI)) {
549 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
550 if (pp->msi_irq < 0)
551 return pp->msi_irq;
552
553 ret = devm_request_irq(dev, pp->msi_irq,
554 qcom_pcie_msi_irq_handler,
555 IRQF_SHARED, "qcom-pcie-msi", pp);
556 if (ret) {
557 dev_err(dev, "cannot request msi irq\n");
558 return ret;
559 }
560 }
561
562 ret = phy_init(pcie->phy);
563 if (ret)
564 return ret;
565
566 ret = dw_pcie_host_init(pp);
567 if (ret) {
568 dev_err(dev, "cannot initialize host\n");
569 return ret;
570 }
571
572 platform_set_drvdata(pdev, pcie);
573
574 return 0;
575 }
576
/* Match table; .data selects the per-revision ops set. */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ }
};

/* Built-in only (no module unload path), hence no .remove callback. */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
This page took 0.045709 seconds and 5 git commands to generate.