/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
};

struct imx6_pcie {
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct pcie_port	pp;
	struct regmap		*iomuxc_gpr;
	enum imx6_pcie_variants variant;
	void __iomem		*mem_base;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

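/*
 * Poll the PHY control bus ack bit in PCIE_PHY_STAT until it matches
 * exp_val, giving up after a bounded number of 1us waits.
 */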
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

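/*
 * Latch a PHY register address into PCIE_PHY_CTRL: write the address,
 * pulse the "capture address" bit and wait for the ack handshake to
 * assert and deassert.
 */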
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	return pcie_phy_poll_ack(dbi_base, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	return pcie_phy_poll_ack(dbi_base, 0);
}

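/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */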
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

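/*
 * Cycle the PHY receiver through a reset by setting, then clearing, the
 * RX_DATA_EN and RX_PLL_EN override bits in PHY_RX_OVRD_IN_LO.
 */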
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
	u32 tmp;

	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
}

/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}

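/*
 * Put the PCIe core into reset. The sequence is SoC specific: i.MX6SX and
 * i.MX6QP have dedicated reset bits in the IOMUXC GPRs, while i.MX6Q/DL
 * additionally has to force the LTSSM back to "detect" if the bootloader
 * left the link enabled.
 */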
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	u32 val, gpr1, gpr12;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/*
		 * If the bootloader already enabled the link we need some
		 * special handling to get the core back into a state where
		 * it is safe to touch it for configuration.  As there is
		 * no dedicated reset signal wired up for MX6QDL, we need
		 * to manually force LTSSM into "detect" state before
		 * completely disabling LTSSM, which is a prerequisite for
		 * core configuration.
		 *
		 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
		 * have a strong indication that the bootloader activated
		 * the link.
		 */
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);

		if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
		    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
			val = readl(pp->dbi_base + PCIE_PL_PFLR);
			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
			val |= PCIE_PL_PFLR_FORCE_LINK;
			writel(val, pp->dbi_base + PCIE_PL_PFLR);

			regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
					   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	return 0;
}

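/*
 * Enable the PCIe reference clock: on i.MX6SX this also enables the inbound
 * AXI clock, on i.MX6Q/QP it powers up the PHY and gates the ref clock on
 * through GPR1.
 */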
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct pcie_port *pp = &imx6_pcie->pp;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(pp->dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to synchronize
		 * internally.  If the ref clock only comes up after reset,
		 * the internally synced reset time is too short to meet the
		 * requirement, so add a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	}

	return ret;
}

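/*
 * Bring the core out of reset: enable the clocks, toggle the optional
 * external reset GPIO and release the SoC-specific reset bits.
 */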
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	int ret;

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(pp->dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return 0;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	return ret;
}

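/*
 * Program the IOMUXC GPRs that feed static configuration into the PCIe
 * controller and PHY: device type, LOS level and the TX de-emphasis/swing
 * values taken from the device tree (or their defaults).
 */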
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

	if (imx6_pcie->variant == IMX6SX)
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

	/* configure constant input signal to the pcie ctrl and phy */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
			   imx6_pcie->tx_deemph_gen1 << 0);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
			   imx6_pcie->tx_deemph_gen2_3p5db << 6);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
			   imx6_pcie->tx_deemph_gen2_6db << 12);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_FULL,
			   imx6_pcie->tx_swing_full << 18);
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
			   IMX6Q_GPR8_TX_SWING_LOW,
			   imx6_pcie->tx_swing_low << 25);
}

static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pp))
		return 0;

	dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

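/*
 * Poll PCIE_LINK_WIDTH_SPEED_CONTROL until the directed speed change
 * started in imx6_pcie_establish_link() has completed.
 */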
static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
{
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(pp->dev, "Speed change timeout\n");
	return -EINVAL;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}

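/*
 * Train the link: limit the port to Gen1, enable the LTSSM, and once the
 * link is up optionally raise the maximum speed to Gen2 and trigger a
 * directed speed change.
 */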
static int imx6_pcie_establish_link(struct pcie_port *pp)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	writel(tmp, pp->dbi_base + PCIE_RC_LCR);

	/* Start LTSSM. */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(pp);
	if (ret) {
		dev_info(pp->dev, "Link never came up\n");
		goto err_reset_phy;
	}

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = readl(pp->dbi_base + PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		writel(tmp, pp->dbi_base + PCIE_RC_LCR);
	} else {
		dev_info(pp->dev, "Link: Gen2 disabled\n");
	}

	/*
	 * Start Directed Speed Change so the best possible speed both link
	 * partners support can be negotiated.
	 */
	tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	tmp |= PORT_LOGIC_SPEED_CHANGE;
	writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	ret = imx6_pcie_wait_for_speed_change(pp);
	if (ret) {
		dev_err(pp->dev, "Failed to bring link up!\n");
		goto err_reset_phy;
	}

	/* Make sure link training is finished as well! */
	ret = imx6_pcie_wait_for_link(pp);
	if (ret) {
		dev_err(pp->dev, "Failed to bring link up!\n");
		goto err_reset_phy;
	}

	tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
	dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(pp);

	return ret;
}

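/*
 * DesignWare host_init callback: reset the core, configure the PHY, set up
 * the root complex and bring up the link.
 */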
static void imx6_pcie_host_init(struct pcie_port *pp)
{
	imx6_pcie_assert_core_reset(pp);

	imx6_pcie_init_phy(pp);

	imx6_pcie_deassert_core_reset(pp);

	dw_pcie_setup_rc(pp);

	imx6_pcie_establish_link(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}

static int imx6_pcie_link_up(struct pcie_port *pp)
{
	return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
	.link_up = imx6_pcie_link_up,
	.host_init = imx6_pcie_host_init,
};

static int __init imx6_add_pcie_port(struct pcie_port *pp,
				     struct platform_device *pdev)
{
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(&pdev->dev, "failed to get MSI irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
				       imx6_pcie_msi_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       "mx6-pcie-msi", pp);
		if (ret) {
			dev_err(&pdev->dev, "failed to request MSI irq\n");
			return ret;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

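/*
 * Probe: map the DBI registers, look up the reset GPIO, clocks, IOMUXC GPR
 * regmap and optional PHY tuning properties from the device tree, then
 * register the host bridge.
 */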
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie;
	struct pcie_port *pp;
	struct device_node *np = pdev->dev.of_node;
	struct resource *dbi_base;
	struct device_node *node = pdev->dev.of_node;
	int ret;

	imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pp = &imx6_pcie->pp;
	pp->dev = &pdev->dev;

	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);

	/* Added for PCI abort handling */
	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
		"imprecise external abort");

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(np,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(&pdev->dev, "unable to get reset gpio\n");
			return ret;
		}
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(&pdev->dev,
			"pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(&pdev->dev,
			"pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(&pdev->dev,
			"pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	if (imx6_pcie->variant == IMX6SX) {
		imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(&pdev->dev,
				"pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(&pdev->dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	ret = imx6_add_pcie_port(pp, pdev);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, imx6_pcie);
	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(&imx6_pcie->pp);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
	},
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
device_initcall(imx6_pcie_init);