/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
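
/* xgene_enet_ring_init() encodes the ring's DMA base address, configured
 * size and the threshold/coherency flags into the ring state words that
 * are later written out to the ring CSRs.
 */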

static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}
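
/* Read/write a 32-bit register in the ring CSR region of this port
 * (pdata->ring_csr_addr).
 */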

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}
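
/* xgene_enet_write_ring_state() selects the ring through CSR_RING_CONFIG
 * and then writes out all of the ring state words starting at
 * CSR_RING_WR_BASE.
 */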

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}
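
/* xgene_enet_set_ring_id() programs the ring id and prefetch buffer number
 * for this ring; buffer pool rings are additionally flagged as such.
 */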

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
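
/* xgene_enet_setup_ring() configures a descriptor ring: it clears any stale
 * state, programs the new state and ring id, and for rings owned by the CPU
 * marks every slot empty and enables the ring's non-empty interrupt mode bit.
 */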

static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}
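
/* xgene_enet_setup_coalescing() programs the PBM interrupt coalescing
 * clock ticks and the two threshold set registers.
 */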

static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x7777;

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}
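
/* The MCX MAC registers are not memory mapped directly. They are reached
 * through an indirect interface: write the register address and data, issue
 * the command, then poll the command-done register for completion.
 */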

static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
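
/* xgene_enet_ecc_init() brings the internal RAMs out of shutdown and waits
 * for the block memory-ready status to assert before the port is used.
 */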

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
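
/* xgene_enet_configure_clock() matches the Ethernet clock rate to the
 * negotiated PHY speed. On device-tree platforms the parent clock rate is
 * changed directly; on ACPI platforms a platform ACPI method is evaluated
 * instead.
 */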

static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);

		CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
		CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);

		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
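
/* xgene_enet_cle_bypass() bypasses the classifier engine and steers all
 * received frames to the given destination ring, drawing buffers from the
 * given buffer pool.
 */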

static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					   "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, val, data;

	val = xgene_enet_ring_bufnum(ring->id);

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(val - 0x20);
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(val);
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb, val;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val - 0x20);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}
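
/* xgene_enet_adjust_link() is the PHY link-state callback: on link up it
 * reprograms the MAC for the new speed and re-enables Rx/Tx; on link down
 * it disables the MAC and marks the speed unknown.
 */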

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct acpi_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return to_acpi_device_node(args.fwnode);
}
#endif

int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0 ; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			phy_dev = of_phy_connect(ndev, np,
						 &xgene_enet_adjust_link,
						 0, pdata->phy_mode);
			of_node_put(np);
			if (phy_dev)
				break;
		}

		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);
		if (adev)
			pdata->phy_dev = adev->driver_data;

		phy_dev = pdata->phy_dev;
		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	pdata->phy_dev = phy;

	return ret;
}
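
/* xgene_enet_mdio_config() allocates and registers the MDIO bus for this
 * port and connects the PHY; on failure the bus is freed again.
 */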

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
	.coalesce = xgene_enet_setup_coalescing,
};