Commit | Line | Data |
---|---|---|
e6ad7673 IS |
1 | /* Applied Micro X-Gene SoC Ethernet Driver |
2 | * | |
3 | * Copyright (c) 2014, Applied Micro Circuits Corporation | |
4 | * Authors: Iyappan Subramanian <isubramanian@apm.com> | |
5 | * Ravi Patel <rapatel@apm.com> | |
6 | * Keyur Chudgar <kchudgar@apm.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License as published by the | |
10 | * Free Software Foundation; either version 2 of the License, or (at your | |
11 | * option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | ||
22 | #include "xgene_enet_main.h" | |
23 | #include "xgene_enet_hw.h" | |
24 | ||
25 | static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) | |
26 | { | |
27 | u32 *ring_cfg = ring->state; | |
28 | u64 addr = ring->dma; | |
29 | enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; | |
30 | ||
31 | ring_cfg[4] |= (1 << SELTHRSH_POS) & | |
32 | CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN); | |
33 | ring_cfg[3] |= ACCEPTLERR; | |
34 | ring_cfg[2] |= QCOHERENT; | |
35 | ||
36 | addr >>= 8; | |
37 | ring_cfg[2] |= (addr << RINGADDRL_POS) & | |
38 | CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN); | |
39 | addr >>= RINGADDRL_LEN; | |
40 | ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN); | |
41 | ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) & | |
42 | CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN); | |
43 | } | |
44 | ||
45 | static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) | |
46 | { | |
47 | u32 *ring_cfg = ring->state; | |
48 | bool is_bufpool; | |
49 | u32 val; | |
50 | ||
51 | is_bufpool = xgene_enet_is_bufpool(ring->id); | |
52 | val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR; | |
53 | ring_cfg[4] |= (val << RINGTYPE_POS) & | |
54 | CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN); | |
55 | ||
56 | if (is_bufpool) { | |
57 | ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) & | |
58 | CREATE_MASK(RINGMODE_POS, RINGMODE_LEN); | |
59 | } | |
60 | } | |
61 | ||
/* Enable the recombination buffer and program its timeout fields. */
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	/* low part of the recombination timeout, fixed at 0xf */
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
		       CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	/* high part, fixed at 0x7.  NOTE(review): unlike the low part, 0x7
	 * is not shifted by RECOMTIMEOUTH_POS before masking — confirm the
	 * field really starts at bit 0 of state word 4.
	 */
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}
71 | ||
72 | static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, | |
73 | u32 offset, u32 data) | |
74 | { | |
75 | struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); | |
76 | ||
77 | iowrite32(data, pdata->ring_csr_addr + offset); | |
78 | } | |
79 | ||
80 | static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring, | |
81 | u32 offset, u32 *data) | |
82 | { | |
83 | struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); | |
84 | ||
85 | *data = ioread32(pdata->ring_csr_addr + offset); | |
86 | } | |
87 | ||
88 | static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) | |
89 | { | |
90 | int i; | |
91 | ||
92 | xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); | |
93 | for (i = 0; i < NUM_RING_CONFIG; i++) { | |
94 | xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), | |
95 | ring->state[i]); | |
96 | } | |
97 | } | |
98 | ||
99 | static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) | |
100 | { | |
101 | memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG); | |
102 | xgene_enet_write_ring_state(ring); | |
103 | } | |
104 | ||
/* Build the ring state words (type, optional recombination buffer, base
 * config) and write them to hardware.
 */
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	/* the recombination buffer is set up only for ETH0-owned rings */
	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}
115 | ||
116 | static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) | |
117 | { | |
118 | u32 ring_id_val, ring_id_buf; | |
119 | bool is_bufpool; | |
120 | ||
121 | is_bufpool = xgene_enet_is_bufpool(ring->id); | |
122 | ||
123 | ring_id_val = ring->id & GENMASK(9, 0); | |
124 | ring_id_val |= OVERWRITE; | |
125 | ||
126 | ring_id_buf = (ring->num << 9) & GENMASK(18, 9); | |
127 | ring_id_buf |= PREFETCH_BUF_EN; | |
128 | if (is_bufpool) | |
129 | ring_id_buf |= IS_BUFFER_POOL; | |
130 | ||
131 | xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); | |
132 | xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); | |
133 | } | |
134 | ||
135 | static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) | |
136 | { | |
137 | u32 ring_id; | |
138 | ||
139 | ring_id = ring->id | OVERWRITE; | |
140 | xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); | |
141 | xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); | |
142 | } | |
143 | ||
/* Program ring state and id registers and prepare the ring for use.
 * For CPU-owned, non-bufpool rings additionally mark every descriptor
 * slot empty and enable the ring's not-empty interrupt mode bit.
 * Returns @ring.
 */
struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	/* start from a clean slate before programming new state */
	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	/* the slot/interrupt setup below applies only to CPU-owned rings */
	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	/* enable interrupt-on-not-empty for this ring's buffer number */
	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}
170 | ||
/* Inverse of xgene_enet_setup_ring(): for CPU-owned, non-bufpool rings
 * disable the not-empty interrupt bit, then clear the ring id and state
 * registers for every ring.
 */
void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	/* disable interrupt-on-not-empty for this ring's buffer number */
	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}
188 | ||
189 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, | |
190 | struct xgene_enet_pdata *pdata, | |
191 | enum xgene_enet_err_code status) | |
192 | { | |
193 | struct rtnl_link_stats64 *stats = &pdata->stats; | |
194 | ||
195 | switch (status) { | |
196 | case INGRESS_CRC: | |
197 | stats->rx_crc_errors++; | |
198 | break; | |
199 | case INGRESS_CHECKSUM: | |
200 | case INGRESS_CHECKSUM_COMPUTE: | |
201 | stats->rx_errors++; | |
202 | break; | |
203 | case INGRESS_TRUNC_FRAME: | |
204 | stats->rx_frame_errors++; | |
205 | break; | |
206 | case INGRESS_PKT_LEN: | |
207 | stats->rx_length_errors++; | |
208 | break; | |
209 | case INGRESS_PKT_UNDER: | |
210 | stats->rx_frame_errors++; | |
211 | break; | |
212 | case INGRESS_FIFO_OVERRUN: | |
213 | stats->rx_fifo_errors++; | |
214 | break; | |
215 | default: | |
216 | break; | |
217 | } | |
218 | } | |
219 | ||
220 | static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, | |
221 | u32 offset, u32 val) | |
222 | { | |
223 | void __iomem *addr = pdata->eth_csr_addr + offset; | |
224 | ||
225 | iowrite32(val, addr); | |
226 | } | |
227 | ||
228 | static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, | |
229 | u32 offset, u32 val) | |
230 | { | |
231 | void __iomem *addr = pdata->eth_ring_if_addr + offset; | |
232 | ||
233 | iowrite32(val, addr); | |
234 | } | |
235 | ||
236 | static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, | |
237 | u32 offset, u32 val) | |
238 | { | |
239 | void __iomem *addr = pdata->eth_diag_csr_addr + offset; | |
240 | ||
241 | iowrite32(val, addr); | |
242 | } | |
243 | ||
244 | static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, | |
245 | u32 offset, u32 val) | |
246 | { | |
247 | void __iomem *addr = pdata->mcx_mac_csr_addr + offset; | |
248 | ||
249 | iowrite32(val, addr); | |
250 | } | |
251 | ||
252 | static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, | |
253 | void __iomem *cmd, void __iomem *cmd_done, | |
254 | u32 wr_addr, u32 wr_data) | |
255 | { | |
256 | u32 done; | |
257 | u8 wait = 10; | |
258 | ||
259 | iowrite32(wr_addr, addr); | |
260 | iowrite32(wr_data, wr); | |
261 | iowrite32(XGENE_ENET_WR_CMD, cmd); | |
262 | ||
263 | /* wait for write command to complete */ | |
264 | while (!(done = ioread32(cmd_done)) && wait--) | |
265 | udelay(1); | |
266 | ||
267 | if (!done) | |
268 | return false; | |
269 | ||
270 | iowrite32(0, cmd); | |
271 | ||
272 | return true; | |
273 | } | |
274 | ||
275 | static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata, | |
276 | u32 wr_addr, u32 wr_data) | |
277 | { | |
278 | void __iomem *addr, *wr, *cmd, *cmd_done; | |
279 | ||
280 | addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; | |
281 | wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; | |
282 | cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; | |
283 | cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; | |
284 | ||
285 | if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) | |
286 | netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", | |
287 | wr_addr); | |
288 | } | |
289 | ||
290 | static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, | |
291 | u32 offset, u32 *val) | |
292 | { | |
293 | void __iomem *addr = pdata->eth_csr_addr + offset; | |
294 | ||
295 | *val = ioread32(addr); | |
296 | } | |
297 | ||
298 | static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, | |
299 | u32 offset, u32 *val) | |
300 | { | |
301 | void __iomem *addr = pdata->eth_diag_csr_addr + offset; | |
302 | ||
303 | *val = ioread32(addr); | |
304 | } | |
305 | ||
306 | static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, | |
307 | u32 offset, u32 *val) | |
308 | { | |
309 | void __iomem *addr = pdata->mcx_mac_csr_addr + offset; | |
310 | ||
311 | *val = ioread32(addr); | |
312 | } | |
313 | ||
314 | static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, | |
315 | void __iomem *cmd, void __iomem *cmd_done, | |
316 | u32 rd_addr, u32 *rd_data) | |
317 | { | |
318 | u32 done; | |
319 | u8 wait = 10; | |
320 | ||
321 | iowrite32(rd_addr, addr); | |
322 | iowrite32(XGENE_ENET_RD_CMD, cmd); | |
323 | ||
324 | /* wait for read command to complete */ | |
325 | while (!(done = ioread32(cmd_done)) && wait--) | |
326 | udelay(1); | |
327 | ||
328 | if (!done) | |
329 | return false; | |
330 | ||
331 | *rd_data = ioread32(rd); | |
332 | iowrite32(0, cmd); | |
333 | ||
334 | return true; | |
335 | } | |
336 | ||
337 | static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata, | |
338 | u32 rd_addr, u32 *rd_data) | |
339 | { | |
340 | void __iomem *addr, *rd, *cmd, *cmd_done; | |
341 | ||
342 | addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; | |
343 | rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; | |
344 | cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; | |
345 | cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; | |
346 | ||
347 | if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data)) | |
348 | netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", | |
349 | rd_addr); | |
350 | } | |
351 | ||
/* Write @data to PHY register @reg of @phy_id through the MII
 * management interface.  Returns 0 on success or -EBUSY if the busy
 * indication never clears.
 */
static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	/* select PHY and register */
	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	/* writing the control register starts the MII write cycle */
	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	/* poll BUSY, up to ~10 iterations of 5-10us each */
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}
377 | ||
/* Read PHY register @reg of @phy_id through the MII management
 * interface.  Returns the register value on success or -EBUSY if the
 * busy indication never clears.
 */
static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	/* select PHY and register, then start a read cycle */
	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	/* poll BUSY, up to ~10 iterations of 5-10us each */
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	/* fetch the result and clear the command register */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}
404 | ||
d0eb7458 | 405 | static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) |
e6ad7673 IS |
406 | { |
407 | u32 addr0, addr1; | |
408 | u8 *dev_addr = pdata->ndev->dev_addr; | |
409 | ||
410 | addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | | |
411 | (dev_addr[1] << 8) | dev_addr[0]; | |
412 | addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); | |
e6ad7673 IS |
413 | |
414 | xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); | |
415 | xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); | |
416 | } | |
417 | ||
/* Release the ENET memory blocks from RAM shutdown and wait for every
 * block to report ready.  Returns 0 on success, -ENODEV on timeout.
 */
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	/* all-ones in BLOCK_MEM_RDY means every memory block is ready;
	 * poll up to ~10 iterations of 100-110us each
	 */
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
437 | ||
/* Pulse the MAC soft reset: assert SOFT_RESET1, then release it. */
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
443 | ||
/* Full MAC/port bring-up for the current pdata->phy_speed: reset the
 * MAC, program interface mode and duplex, station MAC address, MDC
 * clock, drop-on-empty-bufpool behaviour, and resume rx/tx traffic.
 */
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	/* read-modify-write the interface/clock configuration registers */
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		/* treat everything else (incl. SPEED_UNKNOWN) as gigabit */
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl |= ENET_GHD_MODE;
		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	/* MAC always runs full duplex */
	mc2 |= FULL_DUPLEX2;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	/* ungate rx/tx data-valid and resume rx on port 0 */
	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
516 | ||
517 | static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) | |
518 | { | |
519 | u32 val = 0xffffffff; | |
520 | ||
521 | xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val); | |
522 | xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val); | |
523 | xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val); | |
524 | xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val); | |
525 | } | |
526 | ||
/* Configure classifier (CLE) bypass so ingress traffic is steered
 * directly to @dst_ring_num using free pool @bufpool_id.
 */
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	/* free-pool select is the bufpool's buffer number offset by 0x20;
	 * NOTE(review): 0x20 looks like the base buffer number of the
	 * bufpool range — confirm against the ring numbering scheme.
	 */
	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	/* second bypass register carries destination queue and fp select */
	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
545 | ||
d0eb7458 | 546 | static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata) |
e6ad7673 IS |
547 | { |
548 | u32 data; | |
549 | ||
550 | xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); | |
551 | xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN); | |
552 | } | |
553 | ||
d0eb7458 | 554 | static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata) |
e6ad7673 IS |
555 | { |
556 | u32 data; | |
557 | ||
558 | xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); | |
559 | xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN); | |
560 | } | |
561 | ||
d0eb7458 | 562 | static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata) |
e6ad7673 IS |
563 | { |
564 | u32 data; | |
565 | ||
566 | xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); | |
567 | xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN); | |
568 | } | |
569 | ||
d0eb7458 | 570 | static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) |
e6ad7673 IS |
571 | { |
572 | u32 data; | |
573 | ||
574 | xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); | |
575 | xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); | |
576 | } | |
577 | ||
/* Reset the ethernet port: bounce the block clock (off/on to reset the
 * IP, leaving it enabled), bring the memories out of shutdown, set up
 * ring-interface associations and the MII management clock.
 */
static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	/* the enable/disable/enable sequence is the reset pulse */
	clk_prepare_enable(pdata->clk);
	clk_disable_unprepare(pdata->clk);
	clk_prepare_enable(pdata->clk);
	/* NOTE(review): xgene_enet_ecc_init() can fail (-ENODEV) but its
	 * return value is ignored here — confirm callers can tolerate that.
	 */
	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
}
594 | ||
/* Power down the port by disabling its clock. */
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	clk_disable_unprepare(pdata->clk);
}
599 | ||
600 | static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |
601 | { | |
602 | struct xgene_enet_pdata *pdata = bus->priv; | |
603 | u32 val; | |
604 | ||
605 | val = xgene_mii_phy_read(pdata, mii_id, regnum); | |
606 | netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", | |
607 | mii_id, regnum, val); | |
608 | ||
609 | return val; | |
610 | } | |
611 | ||
612 | static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |
613 | u16 val) | |
614 | { | |
615 | struct xgene_enet_pdata *pdata = bus->priv; | |
616 | ||
617 | netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", | |
618 | mii_id, regnum, val); | |
619 | return xgene_mii_phy_write(pdata, mii_id, regnum, val); | |
620 | } | |
621 | ||
/* phylib link-change callback: on link-up with a new speed, reprogram
 * the MAC and re-enable rx/tx; on link-down, gate rx/tx and forget the
 * cached speed so the next link-up reconfigures.
 */
static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		/* only reconfigure when the speed actually changed */
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}
642 | ||
643 | static int xgene_enet_phy_connect(struct net_device *ndev) | |
644 | { | |
645 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | |
646 | struct device_node *phy_np; | |
647 | struct phy_device *phy_dev; | |
648 | struct device *dev = &pdata->pdev->dev; | |
649 | ||
650 | phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); | |
651 | if (!phy_np) { | |
652 | netdev_dbg(ndev, "No phy-handle found\n"); | |
653 | return -ENODEV; | |
654 | } | |
655 | ||
656 | phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, | |
657 | 0, pdata->phy_mode); | |
658 | if (!phy_dev) { | |
659 | netdev_err(ndev, "Could not connect to PHY\n"); | |
660 | return -ENODEV; | |
661 | } | |
662 | ||
663 | pdata->phy_speed = SPEED_UNKNOWN; | |
664 | phy_dev->supported &= ~SUPPORTED_10baseT_Half & | |
665 | ~SUPPORTED_100baseT_Half & | |
666 | ~SUPPORTED_1000baseT_Half; | |
667 | phy_dev->advertising = phy_dev->supported; | |
668 | pdata->phy_dev = phy_dev; | |
669 | ||
670 | return 0; | |
671 | } | |
672 | ||
/* Find the "apm,xgene-mdio" child node, register an MDIO bus on it and
 * connect the PHY.  Returns 0 on success or a negative errno; on PHY
 * connect failure the MDIO registration is unwound.
 */
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct device *dev = &pdata->pdev->dev;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	struct mii_bus *mdio_bus;
	int ret;

	/* locate the MDIO child node in the device tree */
	for_each_child_of_node(dev->of_node, child_np) {
		if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
			/* NOTE(review): breaking out of the iterator keeps
			 * a reference on child_np/mdio_np; confirm whether
			 * an of_node_put() is needed after registration.
			 */
			mdio_np = child_np;
			break;
		}
	}

	if (!mdio_np) {
		netdev_dbg(ndev, "No mdio node in the dts\n");
		return -ENXIO;
	}

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = of_mdiobus_register(mdio_bus, mdio_np);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	/* on PHY connect failure, unwind the MDIO registration */
	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}
721 | ||
/* Tear down the MDIO bus registered by xgene_enet_mdio_config(). */
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}
d0eb7458 IS |
728 | |
/* MAC operation table exported to the core driver. */
struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};
738 | ||
/* Port operation table exported to the core driver. */
struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};