1 | /* Renesas Ethernet AVB device driver |
2 | * | |
3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | |
4 | * Copyright (C) 2015 Renesas Solutions Corp. | |
5 | * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com> | |
6 | * | |
7 | * Based on the SuperH Ethernet driver | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify it | |
10 | * under the terms and conditions of the GNU General Public License version 2, | |
11 | * as published by the Free Software Foundation. | |
12 | */ | |
13 | ||
14 | #include <linux/cache.h> | |
15 | #include <linux/clk.h> | |
16 | #include <linux/delay.h> | |
17 | #include <linux/dma-mapping.h> | |
18 | #include <linux/err.h> | |
19 | #include <linux/etherdevice.h> | |
20 | #include <linux/ethtool.h> | |
21 | #include <linux/if_vlan.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/list.h> | |
24 | #include <linux/module.h> | |
25 | #include <linux/net_tstamp.h> | |
26 | #include <linux/of.h> | |
27 | #include <linux/of_device.h> | |
28 | #include <linux/of_irq.h> | |
29 | #include <linux/of_mdio.h> | |
30 | #include <linux/of_net.h> | |
31 | #include <linux/pm_runtime.h> |
32 | #include <linux/slab.h> | |
33 | #include <linux/spinlock.h> | |
34 | ||
35 | #include "ravb.h" | |
36 | ||
37 | #define RAVB_DEF_MSG_ENABLE \ | |
38 | (NETIF_MSG_LINK | \ | |
39 | NETIF_MSG_TIMER | \ | |
40 | NETIF_MSG_RX_ERR | \ | |
41 | NETIF_MSG_TX_ERR) | |
42 | ||
43 | int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) |
44 | { |
45 | int i; | |
46 | ||
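| /* Poll every 10 us; the 10000-pass budget below bounds the wait to |
| * roughly 100 ms before -ETIMEDOUT is returned. |
| */ |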
47 | for (i = 0; i < 10000; i++) { | |
48 | if ((ravb_read(ndev, reg) & mask) == value) | |
49 | return 0; | |
50 | udelay(10); | |
51 | } | |
52 | return -ETIMEDOUT; | |
53 | } | |
54 | ||
55 | static int ravb_config(struct net_device *ndev) | |
56 | { | |
57 | int error; | |
58 | ||
59 | /* Set config mode */ | |
60 | ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, | |
61 | CCC); | |
62 | /* Check if the operating mode is changed to the config mode */ | |
63 | error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); | |
64 | if (error) | |
65 | netdev_err(ndev, "failed to switch device to config mode\n"); | |
66 | ||
67 | return error; | |
68 | } | |
69 | ||
70 | static void ravb_set_duplex(struct net_device *ndev) | |
71 | { | |
72 | struct ravb_private *priv = netdev_priv(ndev); | |
73 | u32 ecmr = ravb_read(ndev, ECMR); | |
74 | ||
75 | if (priv->duplex) /* Full */ | |
76 | ecmr |= ECMR_DM; | |
77 | else /* Half */ | |
78 | ecmr &= ~ECMR_DM; | |
79 | ravb_write(ndev, ecmr, ECMR); | |
80 | } | |
81 | ||
82 | static void ravb_set_rate(struct net_device *ndev) | |
83 | { | |
84 | struct ravb_private *priv = netdev_priv(ndev); | |
85 | ||
86 | switch (priv->speed) { | |
87 | case 100: /* 100BASE */ | |
88 | ravb_write(ndev, GECMR_SPEED_100, GECMR); | |
89 | break; | |
90 | case 1000: /* 1000BASE */ | |
91 | ravb_write(ndev, GECMR_SPEED_1000, GECMR); | |
92 | break; | |
93 | default: | |
94 | break; | |
95 | } | |
96 | } | |
97 | ||
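| /* The DMAC expects RX buffers aligned to RAVB_ALIGN (128 bytes per |
| * ravb.h); reserve the misaligned remainder so that skb->data lands |
| * on that boundary. |
| */ |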
98 | static void ravb_set_buffer_align(struct sk_buff *skb) | |
99 | { | |
100 | u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); | |
101 | ||
102 | if (reserve) | |
103 | skb_reserve(skb, RAVB_ALIGN - reserve); | |
104 | } | |
105 | ||
106 | /* Get MAC address from the MAC address registers | |
107 | * | |
108 | * Ethernet AVB device doesn't have ROM for MAC address. | |
109 | * This function gets the MAC address that was used by a bootloader. | |
110 | */ | |
111 | static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) | |
112 | { | |
113 | if (mac) { | |
114 | ether_addr_copy(ndev->dev_addr, mac); | |
115 | } else { | |
116 | ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24); | |
117 | ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF; | |
118 | ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF; | |
119 | ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF; | |
120 | ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF; | |
121 | ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF; | |
122 | } | |
123 | } | |
124 | ||
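| /* Bit-banged MDIO: each helper below drives or samples a single bit |
| * of the PIR register, and the generic mdio-bitbang library sequences |
| * these callbacks into complete Clause 22 MDIO transactions. |
| */ |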
125 | static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) | |
126 | { | |
127 | struct ravb_private *priv = container_of(ctrl, struct ravb_private, | |
128 | mdiobb); | |
129 | u32 pir = ravb_read(priv->ndev, PIR); | |
130 | ||
131 | if (set) | |
132 | pir |= mask; | |
133 | else | |
134 | pir &= ~mask; | |
135 | ravb_write(priv->ndev, pir, PIR); | |
136 | } | |
137 | ||
138 | /* MDC pin control */ | |
139 | static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level) | |
140 | { | |
141 | ravb_mdio_ctrl(ctrl, PIR_MDC, level); | |
142 | } | |
143 | ||
144 | /* Data I/O pin control */ | |
145 | static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output) | |
146 | { | |
147 | ravb_mdio_ctrl(ctrl, PIR_MMD, output); | |
148 | } | |
149 | ||
150 | /* Set data bit */ | |
151 | static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value) | |
152 | { | |
153 | ravb_mdio_ctrl(ctrl, PIR_MDO, value); | |
154 | } | |
155 | ||
156 | /* Get data bit */ | |
157 | static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl) | |
158 | { | |
159 | struct ravb_private *priv = container_of(ctrl, struct ravb_private, | |
160 | mdiobb); | |
161 | ||
162 | return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; | |
163 | } | |
164 | ||
165 | /* MDIO bus control struct */ | |
166 | static struct mdiobb_ops bb_ops = { | |
167 | .owner = THIS_MODULE, | |
168 | .set_mdc = ravb_set_mdc, | |
169 | .set_mdio_dir = ravb_set_mdio_dir, | |
170 | .set_mdio_data = ravb_set_mdio_data, | |
171 | .get_mdio_data = ravb_get_mdio_data, | |
172 | }; | |
173 | ||
174 | /* Free skb's and DMA buffers for Ethernet AVB */ | |
175 | static void ravb_ring_free(struct net_device *ndev, int q) | |
176 | { | |
177 | struct ravb_private *priv = netdev_priv(ndev); | |
178 | int ring_size; | |
179 | int i; | |
180 | ||
181 | /* Free RX skb ringbuffer */ | |
182 | if (priv->rx_skb[q]) { | |
183 | for (i = 0; i < priv->num_rx_ring[q]; i++) | |
184 | dev_kfree_skb(priv->rx_skb[q][i]); | |
185 | } | |
186 | kfree(priv->rx_skb[q]); | |
187 | priv->rx_skb[q] = NULL; | |
188 | ||
189 | /* Free TX skb ringbuffer */ | |
190 | if (priv->tx_skb[q]) { | |
191 | for (i = 0; i < priv->num_tx_ring[q]; i++) | |
192 | dev_kfree_skb(priv->tx_skb[q][i]); | |
193 | } | |
194 | kfree(priv->tx_skb[q]); | |
195 | priv->tx_skb[q] = NULL; | |
196 | ||
197 | /* Free aligned TX buffers */ | |
198 | if (priv->tx_buffers[q]) { | |
199 | for (i = 0; i < priv->num_tx_ring[q]; i++) | |
200 | kfree(priv->tx_buffers[q][i]); | |
201 | } | |
202 | kfree(priv->tx_buffers[q]); | |
203 | priv->tx_buffers[q] = NULL; | |
204 | ||
205 | if (priv->rx_ring[q]) { | |
206 | ring_size = sizeof(struct ravb_ex_rx_desc) * | |
207 | (priv->num_rx_ring[q] + 1); | |
208 | dma_free_coherent(NULL, ring_size, priv->rx_ring[q], | |
209 | priv->rx_desc_dma[q]); | |
210 | priv->rx_ring[q] = NULL; | |
211 | } | |
212 | ||
213 | if (priv->tx_ring[q]) { | |
214 | ring_size = sizeof(struct ravb_tx_desc) * | |
215 | (priv->num_tx_ring[q] + 1); | |
216 | dma_free_coherent(NULL, ring_size, priv->tx_ring[q], | |
217 | priv->tx_desc_dma[q]); | |
218 | priv->tx_ring[q] = NULL; | |
219 | } | |
220 | } | |
221 | ||
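| /* Ring layout: each queue holds num_*x_ring[q] data descriptors plus |
| * one trailing DT_LINKFIX descriptor whose pointer wraps back to the |
| * ring base, so the DMAC sees the list as circular; that is why |
| * ravb_ring_init() allocates "num + 1" entries. |
| */ |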
222 | /* Format skb and descriptor buffer for Ethernet AVB */ | |
223 | static void ravb_ring_format(struct net_device *ndev, int q) | |
224 | { | |
225 | struct ravb_private *priv = netdev_priv(ndev); | |
226 | struct ravb_ex_rx_desc *rx_desc = NULL; | |
227 | struct ravb_tx_desc *tx_desc = NULL; | |
228 | struct ravb_desc *desc = NULL; | |
229 | int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; | |
230 | int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; | |
231 | struct sk_buff *skb; | |
232 | dma_addr_t dma_addr; | |
233 | void *buffer; | |
234 | int i; | |
235 | ||
236 | priv->cur_rx[q] = 0; | |
237 | priv->cur_tx[q] = 0; | |
238 | priv->dirty_rx[q] = 0; | |
239 | priv->dirty_tx[q] = 0; | |
240 | ||
241 | memset(priv->rx_ring[q], 0, rx_ring_size); | |
242 | /* Build RX ring buffer */ | |
243 | for (i = 0; i < priv->num_rx_ring[q]; i++) { | |
244 | priv->rx_skb[q][i] = NULL; | |
245 | skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); | |
246 | if (!skb) | |
247 | break; | |
248 | ravb_set_buffer_align(skb); | |
249 | /* RX descriptor */ | |
250 | rx_desc = &priv->rx_ring[q][i]; | |
251 | /* The size of the buffer should be on 16-byte boundary. */ | |
252 | rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); | |
253 | dma_addr = dma_map_single(&ndev->dev, skb->data, | |
254 | ALIGN(PKT_BUF_SZ, 16), | |
255 | DMA_FROM_DEVICE); | |
256 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | |
257 | dev_kfree_skb(skb); | |
258 | break; | |
259 | } | |
260 | priv->rx_skb[q][i] = skb; | |
261 | rx_desc->dptr = cpu_to_le32(dma_addr); | |
262 | rx_desc->die_dt = DT_FEMPTY; | |
263 | } | |
264 | rx_desc = &priv->rx_ring[q][i]; | |
265 | rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); | |
266 | rx_desc->die_dt = DT_LINKFIX; /* type */ | |
267 | priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); | |
268 | ||
269 | memset(priv->tx_ring[q], 0, tx_ring_size); | |
270 | /* Build TX ring buffer */ | |
271 | for (i = 0; i < priv->num_tx_ring[q]; i++) { | |
272 | priv->tx_skb[q][i] = NULL; | |
273 | priv->tx_buffers[q][i] = NULL; | |
274 | buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); | |
275 | if (!buffer) | |
276 | break; | |
277 | /* Aligned TX buffer */ | |
278 | priv->tx_buffers[q][i] = buffer; | |
279 | tx_desc = &priv->tx_ring[q][i]; | |
280 | tx_desc->die_dt = DT_EEMPTY; | |
281 | } | |
282 | tx_desc = &priv->tx_ring[q][i]; | |
283 | tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); | |
284 | tx_desc->die_dt = DT_LINKFIX; /* type */ | |
285 | ||
286 | /* RX descriptor base address for best effort */ | |
287 | desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; | |
288 | desc->die_dt = DT_LINKFIX; /* type */ | |
289 | desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); | |
290 | ||
291 | /* TX descriptor base address for best effort */ | |
292 | desc = &priv->desc_bat[q]; | |
293 | desc->die_dt = DT_LINKFIX; /* type */ | |
294 | desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); | |
295 | } | |
296 | ||
297 | /* Init skb and descriptor buffer for Ethernet AVB */ | |
298 | static int ravb_ring_init(struct net_device *ndev, int q) | |
299 | { | |
300 | struct ravb_private *priv = netdev_priv(ndev); | |
301 | int ring_size; | |
302 | ||
303 | /* Allocate RX and TX skb rings */ | |
304 | priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], | |
305 | sizeof(*priv->rx_skb[q]), GFP_KERNEL); | |
306 | priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], | |
307 | sizeof(*priv->tx_skb[q]), GFP_KERNEL); | |
308 | if (!priv->rx_skb[q] || !priv->tx_skb[q]) | |
309 | goto error; | |
310 | ||
311 | /* Allocate rings for the aligned buffers */ | |
312 | priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], | |
313 | sizeof(*priv->tx_buffers[q]), GFP_KERNEL); | |
314 | if (!priv->tx_buffers[q]) | |
315 | goto error; | |
316 | ||
317 | /* Allocate all RX descriptors. */ | |
318 | ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); | |
319 | priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, | |
320 | &priv->rx_desc_dma[q], | |
321 | GFP_KERNEL); | |
322 | if (!priv->rx_ring[q]) | |
323 | goto error; | |
324 | ||
325 | priv->dirty_rx[q] = 0; | |
326 | ||
327 | /* Allocate all TX descriptors. */ | |
328 | ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1); | |
329 | priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size, | |
330 | &priv->tx_desc_dma[q], | |
331 | GFP_KERNEL); | |
332 | if (!priv->tx_ring[q]) | |
333 | goto error; | |
334 | ||
335 | return 0; | |
336 | ||
337 | error: | |
338 | ravb_ring_free(ndev, q); | |
339 | ||
340 | return -ENOMEM; | |
341 | } | |
342 | ||
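| /* RFLR below is set to MTU + Ethernet header + one VLAN tag + FCS, |
| * and the MAC address is split across MAHR (upper four bytes) and |
| * MALR (lower two), mirroring ravb_read_mac_address() above. |
| */ |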
343 | /* E-MAC init function */ | |
344 | static void ravb_emac_init(struct net_device *ndev) | |
345 | { | |
346 | struct ravb_private *priv = netdev_priv(ndev); | |
347 | u32 ecmr; | |
348 | ||
349 | /* Receive frame limit set register */ | |
350 | ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); | |
351 | ||
352 | /* PAUSE prohibition */ | |
353 | ecmr = ravb_read(ndev, ECMR); | |
354 | ecmr &= ECMR_DM; | |
355 | ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; | |
356 | ravb_write(ndev, ecmr, ECMR); | |
357 | ||
358 | ravb_set_rate(ndev); | |
359 | ||
360 | /* Set MAC address */ | |
361 | ravb_write(ndev, | |
362 | (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | | |
363 | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); | |
364 | ravb_write(ndev, | |
365 | (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); | |
366 | ||
367 | ravb_write(ndev, 1, MPR); | |
368 | ||
369 | /* E-MAC status register clear */ | |
370 | ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); | |
371 | ||
372 | /* E-MAC interrupt enable register */ | |
373 | ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); | |
374 | } | |
375 | ||
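| /* All ring and interrupt setup below must be done while the AVB-DMAC |
| * is in CONFIG mode; the final CCC write switches it to OPERATION, |
| * after which the descriptors are live. |
| */ |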
376 | /* Device init function for Ethernet AVB */ | |
377 | static int ravb_dmac_init(struct net_device *ndev) | |
378 | { | |
379 | int error; | |
380 | ||
381 | /* Set CONFIG mode */ | |
382 | error = ravb_config(ndev); | |
383 | if (error) | |
384 | return error; | |
385 | ||
386 | error = ravb_ring_init(ndev, RAVB_BE); | |
387 | if (error) | |
388 | return error; | |
389 | error = ravb_ring_init(ndev, RAVB_NC); | |
390 | if (error) { | |
391 | ravb_ring_free(ndev, RAVB_BE); | |
392 | return error; | |
393 | } | |
394 | ||
395 | /* Descriptor format */ | |
396 | ravb_ring_format(ndev, RAVB_BE); | |
397 | ravb_ring_format(ndev, RAVB_NC); | |
398 | ||
399 | #if defined(__LITTLE_ENDIAN) | |
400 | ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC); | |
401 | #else | |
402 | ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC); | |
403 | #endif | |
404 | ||
405 | /* Set AVB RX */ | |
406 | ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR); | |
407 | ||
408 | /* Set FIFO size */ | |
409 | ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); | |
410 | ||
411 | /* Timestamp enable */ | |
412 | ravb_write(ndev, TCCR_TFEN, TCCR); | |
413 | ||
414 | /* Interrupt enable: */ | |
415 | /* Frame receive */ | |
416 | ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); | |
417 | /* Receive FIFO full warning */ | |
418 | ravb_write(ndev, RIC1_RFWE, RIC1); | |
419 | /* Receive FIFO full error, descriptor empty */ | |
420 | ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); | |
421 | /* Frame transmitted, timestamp FIFO updated */ | |
422 | ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); | |
423 | ||
424 | /* Setting the control will start the AVB-DMAC process. */ | |
425 | ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION, | |
426 | CCC); | |
427 | ||
428 | return 0; | |
429 | } | |
430 | ||
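| /* On completion the DMAC rewrites a transmitted descriptor's type to |
| * DT_FEMPTY; the loop below reclaims such descriptors and hands them |
| * back as DT_EEMPTY, with dma_rmb() ordering the type check before |
| * the remaining descriptor fields are read. |
| */ |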
431 | /* Free TX skb function for AVB-IP */ | |
432 | static int ravb_tx_free(struct net_device *ndev, int q) | |
433 | { | |
434 | struct ravb_private *priv = netdev_priv(ndev); | |
435 | struct net_device_stats *stats = &priv->stats[q]; | |
436 | struct ravb_tx_desc *desc; | |
437 | int free_num = 0; | |
438 | int entry = 0; | |
439 | u32 size; | |
440 | ||
441 | for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { | |
442 | entry = priv->dirty_tx[q] % priv->num_tx_ring[q]; | |
443 | desc = &priv->tx_ring[q][entry]; | |
444 | if (desc->die_dt != DT_FEMPTY) | |
445 | break; | |
446 | /* Descriptor type must be checked before all other reads */ | |
447 | dma_rmb(); | |
448 | size = le16_to_cpu(desc->ds_tagl) & TX_DS; | |
449 | /* Free the original skb. */ | |
450 | if (priv->tx_skb[q][entry]) { | |
451 | dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), | |
452 | size, DMA_TO_DEVICE); | |
453 | dev_kfree_skb_any(priv->tx_skb[q][entry]); | |
454 | priv->tx_skb[q][entry] = NULL; | |
455 | free_num++; | |
456 | } | |
457 | stats->tx_packets++; | |
458 | stats->tx_bytes += size; | |
459 | desc->die_dt = DT_EEMPTY; | |
460 | } | |
461 | return free_num; | |
462 | } | |
463 | ||
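| /* Drain the TX timestamp FIFO: TSR_TFFL gives the fill level, each |
| * TFA0..TFA2 entry carries a timestamp plus the tag queued by |
| * ravb_start_xmit(), and setting TCCR_TFR releases the FIFO entry. |
| */ |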
464 | static void ravb_get_tx_tstamp(struct net_device *ndev) | |
465 | { | |
466 | struct ravb_private *priv = netdev_priv(ndev); | |
467 | struct ravb_tstamp_skb *ts_skb, *ts_skb2; | |
468 | struct skb_shared_hwtstamps shhwtstamps; | |
469 | struct sk_buff *skb; | |
470 | struct timespec64 ts; | |
471 | u16 tag, tfa_tag; | |
472 | int count; | |
473 | u32 tfa2; | |
474 | ||
475 | count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8; | |
476 | while (count--) { | |
477 | tfa2 = ravb_read(ndev, TFA2); | |
478 | tfa_tag = (tfa2 & TFA2_TST) >> 16; | |
479 | ts.tv_nsec = (u64)ravb_read(ndev, TFA0); | |
480 | ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) | | |
481 | ravb_read(ndev, TFA1); | |
482 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | |
483 | shhwtstamps.hwtstamp = timespec64_to_ktime(ts); | |
484 | list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, | |
485 | list) { | |
486 | skb = ts_skb->skb; | |
487 | tag = ts_skb->tag; | |
488 | list_del(&ts_skb->list); | |
489 | kfree(ts_skb); | |
490 | if (tag == tfa_tag) { | |
491 | skb_tstamp_tx(skb, &shhwtstamps); | |
492 | break; | |
493 | } | |
494 | } | |
495 | ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR); | |
496 | } | |
497 | } | |
498 | ||
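| /* ravb_rx() returns true when it exhausted the NAPI quota before the |
| * RX ring drained, telling ravb_poll() to stay scheduled. |
| */ |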
499 | /* Packet receive function for Ethernet AVB */ | |
500 | static bool ravb_rx(struct net_device *ndev, int *quota, int q) | |
501 | { | |
502 | struct ravb_private *priv = netdev_priv(ndev); | |
503 | int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; | |
504 | int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - | |
505 | priv->cur_rx[q]; | |
506 | struct net_device_stats *stats = &priv->stats[q]; | |
507 | struct ravb_ex_rx_desc *desc; | |
508 | struct sk_buff *skb; | |
509 | dma_addr_t dma_addr; | |
510 | struct timespec64 ts; | |
511 | u16 pkt_len = 0; | |
512 | u8 desc_status; | |
513 | int limit; | |
514 | ||
515 | boguscnt = min(boguscnt, *quota); | |
516 | limit = boguscnt; | |
517 | desc = &priv->rx_ring[q][entry]; | |
518 | while (desc->die_dt != DT_FEMPTY) { | |
519 | /* Descriptor type must be checked before all other reads */ | |
520 | dma_rmb(); | |
521 | desc_status = desc->msc; | |
522 | pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; | |
523 | ||
524 | if (--boguscnt < 0) | |
525 | break; | |
526 | ||
527 | if (desc_status & MSC_MC) | |
528 | stats->multicast++; | |
529 | ||
530 | if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | | |
531 | MSC_CEEF)) { | |
532 | stats->rx_errors++; | |
533 | if (desc_status & MSC_CRC) | |
534 | stats->rx_crc_errors++; | |
535 | if (desc_status & MSC_RFE) | |
536 | stats->rx_frame_errors++; | |
537 | if (desc_status & (MSC_RTLF | MSC_RTSF)) | |
538 | stats->rx_length_errors++; | |
539 | if (desc_status & MSC_CEEF) | |
540 | stats->rx_missed_errors++; | |
541 | } else { | |
542 | u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; | |
543 | ||
544 | skb = priv->rx_skb[q][entry]; | |
545 | priv->rx_skb[q][entry] = NULL; | |
546 | dma_sync_single_for_cpu(&ndev->dev, | |
547 | le32_to_cpu(desc->dptr), | |
548 | ALIGN(PKT_BUF_SZ, 16), | |
549 | DMA_FROM_DEVICE); | |
550 | get_ts &= (q == RAVB_NC) ? | |
551 | RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : | |
552 | ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; | |
553 | if (get_ts) { | |
554 | struct skb_shared_hwtstamps *shhwtstamps; | |
555 | ||
556 | shhwtstamps = skb_hwtstamps(skb); | |
557 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | |
558 | ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << | |
559 | 32) | le32_to_cpu(desc->ts_sl); | |
560 | ts.tv_nsec = le32_to_cpu(desc->ts_n); | |
561 | shhwtstamps->hwtstamp = timespec64_to_ktime(ts); | |
562 | } | |
563 | skb_put(skb, pkt_len); | |
564 | skb->protocol = eth_type_trans(skb, ndev); | |
565 | napi_gro_receive(&priv->napi[q], skb); | |
566 | stats->rx_packets++; | |
567 | stats->rx_bytes += pkt_len; | |
568 | } | |
569 | ||
570 | entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; | |
571 | desc = &priv->rx_ring[q][entry]; | |
572 | } | |
573 | ||
574 | /* Refill the RX ring buffers. */ | |
575 | for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { | |
576 | entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; | |
577 | desc = &priv->rx_ring[q][entry]; | |
578 | /* The size of the buffer should be on 16-byte boundary. */ | |
579 | desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); | |
580 | ||
581 | if (!priv->rx_skb[q][entry]) { | |
582 | skb = netdev_alloc_skb(ndev, | |
583 | PKT_BUF_SZ + RAVB_ALIGN - 1); | |
584 | if (!skb) | |
585 | break; /* Better luck next round. */ | |
586 | ravb_set_buffer_align(skb); | |
587 | dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), | |
588 | ALIGN(PKT_BUF_SZ, 16), | |
589 | DMA_FROM_DEVICE); | |
590 | dma_addr = dma_map_single(&ndev->dev, skb->data, | |
591 | le16_to_cpu(desc->ds_cc), | |
592 | DMA_FROM_DEVICE); | |
593 | skb_checksum_none_assert(skb); | |
594 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | |
595 | dev_kfree_skb_any(skb); | |
596 | break; | |
597 | } | |
598 | desc->dptr = cpu_to_le32(dma_addr); | |
599 | priv->rx_skb[q][entry] = skb; | |
600 | } | |
601 | /* Descriptor type must be set after all the above writes */ | |
602 | dma_wmb(); | |
603 | desc->die_dt = DT_FEMPTY; | |
604 | } | |
605 | ||
606 | *quota -= limit - (++boguscnt); | |
607 | ||
608 | return boguscnt <= 0; | |
609 | } | |
610 | ||
611 | static void ravb_rcv_snd_disable(struct net_device *ndev) | |
612 | { | |
613 | /* Disable TX and RX */ | |
614 | ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR); | |
615 | } | |
616 | ||
617 | static void ravb_rcv_snd_enable(struct net_device *ndev) | |
618 | { | |
619 | /* Enable TX and RX */ | |
620 | ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR); | |
621 | } | |
622 | ||
623 | /* Stop the DMA processes and wait for them to finish */ | |
624 | static int ravb_stop_dma(struct net_device *ndev) | |
625 | { | |
626 | int error; | |
627 | ||
628 | /* Wait for stopping the hardware TX process */ | |
629 | error = ravb_wait(ndev, TCCR, | |
630 | TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0); | |
631 | if (error) | |
632 | return error; | |
633 | ||
634 | error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3, | |
635 | 0); | |
636 | if (error) | |
637 | return error; | |
638 | ||
639 | /* Stop the E-MAC's RX/TX processes. */ | |
640 | ravb_rcv_snd_disable(ndev); | |
641 | ||
642 | /* Wait for stopping the RX DMA process */ | |
643 | error = ravb_wait(ndev, CSR, CSR_RPO, 0); | |
644 | if (error) | |
645 | return error; | |
646 | ||
647 | /* Stop AVB-DMAC process */ | |
648 | return ravb_config(ndev); | |
649 | } | |
650 | ||
651 | /* E-MAC interrupt handler */ | |
652 | static void ravb_emac_interrupt(struct net_device *ndev) | |
653 | { | |
654 | struct ravb_private *priv = netdev_priv(ndev); | |
655 | u32 ecsr, psr; | |
656 | ||
657 | ecsr = ravb_read(ndev, ECSR); | |
658 | ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ | |
659 | if (ecsr & ECSR_ICD) | |
660 | ndev->stats.tx_carrier_errors++; | |
661 | if (ecsr & ECSR_LCHNG) { | |
662 | /* Link changed */ | |
663 | if (priv->no_avb_link) | |
664 | return; | |
665 | psr = ravb_read(ndev, PSR); | |
666 | if (priv->avb_link_active_low) | |
667 | psr ^= PSR_LMON; | |
668 | if (!(psr & PSR_LMON)) { | |
669 | /* Disable RX and TX */ | |
670 | ravb_rcv_snd_disable(ndev); | |
671 | } else { | |
672 | /* Enable RX and TX */ | |
673 | ravb_rcv_snd_enable(ndev); | |
674 | } | |
675 | } | |
676 | } | |
677 | ||
678 | /* Error interrupt handler */ | |
679 | static void ravb_error_interrupt(struct net_device *ndev) | |
680 | { | |
681 | struct ravb_private *priv = netdev_priv(ndev); | |
682 | u32 eis, ris2; | |
683 | ||
684 | eis = ravb_read(ndev, EIS); | |
685 | ravb_write(ndev, ~EIS_QFS, EIS); | |
686 | if (eis & EIS_QFS) { | |
687 | ris2 = ravb_read(ndev, RIS2); | |
688 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); | |
689 | ||
690 | /* Receive Descriptor Empty int */ | |
691 | if (ris2 & RIS2_QFF0) | |
692 | priv->stats[RAVB_BE].rx_over_errors++; | |
693 | ||
694 | /* Receive Descriptor Empty int */ | |
695 | if (ris2 & RIS2_QFF1) | |
696 | priv->stats[RAVB_NC].rx_over_errors++; | |
697 | ||
698 | /* Receive FIFO Overflow int */ | |
699 | if (ris2 & RIS2_RFFF) | |
700 | priv->rx_fifo_errors++; | |
701 | } | |
702 | } | |
703 | ||
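| /* Per-queue RX/TX interrupts are masked in RIC0/TIC before |
| * __napi_schedule(); ravb_poll() re-enables them once the queue is |
| * drained, the usual NAPI interrupt-mitigation pattern. |
| */ |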
704 | static irqreturn_t ravb_interrupt(int irq, void *dev_id) | |
705 | { | |
706 | struct net_device *ndev = dev_id; | |
707 | struct ravb_private *priv = netdev_priv(ndev); | |
708 | irqreturn_t result = IRQ_NONE; | |
709 | u32 iss; | |
710 | ||
711 | spin_lock(&priv->lock); | |
712 | /* Get interrupt status */ | |
713 | iss = ravb_read(ndev, ISS); | |
714 | ||
715 | /* Received and transmitted interrupts */ | |
716 | if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { | |
717 | u32 ris0 = ravb_read(ndev, RIS0); | |
718 | u32 ric0 = ravb_read(ndev, RIC0); | |
719 | u32 tis = ravb_read(ndev, TIS); | |
720 | u32 tic = ravb_read(ndev, TIC); | |
721 | int q; | |
722 | ||
723 | /* Timestamp updated */ | |
724 | if (tis & TIS_TFUF) { | |
725 | ravb_write(ndev, ~TIS_TFUF, TIS); | |
726 | ravb_get_tx_tstamp(ndev); | |
727 | result = IRQ_HANDLED; | |
728 | } | |
729 | ||
730 | /* Network control and best effort queue RX/TX */ | |
731 | for (q = RAVB_NC; q >= RAVB_BE; q--) { | |
732 | if (((ris0 & ric0) & BIT(q)) || | |
733 | ((tis & tic) & BIT(q))) { | |
734 | if (napi_schedule_prep(&priv->napi[q])) { | |
735 | /* Mask RX and TX interrupts */ | |
736 | ravb_write(ndev, ric0 & ~BIT(q), RIC0); | |
737 | ravb_write(ndev, tic & ~BIT(q), TIC); | |
738 | __napi_schedule(&priv->napi[q]); | |
739 | } else { | |
740 | netdev_warn(ndev, | |
741 | "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", | |
742 | ris0, ric0); | |
743 | netdev_warn(ndev, | |
744 | " tx status 0x%08x, tx mask 0x%08x.\n", | |
745 | tis, tic); | |
746 | } | |
747 | result = IRQ_HANDLED; | |
748 | } | |
749 | } | |
750 | } | |
751 | ||
752 | /* E-MAC status summary */ | |
753 | if (iss & ISS_MS) { | |
754 | ravb_emac_interrupt(ndev); | |
755 | result = IRQ_HANDLED; | |
756 | } | |
757 | ||
758 | /* Error status summary */ | |
759 | if (iss & ISS_ES) { | |
760 | ravb_error_interrupt(ndev); | |
761 | result = IRQ_HANDLED; | |
762 | } | |
763 | ||
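| /* gPTP interrupt status summary */ |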
764 | if (iss & ISS_CGIS) |
765 | result = ravb_ptp_interrupt(ndev); | |
766 | ||
767 | mmiowb(); |
768 | spin_unlock(&priv->lock); | |
769 | return result; | |
770 | } | |
771 | ||
772 | static int ravb_poll(struct napi_struct *napi, int budget) | |
773 | { | |
774 | struct net_device *ndev = napi->dev; | |
775 | struct ravb_private *priv = netdev_priv(ndev); | |
776 | unsigned long flags; | |
777 | int q = napi - priv->napi; | |
778 | int mask = BIT(q); | |
779 | int quota = budget; | |
780 | u32 ris0, tis; | |
781 | ||
782 | for (;;) { | |
783 | tis = ravb_read(ndev, TIS); | |
784 | ris0 = ravb_read(ndev, RIS0); | |
785 | if (!((ris0 & mask) || (tis & mask))) | |
786 | break; | |
787 | ||
788 | /* Processing RX Descriptor Ring */ | |
789 | if (ris0 & mask) { | |
790 | /* Clear RX interrupt */ | |
791 | ravb_write(ndev, ~mask, RIS0); | |
792 | if (ravb_rx(ndev, &quota, q)) | |
793 | goto out; | |
794 | } | |
795 | /* Processing TX Descriptor Ring */ | |
796 | if (tis & mask) { | |
797 | spin_lock_irqsave(&priv->lock, flags); | |
798 | /* Clear TX interrupt */ | |
799 | ravb_write(ndev, ~mask, TIS); | |
800 | ravb_tx_free(ndev, q); | |
801 | netif_wake_subqueue(ndev, q); | |
802 | mmiowb(); | |
803 | spin_unlock_irqrestore(&priv->lock, flags); | |
804 | } | |
805 | } | |
806 | ||
807 | napi_complete(napi); | |
808 | ||
809 | /* Re-enable RX/TX interrupts */ | |
810 | spin_lock_irqsave(&priv->lock, flags); | |
811 | ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0); | |
812 | ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC); | |
813 | mmiowb(); | |
814 | spin_unlock_irqrestore(&priv->lock, flags); | |
815 | ||
816 | /* Receive error message handling */ | |
817 | priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; | |
818 | priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; | |
819 | if (priv->rx_over_errors != ndev->stats.rx_over_errors) { | |
820 | ndev->stats.rx_over_errors = priv->rx_over_errors; | |
821 | netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); | |
822 | } | |
823 | if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { | |
824 | ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; | |
825 | netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); | |
826 | } | |
827 | out: | |
828 | return budget - quota; | |
829 | } | |
830 | ||
831 | /* PHY state control function */ | |
832 | static void ravb_adjust_link(struct net_device *ndev) | |
833 | { | |
834 | struct ravb_private *priv = netdev_priv(ndev); | |
835 | struct phy_device *phydev = priv->phydev; | |
836 | bool new_state = false; | |
837 | ||
838 | if (phydev->link) { | |
839 | if (phydev->duplex != priv->duplex) { | |
840 | new_state = true; | |
841 | priv->duplex = phydev->duplex; | |
842 | ravb_set_duplex(ndev); | |
843 | } | |
844 | ||
845 | if (phydev->speed != priv->speed) { | |
846 | new_state = true; | |
847 | priv->speed = phydev->speed; | |
848 | ravb_set_rate(ndev); | |
849 | } | |
850 | if (!priv->link) { | |
851 | ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF, | |
852 | ECMR); | |
853 | new_state = true; | |
854 | priv->link = phydev->link; | |
855 | if (priv->no_avb_link) | |
856 | ravb_rcv_snd_enable(ndev); | |
857 | } | |
858 | } else if (priv->link) { | |
859 | new_state = true; | |
860 | priv->link = 0; | |
861 | priv->speed = 0; | |
862 | priv->duplex = -1; | |
863 | if (priv->no_avb_link) | |
864 | ravb_rcv_snd_disable(ndev); | |
865 | } | |
866 | ||
867 | if (new_state && netif_msg_link(priv)) | |
868 | phy_print_status(phydev); | |
869 | } | |
870 | ||
871 | /* PHY init function */ | |
872 | static int ravb_phy_init(struct net_device *ndev) | |
873 | { | |
874 | struct device_node *np = ndev->dev.parent->of_node; | |
875 | struct ravb_private *priv = netdev_priv(ndev); | |
876 | struct phy_device *phydev; | |
877 | struct device_node *pn; | |
878 | ||
879 | priv->link = 0; | |
880 | priv->speed = 0; | |
881 | priv->duplex = -1; | |
882 | ||
883 | /* Try connecting to PHY */ | |
884 | pn = of_parse_phandle(np, "phy-handle", 0); | |
885 | phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, | |
886 | priv->phy_interface); | |
887 | if (!phydev) { | |
888 | netdev_err(ndev, "failed to connect PHY\n"); | |
889 | return -ENOENT; | |
890 | } | |
891 | ||
892 | netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", | |
893 | phydev->addr, phydev->irq, phydev->drv->name); | |
894 | ||
895 | priv->phydev = phydev; | |
896 | ||
897 | return 0; | |
898 | } | |
899 | ||
900 | /* PHY control start function */ | |
901 | static int ravb_phy_start(struct net_device *ndev) | |
902 | { | |
903 | struct ravb_private *priv = netdev_priv(ndev); | |
904 | int error; | |
905 | ||
906 | error = ravb_phy_init(ndev); | |
907 | if (error) | |
908 | return error; | |
909 | ||
910 | phy_start(priv->phydev); | |
911 | ||
912 | return 0; | |
913 | } | |
914 | ||
915 | static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | |
916 | { | |
917 | struct ravb_private *priv = netdev_priv(ndev); | |
918 | int error = -ENODEV; | |
919 | unsigned long flags; | |
920 | ||
921 | if (priv->phydev) { | |
922 | spin_lock_irqsave(&priv->lock, flags); | |
923 | error = phy_ethtool_gset(priv->phydev, ecmd); | |
924 | spin_unlock_irqrestore(&priv->lock, flags); | |
925 | } | |
926 | ||
927 | return error; | |
928 | } | |
929 | ||
930 | static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | |
931 | { | |
932 | struct ravb_private *priv = netdev_priv(ndev); | |
933 | unsigned long flags; | |
934 | int error; | |
935 | ||
936 | if (!priv->phydev) | |
937 | return -ENODEV; | |
938 | ||
939 | spin_lock_irqsave(&priv->lock, flags); | |
940 | ||
941 | /* Disable TX and RX */ | |
942 | ravb_rcv_snd_disable(ndev); | |
943 | ||
944 | error = phy_ethtool_sset(priv->phydev, ecmd); | |
945 | if (error) | |
946 | goto error_exit; | |
947 | ||
948 | if (ecmd->duplex == DUPLEX_FULL) | |
949 | priv->duplex = 1; | |
950 | else | |
951 | priv->duplex = 0; | |
952 | ||
953 | ravb_set_duplex(ndev); | |
954 | ||
955 | error_exit: | |
956 | mdelay(1); | |
957 | ||
958 | /* Enable TX and RX */ | |
959 | ravb_rcv_snd_enable(ndev); | |
960 | ||
961 | mmiowb(); | |
962 | spin_unlock_irqrestore(&priv->lock, flags); | |
963 | ||
964 | return error; | |
965 | } | |
966 | ||
967 | static int ravb_nway_reset(struct net_device *ndev) | |
968 | { | |
969 | struct ravb_private *priv = netdev_priv(ndev); | |
970 | int error = -ENODEV; | |
971 | unsigned long flags; | |
972 | ||
973 | if (priv->phydev) { | |
974 | spin_lock_irqsave(&priv->lock, flags); | |
975 | error = phy_start_aneg(priv->phydev); | |
976 | spin_unlock_irqrestore(&priv->lock, flags); | |
977 | } | |
978 | ||
979 | return error; | |
980 | } | |
981 | ||
982 | static u32 ravb_get_msglevel(struct net_device *ndev) | |
983 | { | |
984 | struct ravb_private *priv = netdev_priv(ndev); | |
985 | ||
986 | return priv->msg_enable; | |
987 | } | |
988 | ||
989 | static void ravb_set_msglevel(struct net_device *ndev, u32 value) | |
990 | { | |
991 | struct ravb_private *priv = netdev_priv(ndev); | |
992 | ||
993 | priv->msg_enable = value; | |
994 | } | |
995 | ||
996 | static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { | |
997 | "rx_queue_0_current", | |
998 | "tx_queue_0_current", | |
999 | "rx_queue_0_dirty", | |
1000 | "tx_queue_0_dirty", | |
1001 | "rx_queue_0_packets", | |
1002 | "tx_queue_0_packets", | |
1003 | "rx_queue_0_bytes", | |
1004 | "tx_queue_0_bytes", | |
1005 | "rx_queue_0_mcast_packets", | |
1006 | "rx_queue_0_errors", | |
1007 | "rx_queue_0_crc_errors", | |
1008 | "rx_queue_0_frame_errors", | |
1009 | "rx_queue_0_length_errors", | |
1010 | "rx_queue_0_missed_errors", | |
1011 | "rx_queue_0_over_errors", | |
1012 | ||
1013 | "rx_queue_1_current", | |
1014 | "tx_queue_1_current", | |
1015 | "rx_queue_1_dirty", | |
1016 | "tx_queue_1_dirty", | |
1017 | "rx_queue_1_packets", | |
1018 | "tx_queue_1_packets", | |
1019 | "rx_queue_1_bytes", | |
1020 | "tx_queue_1_bytes", | |
1021 | "rx_queue_1_mcast_packets", | |
1022 | "rx_queue_1_errors", | |
1023 | "rx_queue_1_crc_errors", | |
1024 | "rx_queue_1_frame_errors", | |
1025 | "rx_queue_1_length_errors", | |
1026 | "rx_queue_1_missed_errors", | |
1027 | "rx_queue_1_over_errors", | |
1028 | }; | |
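| /* The string order above must stay in lockstep with the data[i++] |
| * sequence in ravb_get_ethtool_stats(): 15 counters per queue, |
| * best-effort queue first. |
| */ |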
1029 | ||
1030 | #define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats) | |
1031 | ||
1032 | static int ravb_get_sset_count(struct net_device *netdev, int sset) | |
1033 | { | |
1034 | switch (sset) { | |
1035 | case ETH_SS_STATS: | |
1036 | return RAVB_STATS_LEN; | |
1037 | default: | |
1038 | return -EOPNOTSUPP; | |
1039 | } | |
1040 | } | |
1041 | ||
1042 | static void ravb_get_ethtool_stats(struct net_device *ndev, | |
1043 | struct ethtool_stats *stats, u64 *data) | |
1044 | { | |
1045 | struct ravb_private *priv = netdev_priv(ndev); | |
1046 | int i = 0; | |
1047 | int q; | |
1048 | ||
1049 | /* Device-specific stats */ | |
1050 | for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) { | |
1051 | struct net_device_stats *stats = &priv->stats[q]; | |
1052 | ||
1053 | data[i++] = priv->cur_rx[q]; | |
1054 | data[i++] = priv->cur_tx[q]; | |
1055 | data[i++] = priv->dirty_rx[q]; | |
1056 | data[i++] = priv->dirty_tx[q]; | |
1057 | data[i++] = stats->rx_packets; | |
1058 | data[i++] = stats->tx_packets; | |
1059 | data[i++] = stats->rx_bytes; | |
1060 | data[i++] = stats->tx_bytes; | |
1061 | data[i++] = stats->multicast; | |
1062 | data[i++] = stats->rx_errors; | |
1063 | data[i++] = stats->rx_crc_errors; | |
1064 | data[i++] = stats->rx_frame_errors; | |
1065 | data[i++] = stats->rx_length_errors; | |
1066 | data[i++] = stats->rx_missed_errors; | |
1067 | data[i++] = stats->rx_over_errors; | |
1068 | } | |
1069 | } | |
1070 | ||
1071 | static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) | |
1072 | { | |
1073 | switch (stringset) { | |
1074 | case ETH_SS_STATS: | |
1075 | memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); | |
1076 | break; | |
1077 | } | |
1078 | } | |
1079 | ||
1080 | static void ravb_get_ringparam(struct net_device *ndev, | |
1081 | struct ethtool_ringparam *ring) | |
1082 | { | |
1083 | struct ravb_private *priv = netdev_priv(ndev); | |
1084 | ||
1085 | ring->rx_max_pending = BE_RX_RING_MAX; | |
1086 | ring->tx_max_pending = BE_TX_RING_MAX; | |
1087 | ring->rx_pending = priv->num_rx_ring[RAVB_BE]; | |
1088 | ring->tx_pending = priv->num_tx_ring[RAVB_BE]; | |
1089 | } | |
1090 | ||
1091 | static int ravb_set_ringparam(struct net_device *ndev, | |
1092 | struct ethtool_ringparam *ring) | |
1093 | { | |
1094 | struct ravb_private *priv = netdev_priv(ndev); | |
1095 | int error; | |
1096 | ||
1097 | if (ring->tx_pending > BE_TX_RING_MAX || | |
1098 | ring->rx_pending > BE_RX_RING_MAX || | |
1099 | ring->tx_pending < BE_TX_RING_MIN || | |
1100 | ring->rx_pending < BE_RX_RING_MIN) | |
1101 | return -EINVAL; | |
1102 | if (ring->rx_mini_pending || ring->rx_jumbo_pending) | |
1103 | return -EINVAL; | |
1104 | ||
1105 | if (netif_running(ndev)) { | |
1106 | netif_device_detach(ndev); | |
1107 | /* Stop PTP Clock driver */ |
1108 | ravb_ptp_stop(ndev); | |
1109 | /* Wait for DMA stopping */ |
1110 | error = ravb_stop_dma(ndev); | |
1111 | if (error) { | |
1112 | netdev_err(ndev, | |
1113 | "cannot set ringparam! Any AVB processes are still running?\n"); | |
1114 | return error; | |
1115 | } | |
1116 | synchronize_irq(ndev->irq); | |
1117 | ||
1118 | /* Free all the skb's in the RX queue and the DMA buffers. */ | |
1119 | ravb_ring_free(ndev, RAVB_BE); | |
1120 | ravb_ring_free(ndev, RAVB_NC); | |
1121 | } | |
1122 | ||
1123 | /* Set new parameters */ | |
1124 | priv->num_rx_ring[RAVB_BE] = ring->rx_pending; | |
1125 | priv->num_tx_ring[RAVB_BE] = ring->tx_pending; | |
1126 | ||
1127 | if (netif_running(ndev)) { | |
1128 | error = ravb_dmac_init(ndev); | |
1129 | if (error) { | |
1130 | netdev_err(ndev, | |
1131 | "%s: ravb_dmac_init() failed, error %d\n", | |
1132 | __func__, error); | |
1133 | return error; | |
1134 | } | |
1135 | ||
1136 | ravb_emac_init(ndev); | |
1137 | ||
1138 | /* Initialise PTP Clock driver */ |
1139 | ravb_ptp_init(ndev, priv->pdev); | |
1140 | ||
1141 | netif_device_attach(ndev); |
1142 | } | |
1143 | ||
1144 | return 0; | |
1145 | } | |
1146 | ||
1147 | static int ravb_get_ts_info(struct net_device *ndev, | |
1148 | struct ethtool_ts_info *info) | |
1149 | { | |
1150 | struct ravb_private *priv = netdev_priv(ndev); |
1151 | ||
1152 | info->so_timestamping = |
1153 | SOF_TIMESTAMPING_TX_SOFTWARE | | |
1154 | SOF_TIMESTAMPING_RX_SOFTWARE | | |
1155 | SOF_TIMESTAMPING_SOFTWARE | | |
1156 | SOF_TIMESTAMPING_TX_HARDWARE | | |
1157 | SOF_TIMESTAMPING_RX_HARDWARE | | |
1158 | SOF_TIMESTAMPING_RAW_HARDWARE; | |
1159 | info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); | |
1160 | info->rx_filters = | |
1161 | (1 << HWTSTAMP_FILTER_NONE) | | |
1162 | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | | |
1163 | (1 << HWTSTAMP_FILTER_ALL); | |
1164 | info->phc_index = ptp_clock_index(priv->ptp.clock); |
1165 | |
1166 | return 0; | |
1167 | } | |
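| /* These capabilities are what "ethtool -T <iface>" reports; phc_index |
| * ties the netdev to the PTP clock registered by ravb_ptp_init(). |
| */ |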
1168 | ||
1169 | static const struct ethtool_ops ravb_ethtool_ops = { | |
1170 | .get_settings = ravb_get_settings, | |
1171 | .set_settings = ravb_set_settings, | |
1172 | .nway_reset = ravb_nway_reset, | |
1173 | .get_msglevel = ravb_get_msglevel, | |
1174 | .set_msglevel = ravb_set_msglevel, | |
1175 | .get_link = ethtool_op_get_link, | |
1176 | .get_strings = ravb_get_strings, | |
1177 | .get_ethtool_stats = ravb_get_ethtool_stats, | |
1178 | .get_sset_count = ravb_get_sset_count, | |
1179 | .get_ringparam = ravb_get_ringparam, | |
1180 | .set_ringparam = ravb_set_ringparam, | |
1181 | .get_ts_info = ravb_get_ts_info, | |
1182 | }; | |
1183 | ||
1184 | /* Network device open function for Ethernet AVB */ | |
1185 | static int ravb_open(struct net_device *ndev) | |
1186 | { | |
1187 | struct ravb_private *priv = netdev_priv(ndev); | |
1188 | int error; | |
1189 | ||
1190 | napi_enable(&priv->napi[RAVB_BE]); | |
1191 | napi_enable(&priv->napi[RAVB_NC]); | |
1192 | ||
1193 | error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, | |
1194 | ndev); | |
1195 | if (error) { | |
1196 | netdev_err(ndev, "cannot request IRQ\n"); | |
1197 | goto out_napi_off; | |
1198 | } | |
1199 | ||
1200 | /* Device init */ | |
1201 | error = ravb_dmac_init(ndev); | |
1202 | if (error) | |
1203 | goto out_free_irq; | |
1204 | ravb_emac_init(ndev); | |
1205 | ||
1206 | /* Initialise PTP Clock driver */ |
1207 | ravb_ptp_init(ndev, priv->pdev); | |
1208 | ||
1209 | netif_tx_start_all_queues(ndev); |
1210 | ||
1211 | /* PHY control start */ | |
1212 | error = ravb_phy_start(ndev); | |
1213 | if (error) | |
1214 | goto out_ptp_stop; |
1215 | |
1216 | return 0; | |
1217 | ||
1218 | out_ptp_stop: |
1219 | /* Stop PTP Clock driver */ | |
1220 | ravb_ptp_stop(ndev); | |
1221 | out_free_irq: |
1222 | free_irq(ndev->irq, ndev); | |
1223 | out_napi_off: | |
1224 | napi_disable(&priv->napi[RAVB_NC]); | |
1225 | napi_disable(&priv->napi[RAVB_BE]); | |
1226 | return error; | |
1227 | } | |
1228 | ||
1229 | /* Timeout function for Ethernet AVB */ | |
1230 | static void ravb_tx_timeout(struct net_device *ndev) | |
1231 | { | |
1232 | struct ravb_private *priv = netdev_priv(ndev); | |
1233 | ||
1234 | netif_err(priv, tx_err, ndev, | |
1235 | "transmit timed out, status %08x, resetting...\n", | |
1236 | ravb_read(ndev, ISS)); | |
1237 | ||
1238 | /* Bump the tx_errors counter */ | |
1239 | ndev->stats.tx_errors++; | |
1240 | ||
1241 | schedule_work(&priv->work); | |
1242 | } | |
1243 | ||
1244 | static void ravb_tx_timeout_work(struct work_struct *work) | |
1245 | { | |
1246 | struct ravb_private *priv = container_of(work, struct ravb_private, | |
1247 | work); | |
1248 | struct net_device *ndev = priv->ndev; | |
1249 | ||
1250 | netif_tx_stop_all_queues(ndev); | |
1251 | ||
1252 | /* Stop PTP Clock driver */ |
1253 | ravb_ptp_stop(ndev); | |
1254 | ||
1255 | /* Wait for DMA stopping */ |
1256 | ravb_stop_dma(ndev); | |
1257 | ||
1258 | ravb_ring_free(ndev, RAVB_BE); | |
1259 | ravb_ring_free(ndev, RAVB_NC); | |
1260 | ||
1261 | /* Device init */ | |
1262 | ravb_dmac_init(ndev); | |
1263 | ravb_emac_init(ndev); | |
1264 | ||
1265 | /* Initialise PTP Clock driver */ |
1266 | ravb_ptp_init(ndev, priv->pdev); | |
1267 | ||
1268 | netif_tx_start_all_queues(ndev); |
1269 | } | |
1270 | ||
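| /* TX path notes: every frame is copied into a preallocated bounce |
| * buffer (PTR_ALIGN'ed to RAVB_ALIGN) since skb data alignment is not |
| * guaranteed to suit the DMAC, and the 10-bit timestamp tag is split |
| * between ds_tagl[15:12] (low four bits) and tagh_tsr (upper six). |
| */ |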
1271 | /* Packet transmit function for Ethernet AVB */ | |
1272 | static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |
1273 | { | |
1274 | struct ravb_private *priv = netdev_priv(ndev); | |
1275 | struct ravb_tstamp_skb *ts_skb = NULL; | |
1276 | u16 q = skb_get_queue_mapping(skb); | |
1277 | struct ravb_tx_desc *desc; | |
1278 | unsigned long flags; | |
1279 | dma_addr_t dma_addr; | |
1280 | void *buffer; | |
1281 | u32 entry; | |
1282 | u32 tccr; | |
1283 | ||
1284 | spin_lock_irqsave(&priv->lock, flags); | |
1285 | if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { | |
1286 | netif_err(priv, tx_queued, ndev, | |
1287 | "still transmitting with the full ring!\n"); | |
1288 | netif_stop_subqueue(ndev, q); | |
1289 | spin_unlock_irqrestore(&priv->lock, flags); | |
1290 | return NETDEV_TX_BUSY; | |
1291 | } | |
1292 | entry = priv->cur_tx[q] % priv->num_tx_ring[q]; | |
1293 | priv->tx_skb[q][entry] = skb; | |
1294 | ||
1295 | if (skb_put_padto(skb, ETH_ZLEN)) | |
1296 | goto drop; | |
1297 | ||
1298 | buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN); | |
1299 | memcpy(buffer, skb->data, skb->len); | |
1300 | desc = &priv->tx_ring[q][entry]; | |
1301 | desc->ds_tagl = cpu_to_le16(skb->len); | |
1302 | dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE); | |
1303 | if (dma_mapping_error(&ndev->dev, dma_addr)) | |
1304 | goto drop; | |
1305 | desc->dptr = cpu_to_le32(dma_addr); | |
1306 | ||
1307 | /* TX timestamp required */ | |
1308 | if (q == RAVB_NC) { | |
1309 | ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); | |
1310 | if (!ts_skb) { | |
1311 | dma_unmap_single(&ndev->dev, dma_addr, skb->len, | |
1312 | DMA_TO_DEVICE); | |
1313 | goto drop; | |
1314 | } | |
1315 | ts_skb->skb = skb; | |
1316 | ts_skb->tag = priv->ts_skb_tag++; | |
1317 | priv->ts_skb_tag &= 0x3ff; | |
1318 | list_add_tail(&ts_skb->list, &priv->ts_skb_list); | |
1319 | ||
1320 | /* TAG and timestamp required flag */ | |
1321 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
1322 | skb_tx_timestamp(skb); | |
1323 | desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; | |
1324 | desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); | |
1325 | } | |
1326 | ||
1327 | /* Descriptor type must be set after all the above writes */ | |
1328 | dma_wmb(); | |
1329 | desc->die_dt = DT_FSINGLE; | |
1330 | ||
1331 | tccr = ravb_read(ndev, TCCR); | |
1332 | if (!(tccr & (TCCR_TSRQ0 << q))) | |
1333 | ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); | |
1334 | ||
1335 | priv->cur_tx[q]++; | |
1336 | if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && | |
1337 | !ravb_tx_free(ndev, q)) | |
1338 | netif_stop_subqueue(ndev, q); | |
1339 | ||
1340 | exit: | |
1341 | mmiowb(); | |
1342 | spin_unlock_irqrestore(&priv->lock, flags); | |
1343 | return NETDEV_TX_OK; | |
1344 | ||
1345 | drop: | |
1346 | dev_kfree_skb_any(skb); | |
1347 | priv->tx_skb[q][entry] = NULL; | |
1348 | goto exit; | |
1349 | } | |
1350 | ||
1351 | static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, | |
1352 | void *accel_priv, select_queue_fallback_t fallback) | |
1353 | { | |
1354 | /* If skb needs TX timestamp, it is handled in network control queue */ | |
1355 | return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : | |
1356 | RAVB_BE; | |
1357 | ||
1358 | } | |
1359 | ||
1360 | static struct net_device_stats *ravb_get_stats(struct net_device *ndev) | |
1361 | { | |
1362 | struct ravb_private *priv = netdev_priv(ndev); | |
1363 | struct net_device_stats *nstats, *stats0, *stats1; | |
1364 | ||
1365 | nstats = &ndev->stats; | |
1366 | stats0 = &priv->stats[RAVB_BE]; | |
1367 | stats1 = &priv->stats[RAVB_NC]; | |
1368 | ||
1369 | nstats->tx_dropped += ravb_read(ndev, TROCR); | |
1370 | ravb_write(ndev, 0, TROCR); /* (write clear) */ | |
1371 | nstats->collisions += ravb_read(ndev, CDCR); | |
1372 | ravb_write(ndev, 0, CDCR); /* (write clear) */ | |
1373 | nstats->tx_carrier_errors += ravb_read(ndev, LCCR); | |
1374 | ravb_write(ndev, 0, LCCR); /* (write clear) */ | |
1375 | ||
1376 | nstats->tx_carrier_errors += ravb_read(ndev, CERCR); | |
1377 | ravb_write(ndev, 0, CERCR); /* (write clear) */ | |
1378 | nstats->tx_carrier_errors += ravb_read(ndev, CEECR); | |
1379 | ravb_write(ndev, 0, CEECR); /* (write clear) */ | |
1380 | ||
1381 | nstats->rx_packets = stats0->rx_packets + stats1->rx_packets; | |
1382 | nstats->tx_packets = stats0->tx_packets + stats1->tx_packets; | |
1383 | nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes; | |
1384 | nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes; | |
1385 | nstats->multicast = stats0->multicast + stats1->multicast; | |
1386 | nstats->rx_errors = stats0->rx_errors + stats1->rx_errors; | |
1387 | nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors; | |
1388 | nstats->rx_frame_errors = | |
1389 | stats0->rx_frame_errors + stats1->rx_frame_errors; | |
1390 | nstats->rx_length_errors = | |
1391 | stats0->rx_length_errors + stats1->rx_length_errors; | |
1392 | nstats->rx_missed_errors = | |
1393 | stats0->rx_missed_errors + stats1->rx_missed_errors; | |
1394 | nstats->rx_over_errors = | |
1395 | stats0->rx_over_errors + stats1->rx_over_errors; | |
1396 | ||
1397 | return nstats; | |
1398 | } | |
1399 | ||
1400 | /* Update promiscuous bit */ | |
1401 | static void ravb_set_rx_mode(struct net_device *ndev) | |
1402 | { | |
1403 | struct ravb_private *priv = netdev_priv(ndev); | |
1404 | unsigned long flags; | |
1405 | u32 ecmr; | |
1406 | ||
1407 | spin_lock_irqsave(&priv->lock, flags); | |
1408 | ecmr = ravb_read(ndev, ECMR); | |
1409 | if (ndev->flags & IFF_PROMISC) | |
1410 | ecmr |= ECMR_PRM; | |
1411 | else | |
1412 | ecmr &= ~ECMR_PRM; | |
1413 | ravb_write(ndev, ecmr, ECMR); | |
1414 | mmiowb(); | |
1415 | spin_unlock_irqrestore(&priv->lock, flags); | |
1416 | } | |
1417 | ||
1418 | /* Device close function for Ethernet AVB */ | |
1419 | static int ravb_close(struct net_device *ndev) | |
1420 | { | |
1421 | struct ravb_private *priv = netdev_priv(ndev); | |
1422 | struct ravb_tstamp_skb *ts_skb, *ts_skb2; | |
1423 | ||
1424 | netif_tx_stop_all_queues(ndev); | |
1425 | ||
1426 | /* Disable interrupts by clearing the interrupt masks. */ | |
1427 | ravb_write(ndev, 0, RIC0); | |
1428 | ravb_write(ndev, 0, RIC1); | |
1429 | ravb_write(ndev, 0, RIC2); | |
1430 | ravb_write(ndev, 0, TIC); | |
1431 | ||
1432 | /* Stop PTP Clock driver */ |
1433 | ravb_ptp_stop(ndev); | |
1434 | ||
1435 | /* Set the config mode to stop the AVB-DMAC's processes */ |
1436 | if (ravb_stop_dma(ndev) < 0) | |
1437 | netdev_err(ndev, | |
1438 | "device will be stopped after h/w processes are done.\n"); | |
1439 | ||
1440 | /* Clear the timestamp list */ | |
1441 | list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { | |
1442 | list_del(&ts_skb->list); | |
1443 | kfree(ts_skb); | |
1444 | } | |
1445 | ||
1446 | /* PHY disconnect */ | |
1447 | if (priv->phydev) { | |
1448 | phy_stop(priv->phydev); | |
1449 | phy_disconnect(priv->phydev); | |
1450 | priv->phydev = NULL; | |
1451 | } | |
1452 | ||
1453 | free_irq(ndev->irq, ndev); | |
1454 | ||
1455 | napi_disable(&priv->napi[RAVB_NC]); | |
1456 | napi_disable(&priv->napi[RAVB_BE]); | |
1457 | ||
1458 | /* Free all the skb's in the RX queue and the DMA buffers. */ | |
1459 | ravb_ring_free(ndev, RAVB_BE); | |
1460 | ravb_ring_free(ndev, RAVB_NC); | |
1461 | ||
1462 | return 0; | |
1463 | } | |
1464 | ||
1465 | static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) | |
1466 | { | |
1467 | struct ravb_private *priv = netdev_priv(ndev); | |
1468 | struct hwtstamp_config config; | |
1469 | ||
1470 | config.flags = 0; | |
1471 | config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : | |
1472 | HWTSTAMP_TX_OFF; | |
1473 | if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT) | |
1474 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; | |
1475 | else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL) | |
1476 | config.rx_filter = HWTSTAMP_FILTER_ALL; | |
1477 | else | |
1478 | config.rx_filter = HWTSTAMP_FILTER_NONE; | |
1479 | ||
1480 | return copy_to_user(req->ifr_data, &config, sizeof(config)) ? | |
1481 | -EFAULT : 0; | |
1482 | } | |
1483 | ||
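| /* Note the default case below: any filter other than NONE or |
| * PTP-V2-L2-EVENT is upgraded to HWTSTAMP_FILTER_ALL and the adjusted |
| * config is copied back to userspace, which the SIOCSHWTSTAMP ABI |
| * permits. |
| */ |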
1484 | /* Control hardware time stamping */ | |
1485 | static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) | |
1486 | { | |
1487 | struct ravb_private *priv = netdev_priv(ndev); | |
1488 | struct hwtstamp_config config; | |
1489 | u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; | |
1490 | u32 tstamp_tx_ctrl; | |
1491 | ||
1492 | if (copy_from_user(&config, req->ifr_data, sizeof(config))) | |
1493 | return -EFAULT; | |
1494 | ||
1495 | /* Reserved for future extensions */ | |
1496 | if (config.flags) | |
1497 | return -EINVAL; | |
1498 | ||
1499 | switch (config.tx_type) { | |
1500 | case HWTSTAMP_TX_OFF: | |
1501 | tstamp_tx_ctrl = 0; | |
1502 | break; | |
1503 | case HWTSTAMP_TX_ON: | |
1504 | tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; | |
1505 | break; | |
1506 | default: | |
1507 | return -ERANGE; | |
1508 | } | |
1509 | ||
1510 | switch (config.rx_filter) { | |
1511 | case HWTSTAMP_FILTER_NONE: | |
1512 | tstamp_rx_ctrl = 0; | |
1513 | break; | |
1514 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
1515 | tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; | |
1516 | break; | |
1517 | default: | |
1518 | config.rx_filter = HWTSTAMP_FILTER_ALL; | |
1519 | tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; | |
1520 | } | |
1521 | ||
1522 | priv->tstamp_tx_ctrl = tstamp_tx_ctrl; | |
1523 | priv->tstamp_rx_ctrl = tstamp_rx_ctrl; | |
1524 | ||
1525 | return copy_to_user(req->ifr_data, &config, sizeof(config)) ? | |
1526 | -EFAULT : 0; | |
1527 | } | |
1528 | ||
1529 | /* ioctl to device function */ | |
1530 | static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) | |
1531 | { | |
1532 | struct ravb_private *priv = netdev_priv(ndev); | |
1533 | struct phy_device *phydev = priv->phydev; | |
1534 | ||
1535 | if (!netif_running(ndev)) | |
1536 | return -EINVAL; | |
1537 | ||
1538 | if (!phydev) | |
1539 | return -ENODEV; | |
1540 | ||
1541 | switch (cmd) { | |
1542 | case SIOCGHWTSTAMP: | |
1543 | return ravb_hwtstamp_get(ndev, req); | |
1544 | case SIOCSHWTSTAMP: | |
1545 | return ravb_hwtstamp_set(ndev, req); | |
1546 | } | |
1547 | ||
1548 | return phy_mii_ioctl(phydev, req, cmd); | |
1549 | } | |
1550 | ||
1551 | static const struct net_device_ops ravb_netdev_ops = { | |
1552 | .ndo_open = ravb_open, | |
1553 | .ndo_stop = ravb_close, | |
1554 | .ndo_start_xmit = ravb_start_xmit, | |
1555 | .ndo_select_queue = ravb_select_queue, | |
1556 | .ndo_get_stats = ravb_get_stats, | |
1557 | .ndo_set_rx_mode = ravb_set_rx_mode, | |
1558 | .ndo_tx_timeout = ravb_tx_timeout, | |
1559 | .ndo_do_ioctl = ravb_do_ioctl, | |
1560 | .ndo_validate_addr = eth_validate_addr, | |
1561 | .ndo_set_mac_address = eth_mac_addr, | |
1562 | .ndo_change_mtu = eth_change_mtu, | |
1563 | }; | |
1564 | ||
1565 | /* MDIO bus init function */ | |
1566 | static int ravb_mdio_init(struct ravb_private *priv) | |
1567 | { | |
1568 | struct platform_device *pdev = priv->pdev; | |
1569 | struct device *dev = &pdev->dev; | |
1570 | int error; | |
1571 | ||
1572 | /* Bitbang init */ | |
1573 | priv->mdiobb.ops = &bb_ops; | |
1574 | ||
1575 | /* MII controller setting */ | |
1576 | priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); | |
1577 | if (!priv->mii_bus) | |
1578 | return -ENOMEM; | |
1579 | ||
1580 | /* Hook up MII support for ethtool */ | |
1581 | priv->mii_bus->name = "ravb_mii"; | |
1582 | priv->mii_bus->parent = dev; | |
1583 | snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", | |
1584 | pdev->name, pdev->id); | |
1585 | ||
1586 | /* Register MDIO bus */ | |
1587 | error = of_mdiobus_register(priv->mii_bus, dev->of_node); | |
1588 | if (error) | |
1589 | goto out_free_bus; | |
1590 | ||
1591 | return 0; | |
1592 | ||
1593 | out_free_bus: | |
1594 | free_mdio_bitbang(priv->mii_bus); | |
1595 | return error; | |
1596 | } | |
1597 | ||
1598 | /* MDIO bus release function */ | |
1599 | static int ravb_mdio_release(struct ravb_private *priv) | |
1600 | { | |
1601 | /* Unregister mdio bus */ | |
1602 | mdiobus_unregister(priv->mii_bus); | |
1603 | ||
1604 | /* Free bitbang info */ | |
1605 | free_mdio_bitbang(priv->mii_bus); | |
1606 | ||
1607 | return 0; | |
1608 | } | |
1609 | ||
1610 | static int ravb_probe(struct platform_device *pdev) | |
1611 | { | |
1612 | struct device_node *np = pdev->dev.of_node; | |
1613 | struct ravb_private *priv; | |
1614 | struct net_device *ndev; | |
1615 | int error, irq, q; | |
1616 | struct resource *res; | |
1617 | ||
1618 | if (!np) { | |
1619 | dev_err(&pdev->dev, | |
1620 | "this driver is required to be instantiated from device tree\n"); | |
1621 | return -EINVAL; | |
1622 | } | |
1623 | ||
1624 | /* Get base address */ | |
1625 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1626 | if (!res) { | |
1627 | dev_err(&pdev->dev, "invalid resource\n"); | |
1628 | return -EINVAL; | |
1629 | } | |
1630 | ||
1631 | ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), | |
1632 | NUM_TX_QUEUE, NUM_RX_QUEUE); | |
1633 | if (!ndev) | |
1634 | return -ENOMEM; | |
1635 | ||
1636 | pm_runtime_enable(&pdev->dev); | |
1637 | pm_runtime_get_sync(&pdev->dev); | |
1638 | ||
1639 | /* The Ether-specific entries in the device structure. */ | |
1640 | ndev->base_addr = res->start; | |
1641 | ndev->dma = -1; | |
1642 | irq = platform_get_irq(pdev, 0); | |
1643 | if (irq < 0) { | |
1644 | error = -ENODEV; | |
1645 | goto out_release; | |
1646 | } | |
1647 | ndev->irq = irq; | |
1648 | ||
1649 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
1650 | ||
1651 | priv = netdev_priv(ndev); | |
1652 | priv->ndev = ndev; | |
1653 | priv->pdev = pdev; | |
1654 | priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; | |
1655 | priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; | |
1656 | priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; | |
1657 | priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; | |
1658 | priv->addr = devm_ioremap_resource(&pdev->dev, res); | |
1659 | if (IS_ERR(priv->addr)) { | |
1660 | error = PTR_ERR(priv->addr); | |
1661 | goto out_release; | |
1662 | } | |
1663 | ||
1664 | spin_lock_init(&priv->lock); | |
1665 | INIT_WORK(&priv->work, ravb_tx_timeout_work); | |
1666 | ||
1667 | priv->phy_interface = of_get_phy_mode(np); | |
1668 | ||
1669 | priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); | |
1670 | priv->avb_link_active_low = | |
1671 | of_property_read_bool(np, "renesas,ether-link-active-low"); | |
1672 | ||
1673 | /* Set function */ | |
1674 | ndev->netdev_ops = &ravb_netdev_ops; | |
1675 | ndev->ethtool_ops = &ravb_ethtool_ops; | |
1676 | ||
1677 | /* Set AVB config mode */ | |
1678 | ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, | |
1679 | CCC); | |
1680 | ||
1681 | /* Set CSEL value */ | |
1682 | ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB, | |
1683 | CCC); | |
1684 | ||
1685 | /* Set GTI value */ | |
1686 | ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI); | |
1687 | ||
1688 | /* Request GTI loading */ | |
1689 | ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); | |
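| /* GTI.TIV holds the gPTP timer increment in 2^-20 ns units, so |
| * (1000 << 20) / 130 encodes the ~7.69 ns period of what is presumably |
| * a 130 MHz peripheral clock; GCCR_LTI latches the new value. |
| */ |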
1690 | ||
1691 | /* Allocate descriptor base address table */ | |
1692 | priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; | |
1693 | priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size, | |
1694 | &priv->desc_bat_dma, GFP_KERNEL); | |
1695 | if (!priv->desc_bat) { | |
1696 | dev_err(&ndev->dev, | |
1697 | "Cannot allocate desc base address table (size %d bytes)\n", | |
1698 | priv->desc_bat_size); | |
1699 | error = -ENOMEM; | |
1700 | goto out_release; | |
1701 | } | |
1702 | for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) | |
1703 | priv->desc_bat[q].die_dt = DT_EOS; | |
1704 | ravb_write(ndev, priv->desc_bat_dma, DBAT); | |
1705 | ||
1706 | /* Initialise HW timestamp list */ | |
1707 | INIT_LIST_HEAD(&priv->ts_skb_list); | |
1708 | ||
1709 | /* Debug message level */ | |
1710 | priv->msg_enable = RAVB_DEF_MSG_ENABLE; | |
1711 | ||
1712 | /* Read and set MAC address */ | |
1713 | ravb_read_mac_address(ndev, of_get_mac_address(np)); | |
1714 | if (!is_valid_ether_addr(ndev->dev_addr)) { | |
1715 | dev_warn(&pdev->dev, | |
1716 | "no valid MAC address supplied, using a random one\n"); | |
1717 | eth_hw_addr_random(ndev); | |
1718 | } | |
1719 | ||
1720 | /* MDIO bus init */ | |
1721 | error = ravb_mdio_init(priv); | |
1722 | if (error) { | |
1723 | dev_err(&ndev->dev, "failed to initialize MDIO\n"); | |
1724 | goto out_dma_free; | |
1725 | } | |
1726 | ||
1727 | netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); | |
1728 | netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); | |
1729 | ||
1730 | /* Network device register */ | |
1731 | error = register_netdev(ndev); | |
1732 | if (error) | |
1733 | goto out_napi_del; | |
1734 | ||
1735 | /* Print device information */ | |
1736 | netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", | |
1737 | (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); | |
1738 | ||
1739 | platform_set_drvdata(pdev, ndev); | |
1740 | ||
1741 | return 0; | |
1742 | ||
1743 | out_napi_del: | |
1744 | netif_napi_del(&priv->napi[RAVB_NC]); | |
1745 | netif_napi_del(&priv->napi[RAVB_BE]); | |
1746 | ravb_mdio_release(priv); | |
1747 | out_dma_free: | |
1748 | dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat, | |
1749 | priv->desc_bat_dma); | |
1750 | out_release: | |
1751 | if (ndev) | |
1752 | free_netdev(ndev); | |
1753 | ||
1754 | pm_runtime_put(&pdev->dev); | |
1755 | pm_runtime_disable(&pdev->dev); | |
1756 | return error; | |
1757 | } | |
1758 | ||
1759 | static int ravb_remove(struct platform_device *pdev) | |
1760 | { | |
1761 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1762 | struct ravb_private *priv = netdev_priv(ndev); | |
1763 | ||
1764 | dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat, | |
1765 | priv->desc_bat_dma); | |
1766 | /* Set reset mode */ | |
1767 | ravb_write(ndev, CCC_OPC_RESET, CCC); | |
1768 | pm_runtime_put_sync(&pdev->dev); | |
1769 | unregister_netdev(ndev); | |
1770 | netif_napi_del(&priv->napi[RAVB_NC]); | |
1771 | netif_napi_del(&priv->napi[RAVB_BE]); | |
1772 | ravb_mdio_release(priv); | |
1773 | pm_runtime_disable(&pdev->dev); | |
1774 | free_netdev(ndev); | |
1775 | platform_set_drvdata(pdev, NULL); | |
1776 | ||
1777 | return 0; | |
1778 | } | |
1779 | ||
1780 | #ifdef CONFIG_PM | |
1781 | static int ravb_runtime_nop(struct device *dev) | |
1782 | { | |
1783 | /* Runtime PM callback shared between ->runtime_suspend() | |
1784 | * and ->runtime_resume(). Simply returns success. | |
1785 | * | |
1786 | * This driver re-initializes all registers after | |
1787 | * pm_runtime_get_sync() anyway so there is no need | |
1788 | * to save and restore registers here. | |
1789 | */ | |
1790 | return 0; | |
1791 | } | |
1792 | ||
1793 | static const struct dev_pm_ops ravb_dev_pm_ops = { | |
1794 | .runtime_suspend = ravb_runtime_nop, | |
1795 | .runtime_resume = ravb_runtime_nop, | |
1796 | }; | |
1797 | ||
1798 | #define RAVB_PM_OPS (&ravb_dev_pm_ops) | |
1799 | #else | |
1800 | #define RAVB_PM_OPS NULL | |
1801 | #endif | |
1802 | ||
1803 | static const struct of_device_id ravb_match_table[] = { | |
1804 | { .compatible = "renesas,etheravb-r8a7790" }, | |
1805 | { .compatible = "renesas,etheravb-r8a7794" }, | |
1806 | { } | |
1807 | }; | |
1808 | MODULE_DEVICE_TABLE(of, ravb_match_table); | |
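| /* A minimal sketch of a matching device tree node, limited to the |
| * properties this driver actually consumes (the reg, interrupts and |
| * phy-mode values here are placeholders, not taken from any real |
| * board): |
| * |
| *	ethernet@e6800000 { |
| *		compatible = "renesas,etheravb-r8a7790"; |
| *		reg = <0 0xe6800000 0 0x800>; |
| *		interrupts = <...>; |
| *		phy-mode = "rgmii"; |
| *		phy-handle = <&phy0>; |
| *	}; |
| */ |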
1809 | ||
1810 | static struct platform_driver ravb_driver = { | |
1811 | .probe = ravb_probe, | |
1812 | .remove = ravb_remove, | |
1813 | .driver = { | |
1814 | .name = "ravb", | |
1815 | .pm = RAVB_PM_OPS, | |
1816 | .of_match_table = ravb_match_table, | |
1817 | }, | |
1818 | }; | |
1819 | ||
1820 | module_platform_driver(ravb_driver); | |
1821 | ||
1822 | MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai"); | |
1823 | MODULE_DESCRIPTION("Renesas Ethernet AVB driver"); | |
1824 | MODULE_LICENSE("GPL v2"); |