drivers/net/ethernet/renesas/ravb_main.c
1/* Renesas Ethernet AVB device driver
2 *
3 * Copyright (C) 2014-2015 Renesas Electronics Corporation
4 * Copyright (C) 2015 Renesas Solutions Corp.
5 * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
6 *
7 * Based on the SuperH Ethernet driver
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License version 2,
11 * as published by the Free Software Foundation.
12 */
13
14#include <linux/cache.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17#include <linux/dma-mapping.h>
18#include <linux/err.h>
19#include <linux/etherdevice.h>
20#include <linux/ethtool.h>
21#include <linux/if_vlan.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/net_tstamp.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
28#include <linux/of_irq.h>
29#include <linux/of_mdio.h>
30#include <linux/of_net.h>
31#include <linux/pm_runtime.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34
35#include "ravb.h"
36
37#define RAVB_DEF_MSG_ENABLE \
38 (NETIF_MSG_LINK | \
39 NETIF_MSG_TIMER | \
40 NETIF_MSG_RX_ERR | \
41 NETIF_MSG_TX_ERR)
42
43int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
44{
45 int i;
46
47 for (i = 0; i < 10000; i++) {
48 if ((ravb_read(ndev, reg) & mask) == value)
49 return 0;
50 udelay(10);
51 }
52 return -ETIMEDOUT;
53}
54
55static int ravb_config(struct net_device *ndev)
56{
57 int error;
58
59 /* Set config mode */
60 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
61 CCC);
62 /* Check if the operating mode is changed to the config mode */
63 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
64 if (error)
65 netdev_err(ndev, "failed to switch device to config mode\n");
66
67 return error;
68}
69
70static void ravb_set_duplex(struct net_device *ndev)
71{
72 struct ravb_private *priv = netdev_priv(ndev);
73 u32 ecmr = ravb_read(ndev, ECMR);
74
75 if (priv->duplex) /* Full */
76 ecmr |= ECMR_DM;
77 else /* Half */
78 ecmr &= ~ECMR_DM;
79 ravb_write(ndev, ecmr, ECMR);
80}
81
82static void ravb_set_rate(struct net_device *ndev)
83{
84 struct ravb_private *priv = netdev_priv(ndev);
85
86 switch (priv->speed) {
87 case 100: /* 100BASE */
88 ravb_write(ndev, GECMR_SPEED_100, GECMR);
89 break;
90 case 1000: /* 1000BASE */
91 ravb_write(ndev, GECMR_SPEED_1000, GECMR);
92 break;
93 default:
94 break;
95 }
96}
97
98static void ravb_set_buffer_align(struct sk_buff *skb)
99{
100 u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
101
102 if (reserve)
103 skb_reserve(skb, RAVB_ALIGN - reserve);
104}
105
106/* Get MAC address from the MAC address registers
107 *
108 * Ethernet AVB device doesn't have ROM for MAC address.
109 * This function gets the MAC address that was used by a bootloader.
110 */
111static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
112{
113 if (mac) {
114 ether_addr_copy(ndev->dev_addr, mac);
115 } else {
116 ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
117 ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
118 ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
119 ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
120 ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
121 ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
122 }
123}
124
125static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
126{
127 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
128 mdiobb);
129 u32 pir = ravb_read(priv->ndev, PIR);
130
131 if (set)
132 pir |= mask;
133 else
134 pir &= ~mask;
135 ravb_write(priv->ndev, pir, PIR);
136}
137
138/* MDC pin control */
139static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
140{
141 ravb_mdio_ctrl(ctrl, PIR_MDC, level);
142}
143
144/* Data I/O pin control */
145static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
146{
147 ravb_mdio_ctrl(ctrl, PIR_MMD, output);
148}
149
150/* Set data bit */
151static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
152{
153 ravb_mdio_ctrl(ctrl, PIR_MDO, value);
154}
155
156/* Get data bit */
157static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
158{
159 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
160 mdiobb);
161
162 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
163}
164
165/* MDIO bus control struct */
166static struct mdiobb_ops bb_ops = {
167 .owner = THIS_MODULE,
168 .set_mdc = ravb_set_mdc,
169 .set_mdio_dir = ravb_set_mdio_dir,
170 .set_mdio_data = ravb_set_mdio_data,
171 .get_mdio_data = ravb_get_mdio_data,
172};
173
174/* Free skb's and DMA buffers for Ethernet AVB */
175static void ravb_ring_free(struct net_device *ndev, int q)
176{
177 struct ravb_private *priv = netdev_priv(ndev);
178 int ring_size;
179 int i;
180
181 /* Free RX skb ringbuffer */
182 if (priv->rx_skb[q]) {
183 for (i = 0; i < priv->num_rx_ring[q]; i++)
184 dev_kfree_skb(priv->rx_skb[q][i]);
185 }
186 kfree(priv->rx_skb[q]);
187 priv->rx_skb[q] = NULL;
188
189 /* Free TX skb ringbuffer */
190 if (priv->tx_skb[q]) {
191 for (i = 0; i < priv->num_tx_ring[q]; i++)
192 dev_kfree_skb(priv->tx_skb[q][i]);
193 }
194 kfree(priv->tx_skb[q]);
195 priv->tx_skb[q] = NULL;
196
197 /* Free aligned TX buffers */
198 kfree(priv->tx_align[q]);
199 priv->tx_align[q] = NULL;
200
201 if (priv->rx_ring[q]) {
202 ring_size = sizeof(struct ravb_ex_rx_desc) *
203 (priv->num_rx_ring[q] + 1);
204 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
205 priv->rx_desc_dma[q]);
206 priv->rx_ring[q] = NULL;
207 }
208
209 if (priv->tx_ring[q]) {
210 ring_size = sizeof(struct ravb_tx_desc) *
211 (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
212 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
213 priv->tx_desc_dma[q]);
214 priv->tx_ring[q] = NULL;
215 }
216}
217
218/* Format skb and descriptor buffer for Ethernet AVB */
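/* Note on the layout built below: each ring holds its data descriptors
 * followed by one extra DT_LINKFIX descriptor whose dptr points back to the
 * ring base, so the AVB-DMAC walks the ring as a circular list. The
 * descriptor base address table (desc_bat) entries likewise use DT_LINKFIX
 * to point at the first descriptor of each queue.
 */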
219static void ravb_ring_format(struct net_device *ndev, int q)
220{
221 struct ravb_private *priv = netdev_priv(ndev);
222 struct ravb_ex_rx_desc *rx_desc;
223 struct ravb_tx_desc *tx_desc;
224 struct ravb_desc *desc;
225 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
226 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
227 NUM_TX_DESC;
228 dma_addr_t dma_addr;
229 int i;
230
231 priv->cur_rx[q] = 0;
232 priv->cur_tx[q] = 0;
233 priv->dirty_rx[q] = 0;
234 priv->dirty_tx[q] = 0;
235
236 memset(priv->rx_ring[q], 0, rx_ring_size);
237 /* Build RX ring buffer */
238 for (i = 0; i < priv->num_rx_ring[q]; i++) {
239 /* RX descriptor */
240 rx_desc = &priv->rx_ring[q][i];
241 /* The size of the buffer should be on 16-byte boundary. */
242 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
243 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
244 ALIGN(PKT_BUF_SZ, 16),
245 DMA_FROM_DEVICE);
246 /* We just set the data size to 0 for a failed mapping which
247 * should prevent DMA from happening...
248 */
249 if (dma_mapping_error(ndev->dev.parent, dma_addr))
250 rx_desc->ds_cc = cpu_to_le16(0);
251 rx_desc->dptr = cpu_to_le32(dma_addr);
252 rx_desc->die_dt = DT_FEMPTY;
253 }
254 rx_desc = &priv->rx_ring[q][i];
255 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
256 rx_desc->die_dt = DT_LINKFIX; /* type */
257
258 memset(priv->tx_ring[q], 0, tx_ring_size);
259 /* Build TX ring buffer */
260 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
261 i++, tx_desc++) {
262 tx_desc->die_dt = DT_EEMPTY;
263 tx_desc++;
264 tx_desc->die_dt = DT_EEMPTY;
265 }
266 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
267 tx_desc->die_dt = DT_LINKFIX; /* type */
268
269 /* RX descriptor base address for best effort */
270 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
271 desc->die_dt = DT_LINKFIX; /* type */
272 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
273
274 /* TX descriptor base address for best effort */
275 desc = &priv->desc_bat[q];
276 desc->die_dt = DT_LINKFIX; /* type */
277 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
278}
279
280/* Init skb and descriptor buffer for Ethernet AVB */
281static int ravb_ring_init(struct net_device *ndev, int q)
282{
283 struct ravb_private *priv = netdev_priv(ndev);
284 struct sk_buff *skb;
285 int ring_size;
286 int i;
287
288 /* Allocate RX and TX skb rings */
289 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
290 sizeof(*priv->rx_skb[q]), GFP_KERNEL);
291 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
292 sizeof(*priv->tx_skb[q]), GFP_KERNEL);
293 if (!priv->rx_skb[q] || !priv->tx_skb[q])
294 goto error;
295
296 for (i = 0; i < priv->num_rx_ring[q]; i++) {
297 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
298 if (!skb)
299 goto error;
300 ravb_set_buffer_align(skb);
301 priv->rx_skb[q][i] = skb;
302 }
303
304 /* Allocate rings for the aligned buffers */
305 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
306 DPTR_ALIGN - 1, GFP_KERNEL);
307 if (!priv->tx_align[q])
308 goto error;
309
310 /* Allocate all RX descriptors. */
311 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
312 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
313 &priv->rx_desc_dma[q],
314 GFP_KERNEL);
315 if (!priv->rx_ring[q])
316 goto error;
317
318 priv->dirty_rx[q] = 0;
319
320 /* Allocate all TX descriptors. */
321 ring_size = sizeof(struct ravb_tx_desc) *
322 (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
323 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
324 &priv->tx_desc_dma[q],
325 GFP_KERNEL);
326 if (!priv->tx_ring[q])
327 goto error;
328
329 return 0;
330
331error:
332 ravb_ring_free(ndev, q);
333
334 return -ENOMEM;
335}
336
337/* E-MAC init function */
338static void ravb_emac_init(struct net_device *ndev)
339{
340 struct ravb_private *priv = netdev_priv(ndev);
341 u32 ecmr;
342
343 /* Receive frame limit set register */
344 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
345
346 /* PAUSE prohibition */
347 ecmr = ravb_read(ndev, ECMR);
348 ecmr &= ECMR_DM;
349 ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
350 ravb_write(ndev, ecmr, ECMR);
351
352 ravb_set_rate(ndev);
353
354 /* Set MAC address */
355 ravb_write(ndev,
356 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
357 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
358 ravb_write(ndev,
359 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
360
361 ravb_write(ndev, 1, MPR);
362
363 /* E-MAC status register clear */
364 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
365
366 /* E-MAC interrupt enable register */
367 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
368}
369
370/* Device init function for Ethernet AVB */
371static int ravb_dmac_init(struct net_device *ndev)
372{
373 int error;
374
375 /* Set CONFIG mode */
376 error = ravb_config(ndev);
377 if (error)
378 return error;
379
380 error = ravb_ring_init(ndev, RAVB_BE);
381 if (error)
382 return error;
383 error = ravb_ring_init(ndev, RAVB_NC);
384 if (error) {
385 ravb_ring_free(ndev, RAVB_BE);
386 return error;
387 }
388
389 /* Descriptor format */
390 ravb_ring_format(ndev, RAVB_BE);
391 ravb_ring_format(ndev, RAVB_NC);
392
393#if defined(__LITTLE_ENDIAN)
394 ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
395#else
396 ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
397#endif
398
399 /* Set AVB RX */
400 ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
401
402 /* Set FIFO size */
403 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
404
405 /* Timestamp enable */
406 ravb_write(ndev, TCCR_TFEN, TCCR);
407
408 /* Interrupt enable: */
409 /* Frame receive */
410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
411 /* Receive FIFO full warning */
412 ravb_write(ndev, RIC1_RFWE, RIC1);
413 /* Receive FIFO full error, descriptor empty */
414 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
415 /* Frame transmitted, timestamp FIFO updated */
416 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
417
418 /* Setting the control will start the AVB-DMAC process. */
419 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
420 CCC);
421
422 return 0;
423}
424
425/* Free TX skb function for AVB-IP */
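/* Each transmitted frame occupies NUM_TX_DESC consecutive descriptors (see
 * ravb_start_xmit()), so the owning skb lives at tx_skb[q][entry / NUM_TX_DESC]
 * and is only released once the last descriptor of the frame has completed.
 */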
426static int ravb_tx_free(struct net_device *ndev, int q)
427{
428 struct ravb_private *priv = netdev_priv(ndev);
429 struct net_device_stats *stats = &priv->stats[q];
430 struct ravb_tx_desc *desc;
431 int free_num = 0;
432 int entry;
433 u32 size;
434
435 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
436 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
437 NUM_TX_DESC);
438 desc = &priv->tx_ring[q][entry];
439 if (desc->die_dt != DT_FEMPTY)
440 break;
441 /* Descriptor type must be checked before all other reads */
442 dma_rmb();
443 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
444 /* Free the original skb. */
445 if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
446 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
447 size, DMA_TO_DEVICE);
448 /* Last packet descriptor? */
449 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
450 entry /= NUM_TX_DESC;
451 dev_kfree_skb_any(priv->tx_skb[q][entry]);
452 priv->tx_skb[q][entry] = NULL;
453 stats->tx_packets++;
454 }
455 free_num++;
456 }
457 stats->tx_bytes += size;
458 desc->die_dt = DT_EEMPTY;
459 }
460 return free_num;
461}
462
463static void ravb_get_tx_tstamp(struct net_device *ndev)
464{
465 struct ravb_private *priv = netdev_priv(ndev);
466 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
467 struct skb_shared_hwtstamps shhwtstamps;
468 struct sk_buff *skb;
469 struct timespec64 ts;
470 u16 tag, tfa_tag;
471 int count;
472 u32 tfa2;
473
474 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
475 while (count--) {
476 tfa2 = ravb_read(ndev, TFA2);
477 tfa_tag = (tfa2 & TFA2_TST) >> 16;
478 ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
479 ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
480 ravb_read(ndev, TFA1);
481 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
482 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
483 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
484 list) {
485 skb = ts_skb->skb;
486 tag = ts_skb->tag;
487 list_del(&ts_skb->list);
488 kfree(ts_skb);
489 if (tag == tfa_tag) {
490 skb_tstamp_tx(skb, &shhwtstamps);
491 break;
492 }
493 }
494 ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
495 }
496}
497
498/* Packet receive function for Ethernet AVB */
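/* Returns a flag telling the caller (ravb_poll()) whether the per-call work
 * cap was hit before the queue was drained; boguscnt limits one invocation to
 * at most min(outstanding RX descriptors, *quota) frames, and *quota is
 * reduced by the work actually done.
 */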
499static bool ravb_rx(struct net_device *ndev, int *quota, int q)
500{
501 struct ravb_private *priv = netdev_priv(ndev);
502 int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
503 int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
504 priv->cur_rx[q];
505 struct net_device_stats *stats = &priv->stats[q];
506 struct ravb_ex_rx_desc *desc;
507 struct sk_buff *skb;
508 dma_addr_t dma_addr;
509 struct timespec64 ts;
510 u8 desc_status;
511 u16 pkt_len;
512 int limit;
513
514 boguscnt = min(boguscnt, *quota);
515 limit = boguscnt;
516 desc = &priv->rx_ring[q][entry];
517 while (desc->die_dt != DT_FEMPTY) {
518 /* Descriptor type must be checked before all other reads */
519 dma_rmb();
520 desc_status = desc->msc;
521 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
522
523 if (--boguscnt < 0)
524 break;
525
526 /* We use 0-byte descriptors to mark the DMA mapping errors */
527 if (!pkt_len)
528 continue;
529
530 if (desc_status & MSC_MC)
531 stats->multicast++;
532
533 if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
534 MSC_CEEF)) {
535 stats->rx_errors++;
536 if (desc_status & MSC_CRC)
537 stats->rx_crc_errors++;
538 if (desc_status & MSC_RFE)
539 stats->rx_frame_errors++;
540 if (desc_status & (MSC_RTLF | MSC_RTSF))
541 stats->rx_length_errors++;
542 if (desc_status & MSC_CEEF)
543 stats->rx_missed_errors++;
544 } else {
545 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
546
547 skb = priv->rx_skb[q][entry];
548 priv->rx_skb[q][entry] = NULL;
549 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
550 ALIGN(PKT_BUF_SZ, 16),
551 DMA_FROM_DEVICE);
552 get_ts &= (q == RAVB_NC) ?
553 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
554 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
555 if (get_ts) {
556 struct skb_shared_hwtstamps *shhwtstamps;
557
558 shhwtstamps = skb_hwtstamps(skb);
559 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
560 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
561 32) | le32_to_cpu(desc->ts_sl);
562 ts.tv_nsec = le32_to_cpu(desc->ts_n);
563 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
564 }
565 skb_put(skb, pkt_len);
566 skb->protocol = eth_type_trans(skb, ndev);
567 napi_gro_receive(&priv->napi[q], skb);
568 stats->rx_packets++;
569 stats->rx_bytes += pkt_len;
570 }
571
572 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
573 desc = &priv->rx_ring[q][entry];
574 }
575
576 /* Refill the RX ring buffers. */
577 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
578 entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
579 desc = &priv->rx_ring[q][entry];
580 /* The size of the buffer should be on 16-byte boundary. */
581 desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
582
583 if (!priv->rx_skb[q][entry]) {
584 skb = netdev_alloc_skb(ndev,
585 PKT_BUF_SZ + RAVB_ALIGN - 1);
586 if (!skb)
587 break; /* Better luck next round. */
588 ravb_set_buffer_align(skb);
589 dma_addr = dma_map_single(ndev->dev.parent, skb->data,
590 le16_to_cpu(desc->ds_cc),
591 DMA_FROM_DEVICE);
592 skb_checksum_none_assert(skb);
593 /* We just set the data size to 0 for a failed mapping
594 * which should prevent DMA from happening...
595 */
596 if (dma_mapping_error(ndev->dev.parent, dma_addr))
597 desc->ds_cc = cpu_to_le16(0);
598 desc->dptr = cpu_to_le32(dma_addr);
599 priv->rx_skb[q][entry] = skb;
600 }
601 /* Descriptor type must be set after all the above writes */
602 dma_wmb();
603 desc->die_dt = DT_FEMPTY;
604 }
605
606 *quota -= limit - (++boguscnt);
607
608 return boguscnt <= 0;
609}
610
611static void ravb_rcv_snd_disable(struct net_device *ndev)
612{
613 /* Disable TX and RX */
614 ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
615}
616
617static void ravb_rcv_snd_enable(struct net_device *ndev)
618{
619 /* Enable TX and RX */
620 ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
621}
622
623/* Wait until the DMA processes have finished */
624static int ravb_stop_dma(struct net_device *ndev)
625{
626 int error;
627
628 /* Wait for stopping the hardware TX process */
629 error = ravb_wait(ndev, TCCR,
630 TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
631 if (error)
632 return error;
633
634 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
635 0);
636 if (error)
637 return error;
638
639 /* Stop the E-MAC's RX/TX processes. */
640 ravb_rcv_snd_disable(ndev);
641
642 /* Wait for stopping the RX DMA process */
643 error = ravb_wait(ndev, CSR, CSR_RPO, 0);
644 if (error)
645 return error;
646
647 /* Stop AVB-DMAC process */
648 return ravb_config(ndev);
649}
650
651/* E-MAC interrupt handler */
652static void ravb_emac_interrupt(struct net_device *ndev)
653{
654 struct ravb_private *priv = netdev_priv(ndev);
655 u32 ecsr, psr;
656
657 ecsr = ravb_read(ndev, ECSR);
658 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
659 if (ecsr & ECSR_ICD)
660 ndev->stats.tx_carrier_errors++;
661 if (ecsr & ECSR_LCHNG) {
662 /* Link changed */
663 if (priv->no_avb_link)
664 return;
665 psr = ravb_read(ndev, PSR);
666 if (priv->avb_link_active_low)
667 psr ^= PSR_LMON;
668 if (!(psr & PSR_LMON)) {
669 /* Disable RX and TX */
670 ravb_rcv_snd_disable(ndev);
671 } else {
672 /* Enable RX and TX */
673 ravb_rcv_snd_enable(ndev);
674 }
675 }
676}
677
678/* Error interrupt handler */
679static void ravb_error_interrupt(struct net_device *ndev)
680{
681 struct ravb_private *priv = netdev_priv(ndev);
682 u32 eis, ris2;
683
684 eis = ravb_read(ndev, EIS);
685 ravb_write(ndev, ~EIS_QFS, EIS);
686 if (eis & EIS_QFS) {
687 ris2 = ravb_read(ndev, RIS2);
688 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
689
690 /* Receive Descriptor Empty int */
691 if (ris2 & RIS2_QFF0)
692 priv->stats[RAVB_BE].rx_over_errors++;
693
694 /* Receive Descriptor Empty int */
695 if (ris2 & RIS2_QFF1)
696 priv->stats[RAVB_NC].rx_over_errors++;
697
698 /* Receive FIFO Overflow int */
699 if (ris2 & RIS2_RFFF)
700 priv->rx_fifo_errors++;
701 }
702}
703
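/* Single shared interrupt handler: RX/TX interrupts for a queue are masked in
 * RIC0/TIC before the matching NAPI context is scheduled, and ravb_poll()
 * re-enables them once that queue has been serviced.
 */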
704static irqreturn_t ravb_interrupt(int irq, void *dev_id)
705{
706 struct net_device *ndev = dev_id;
707 struct ravb_private *priv = netdev_priv(ndev);
708 irqreturn_t result = IRQ_NONE;
709 u32 iss;
710
711 spin_lock(&priv->lock);
712 /* Get interrupt status */
713 iss = ravb_read(ndev, ISS);
714
715 /* Received and transmitted interrupts */
716 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
717 u32 ris0 = ravb_read(ndev, RIS0);
718 u32 ric0 = ravb_read(ndev, RIC0);
719 u32 tis = ravb_read(ndev, TIS);
720 u32 tic = ravb_read(ndev, TIC);
721 int q;
722
723 /* Timestamp updated */
724 if (tis & TIS_TFUF) {
725 ravb_write(ndev, ~TIS_TFUF, TIS);
726 ravb_get_tx_tstamp(ndev);
727 result = IRQ_HANDLED;
728 }
729
730 /* Network control and best effort queue RX/TX */
731 for (q = RAVB_NC; q >= RAVB_BE; q--) {
732 if (((ris0 & ric0) & BIT(q)) ||
733 ((tis & tic) & BIT(q))) {
734 if (napi_schedule_prep(&priv->napi[q])) {
735 /* Mask RX and TX interrupts */
736 ravb_write(ndev, ric0 & ~BIT(q), RIC0);
737 ravb_write(ndev, tic & ~BIT(q), TIC);
738 __napi_schedule(&priv->napi[q]);
739 } else {
740 netdev_warn(ndev,
741 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
742 ris0, ric0);
743 netdev_warn(ndev,
744 " tx status 0x%08x, tx mask 0x%08x.\n",
745 tis, tic);
746 }
747 result = IRQ_HANDLED;
748 }
749 }
750 }
751
752 /* E-MAC status summary */
753 if (iss & ISS_MS) {
754 ravb_emac_interrupt(ndev);
755 result = IRQ_HANDLED;
756 }
757
758 /* Error status summary */
759 if (iss & ISS_ES) {
760 ravb_error_interrupt(ndev);
761 result = IRQ_HANDLED;
762 }
763
764 if (iss & ISS_CGIS)
765 result = ravb_ptp_interrupt(ndev);
766
767 mmiowb();
768 spin_unlock(&priv->lock);
769 return result;
770}
771
772static int ravb_poll(struct napi_struct *napi, int budget)
773{
774 struct net_device *ndev = napi->dev;
775 struct ravb_private *priv = netdev_priv(ndev);
776 unsigned long flags;
777 int q = napi - priv->napi;
778 int mask = BIT(q);
779 int quota = budget;
780 u32 ris0, tis;
781
782 for (;;) {
783 tis = ravb_read(ndev, TIS);
784 ris0 = ravb_read(ndev, RIS0);
785 if (!((ris0 & mask) || (tis & mask)))
786 break;
787
788 /* Processing RX Descriptor Ring */
789 if (ris0 & mask) {
790 /* Clear RX interrupt */
791 ravb_write(ndev, ~mask, RIS0);
792 if (ravb_rx(ndev, &quota, q))
793 goto out;
794 }
795 /* Processing TX Descriptor Ring */
796 if (tis & mask) {
797 spin_lock_irqsave(&priv->lock, flags);
798 /* Clear TX interrupt */
799 ravb_write(ndev, ~mask, TIS);
800 ravb_tx_free(ndev, q);
801 netif_wake_subqueue(ndev, q);
802 mmiowb();
803 spin_unlock_irqrestore(&priv->lock, flags);
804 }
805 }
806
807 napi_complete(napi);
808
809 /* Re-enable RX/TX interrupts */
810 spin_lock_irqsave(&priv->lock, flags);
811 ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
812 ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
813 mmiowb();
814 spin_unlock_irqrestore(&priv->lock, flags);
815
816 /* Receive error message handling */
817 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
818 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
819 if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
820 ndev->stats.rx_over_errors = priv->rx_over_errors;
821 netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
822 }
823 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
824 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
825 netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
826 }
827out:
828 return budget - quota;
829}
830
831/* PHY state control function */
832static void ravb_adjust_link(struct net_device *ndev)
833{
834 struct ravb_private *priv = netdev_priv(ndev);
835 struct phy_device *phydev = priv->phydev;
836 bool new_state = false;
837
838 if (phydev->link) {
839 if (phydev->duplex != priv->duplex) {
840 new_state = true;
841 priv->duplex = phydev->duplex;
842 ravb_set_duplex(ndev);
843 }
844
845 if (phydev->speed != priv->speed) {
846 new_state = true;
847 priv->speed = phydev->speed;
848 ravb_set_rate(ndev);
849 }
850 if (!priv->link) {
851 ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
852 ECMR);
853 new_state = true;
854 priv->link = phydev->link;
855 if (priv->no_avb_link)
856 ravb_rcv_snd_enable(ndev);
857 }
858 } else if (priv->link) {
859 new_state = true;
860 priv->link = 0;
861 priv->speed = 0;
862 priv->duplex = -1;
863 if (priv->no_avb_link)
864 ravb_rcv_snd_disable(ndev);
865 }
866
867 if (new_state && netif_msg_link(priv))
868 phy_print_status(phydev);
869}
870
871/* PHY init function */
872static int ravb_phy_init(struct net_device *ndev)
873{
874 struct device_node *np = ndev->dev.parent->of_node;
875 struct ravb_private *priv = netdev_priv(ndev);
876 struct phy_device *phydev;
877 struct device_node *pn;
878
879 priv->link = 0;
880 priv->speed = 0;
881 priv->duplex = -1;
882
883 /* Try connecting to PHY */
884 pn = of_parse_phandle(np, "phy-handle", 0);
885 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
886 priv->phy_interface);
887 if (!phydev) {
888 netdev_err(ndev, "failed to connect PHY\n");
889 return -ENOENT;
890 }
891
892 /* This driver only supports 10/100Mbit speeds on Gen3
893 * at this time.
894 */
895 if (priv->chip_id == RCAR_GEN3) {
896 int err;
897
898 err = phy_set_max_speed(phydev, SPEED_100);
899 if (err) {
900 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
901 phy_disconnect(phydev);
902 return err;
903 }
904
905 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
906 }
907
908 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
909 phydev->addr, phydev->irq, phydev->drv->name);
910
911 priv->phydev = phydev;
912
913 return 0;
914}
915
916/* PHY control start function */
917static int ravb_phy_start(struct net_device *ndev)
918{
919 struct ravb_private *priv = netdev_priv(ndev);
920 int error;
921
922 error = ravb_phy_init(ndev);
923 if (error)
924 return error;
925
926 phy_start(priv->phydev);
927
928 return 0;
929}
930
931static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
932{
933 struct ravb_private *priv = netdev_priv(ndev);
934 int error = -ENODEV;
935 unsigned long flags;
936
937 if (priv->phydev) {
938 spin_lock_irqsave(&priv->lock, flags);
939 error = phy_ethtool_gset(priv->phydev, ecmd);
940 spin_unlock_irqrestore(&priv->lock, flags);
941 }
942
943 return error;
944}
945
946static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
947{
948 struct ravb_private *priv = netdev_priv(ndev);
949 unsigned long flags;
950 int error;
951
952 if (!priv->phydev)
953 return -ENODEV;
954
955 spin_lock_irqsave(&priv->lock, flags);
956
957 /* Disable TX and RX */
958 ravb_rcv_snd_disable(ndev);
959
960 error = phy_ethtool_sset(priv->phydev, ecmd);
961 if (error)
962 goto error_exit;
963
964 if (ecmd->duplex == DUPLEX_FULL)
965 priv->duplex = 1;
966 else
967 priv->duplex = 0;
968
969 ravb_set_duplex(ndev);
970
971error_exit:
972 mdelay(1);
973
974 /* Enable TX and RX */
975 ravb_rcv_snd_enable(ndev);
976
977 mmiowb();
978 spin_unlock_irqrestore(&priv->lock, flags);
979
980 return error;
981}
982
983static int ravb_nway_reset(struct net_device *ndev)
984{
985 struct ravb_private *priv = netdev_priv(ndev);
986 int error = -ENODEV;
987 unsigned long flags;
988
989 if (priv->phydev) {
990 spin_lock_irqsave(&priv->lock, flags);
991 error = phy_start_aneg(priv->phydev);
992 spin_unlock_irqrestore(&priv->lock, flags);
993 }
994
995 return error;
996}
997
998static u32 ravb_get_msglevel(struct net_device *ndev)
999{
1000 struct ravb_private *priv = netdev_priv(ndev);
1001
1002 return priv->msg_enable;
1003}
1004
1005static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1006{
1007 struct ravb_private *priv = netdev_priv(ndev);
1008
1009 priv->msg_enable = value;
1010}
1011
1012static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1013 "rx_queue_0_current",
1014 "tx_queue_0_current",
1015 "rx_queue_0_dirty",
1016 "tx_queue_0_dirty",
1017 "rx_queue_0_packets",
1018 "tx_queue_0_packets",
1019 "rx_queue_0_bytes",
1020 "tx_queue_0_bytes",
1021 "rx_queue_0_mcast_packets",
1022 "rx_queue_0_errors",
1023 "rx_queue_0_crc_errors",
1024 "rx_queue_0_frame_errors",
1025 "rx_queue_0_length_errors",
1026 "rx_queue_0_missed_errors",
1027 "rx_queue_0_over_errors",
1028
1029 "rx_queue_1_current",
1030 "tx_queue_1_current",
1031 "rx_queue_1_dirty",
1032 "tx_queue_1_dirty",
1033 "rx_queue_1_packets",
1034 "tx_queue_1_packets",
1035 "rx_queue_1_bytes",
1036 "tx_queue_1_bytes",
1037 "rx_queue_1_mcast_packets",
1038 "rx_queue_1_errors",
1039 "rx_queue_1_crc_errors",
1040 "rx_queue_1_frame_errors_",
1041 "rx_queue_1_length_errors",
1042 "rx_queue_1_missed_errors",
1043 "rx_queue_1_over_errors",
1044};
1045
1046#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
1047
1048static int ravb_get_sset_count(struct net_device *netdev, int sset)
1049{
1050 switch (sset) {
1051 case ETH_SS_STATS:
1052 return RAVB_STATS_LEN;
1053 default:
1054 return -EOPNOTSUPP;
1055 }
1056}
1057
1058static void ravb_get_ethtool_stats(struct net_device *ndev,
1059 struct ethtool_stats *stats, u64 *data)
1060{
1061 struct ravb_private *priv = netdev_priv(ndev);
1062 int i = 0;
1063 int q;
1064
1065 /* Device-specific stats */
1066 for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
1067 struct net_device_stats *stats = &priv->stats[q];
1068
1069 data[i++] = priv->cur_rx[q];
1070 data[i++] = priv->cur_tx[q];
1071 data[i++] = priv->dirty_rx[q];
1072 data[i++] = priv->dirty_tx[q];
1073 data[i++] = stats->rx_packets;
1074 data[i++] = stats->tx_packets;
1075 data[i++] = stats->rx_bytes;
1076 data[i++] = stats->tx_bytes;
1077 data[i++] = stats->multicast;
1078 data[i++] = stats->rx_errors;
1079 data[i++] = stats->rx_crc_errors;
1080 data[i++] = stats->rx_frame_errors;
1081 data[i++] = stats->rx_length_errors;
1082 data[i++] = stats->rx_missed_errors;
1083 data[i++] = stats->rx_over_errors;
1084 }
1085}
1086
1087static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1088{
1089 switch (stringset) {
1090 case ETH_SS_STATS:
1091 memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
1092 break;
1093 }
1094}
1095
1096static void ravb_get_ringparam(struct net_device *ndev,
1097 struct ethtool_ringparam *ring)
1098{
1099 struct ravb_private *priv = netdev_priv(ndev);
1100
1101 ring->rx_max_pending = BE_RX_RING_MAX;
1102 ring->tx_max_pending = BE_TX_RING_MAX;
1103 ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1104 ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1105}
1106
1107static int ravb_set_ringparam(struct net_device *ndev,
1108 struct ethtool_ringparam *ring)
1109{
1110 struct ravb_private *priv = netdev_priv(ndev);
1111 int error;
1112
1113 if (ring->tx_pending > BE_TX_RING_MAX ||
1114 ring->rx_pending > BE_RX_RING_MAX ||
1115 ring->tx_pending < BE_TX_RING_MIN ||
1116 ring->rx_pending < BE_RX_RING_MIN)
1117 return -EINVAL;
1118 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1119 return -EINVAL;
1120
1121 if (netif_running(ndev)) {
1122 netif_device_detach(ndev);
1123 /* Stop PTP Clock driver */
1124 ravb_ptp_stop(ndev);
1125 /* Wait for DMA stopping */
1126 error = ravb_stop_dma(ndev);
1127 if (error) {
1128 netdev_err(ndev,
1129 "cannot set ringparam! Any AVB processes are still running?\n");
1130 return error;
1131 }
1132 synchronize_irq(ndev->irq);
1133
1134 /* Free all the skb's in the RX queue and the DMA buffers. */
1135 ravb_ring_free(ndev, RAVB_BE);
1136 ravb_ring_free(ndev, RAVB_NC);
1137 }
1138
1139 /* Set new parameters */
1140 priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1141 priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1142
1143 if (netif_running(ndev)) {
1144 error = ravb_dmac_init(ndev);
1145 if (error) {
1146 netdev_err(ndev,
1147 "%s: ravb_dmac_init() failed, error %d\n",
1148 __func__, error);
1149 return error;
1150 }
1151
1152 ravb_emac_init(ndev);
1153
1154 /* Initialise PTP Clock driver */
1155 ravb_ptp_init(ndev, priv->pdev);
1156
1157 netif_device_attach(ndev);
1158 }
1159
1160 return 0;
1161}
1162
1163static int ravb_get_ts_info(struct net_device *ndev,
1164 struct ethtool_ts_info *info)
1165{
1166 struct ravb_private *priv = netdev_priv(ndev);
1167
1168 info->so_timestamping =
1169 SOF_TIMESTAMPING_TX_SOFTWARE |
1170 SOF_TIMESTAMPING_RX_SOFTWARE |
1171 SOF_TIMESTAMPING_SOFTWARE |
1172 SOF_TIMESTAMPING_TX_HARDWARE |
1173 SOF_TIMESTAMPING_RX_HARDWARE |
1174 SOF_TIMESTAMPING_RAW_HARDWARE;
1175 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1176 info->rx_filters =
1177 (1 << HWTSTAMP_FILTER_NONE) |
1178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1179 (1 << HWTSTAMP_FILTER_ALL);
1180 info->phc_index = ptp_clock_index(priv->ptp.clock);
1181
1182 return 0;
1183}
1184
1185static const struct ethtool_ops ravb_ethtool_ops = {
1186 .get_settings = ravb_get_settings,
1187 .set_settings = ravb_set_settings,
1188 .nway_reset = ravb_nway_reset,
1189 .get_msglevel = ravb_get_msglevel,
1190 .set_msglevel = ravb_set_msglevel,
1191 .get_link = ethtool_op_get_link,
1192 .get_strings = ravb_get_strings,
1193 .get_ethtool_stats = ravb_get_ethtool_stats,
1194 .get_sset_count = ravb_get_sset_count,
1195 .get_ringparam = ravb_get_ringparam,
1196 .set_ringparam = ravb_set_ringparam,
1197 .get_ts_info = ravb_get_ts_info,
1198};
1199
1200/* Network device open function for Ethernet AVB */
1201static int ravb_open(struct net_device *ndev)
1202{
1203 struct ravb_private *priv = netdev_priv(ndev);
1204 int error;
1205
1206 napi_enable(&priv->napi[RAVB_BE]);
1207 napi_enable(&priv->napi[RAVB_NC]);
1208
1209 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
1210 ndev);
1211 if (error) {
1212 netdev_err(ndev, "cannot request IRQ\n");
1213 goto out_napi_off;
1214 }
1215
1216 if (priv->chip_id == RCAR_GEN3) {
1217 error = request_irq(priv->emac_irq, ravb_interrupt,
1218 IRQF_SHARED, ndev->name, ndev);
1219 if (error) {
1220 netdev_err(ndev, "cannot request IRQ\n");
1221 goto out_free_irq;
1222 }
1223 }
1224
1225 /* Device init */
1226 error = ravb_dmac_init(ndev);
1227 if (error)
1228 goto out_free_irq;
1229 ravb_emac_init(ndev);
1230
1231 /* Initialise PTP Clock driver */
1232 ravb_ptp_init(ndev, priv->pdev);
1233
1234 netif_tx_start_all_queues(ndev);
1235
1236 /* PHY control start */
1237 error = ravb_phy_start(ndev);
1238 if (error)
1239 goto out_ptp_stop;
1240
1241 return 0;
1242
1243out_ptp_stop:
1244 /* Stop PTP Clock driver */
1245 ravb_ptp_stop(ndev);
1246out_free_irq:
1247 free_irq(ndev->irq, ndev);
1248 free_irq(priv->emac_irq, ndev);
1249out_napi_off:
1250 napi_disable(&priv->napi[RAVB_NC]);
1251 napi_disable(&priv->napi[RAVB_BE]);
1252 return error;
1253}
1254
1255/* Timeout function for Ethernet AVB */
1256static void ravb_tx_timeout(struct net_device *ndev)
1257{
1258 struct ravb_private *priv = netdev_priv(ndev);
1259
1260 netif_err(priv, tx_err, ndev,
1261 "transmit timed out, status %08x, resetting...\n",
1262 ravb_read(ndev, ISS));
1263
1264 /* tx_errors count up */
1265 ndev->stats.tx_errors++;
1266
1267 schedule_work(&priv->work);
1268}
1269
1270static void ravb_tx_timeout_work(struct work_struct *work)
1271{
1272 struct ravb_private *priv = container_of(work, struct ravb_private,
1273 work);
1274 struct net_device *ndev = priv->ndev;
1275
1276 netif_tx_stop_all_queues(ndev);
1277
1278 /* Stop PTP Clock driver */
1279 ravb_ptp_stop(ndev);
1280
1281 /* Wait for DMA stopping */
1282 ravb_stop_dma(ndev);
1283
1284 ravb_ring_free(ndev, RAVB_BE);
1285 ravb_ring_free(ndev, RAVB_NC);
1286
1287 /* Device init */
1288 ravb_dmac_init(ndev);
1289 ravb_emac_init(ndev);
1290
1291 /* Initialise PTP Clock driver */
1292 ravb_ptp_init(ndev, priv->pdev);
1293
1294 netif_tx_start_all_queues(ndev);
1295}
1296
1297/* Packet transmit function for Ethernet AVB */
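/* Each frame is sent using NUM_TX_DESC (two) descriptors: the leading bytes
 * of the skb are copied into a per-entry slot of the pre-allocated tx_align[]
 * bounce buffer so that the remaining payload, described by the second
 * descriptor, starts on a DPTR_ALIGN boundary. The pair is marked
 * DT_FSTART/DT_FEND and the original skb is freed later in ravb_tx_free().
 */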
1298static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1299{
1300 struct ravb_private *priv = netdev_priv(ndev);
1301 u16 q = skb_get_queue_mapping(skb);
1302 struct ravb_tstamp_skb *ts_skb;
1303 struct ravb_tx_desc *desc;
1304 unsigned long flags;
1305 u32 dma_addr;
1306 void *buffer;
1307 u32 entry;
1308 u32 len;
1309
1310 spin_lock_irqsave(&priv->lock, flags);
1311 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1312 NUM_TX_DESC) {
1313 netif_err(priv, tx_queued, ndev,
1314 "still transmitting with the full ring!\n");
1315 netif_stop_subqueue(ndev, q);
1316 spin_unlock_irqrestore(&priv->lock, flags);
1317 return NETDEV_TX_BUSY;
1318 }
1319 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
1320 priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
1321
1322 if (skb_put_padto(skb, ETH_ZLEN))
1323 goto drop;
1324
1325 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1326 entry / NUM_TX_DESC * DPTR_ALIGN;
1327 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1328 memcpy(buffer, skb->data, len);
1329 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
1330 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1331 goto drop;
1332
1333 desc = &priv->tx_ring[q][entry];
1334 desc->ds_tagl = cpu_to_le16(len);
1335 desc->dptr = cpu_to_le32(dma_addr);
1336
1337 buffer = skb->data + len;
1338 len = skb->len - len;
1339 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
1340 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1341 goto unmap;
1342
1343 desc++;
1344 desc->ds_tagl = cpu_to_le16(len);
1345 desc->dptr = cpu_to_le32(dma_addr);
1346
1347 /* TX timestamp required */
1348 if (q == RAVB_NC) {
1349 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
1350 if (!ts_skb) {
1351 desc--;
1352 dma_unmap_single(ndev->dev.parent, dma_addr, len,
1353 DMA_TO_DEVICE);
1354 goto unmap;
1355 }
1356 ts_skb->skb = skb;
1357 ts_skb->tag = priv->ts_skb_tag++;
1358 priv->ts_skb_tag &= 0x3ff;
1359 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
1360
1361 /* TAG and timestamp required flag */
1362 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1363 skb_tx_timestamp(skb);
1364 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1365 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
1366 }
1367
1368 /* Descriptor type must be set after all the above writes */
1369 dma_wmb();
1370 desc->die_dt = DT_FEND;
1371 desc--;
1372 desc->die_dt = DT_FSTART;
1373
1374 ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
1375
1376 priv->cur_tx[q] += NUM_TX_DESC;
1377 if (priv->cur_tx[q] - priv->dirty_tx[q] >
1378 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
1379 netif_stop_subqueue(ndev, q);
1380
1381exit:
1382 mmiowb();
1383 spin_unlock_irqrestore(&priv->lock, flags);
1384 return NETDEV_TX_OK;
1385
1386unmap:
1387 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
1388 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
1389drop:
1390 dev_kfree_skb_any(skb);
1391 priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
1392 goto exit;
1393}
1394
1395static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
1396 void *accel_priv, select_queue_fallback_t fallback)
1397{
1398 /* If skb needs TX timestamp, it is handled in network control queue */
1399 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
1400 RAVB_BE;
1401
1402}
1403
1404static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
1405{
1406 struct ravb_private *priv = netdev_priv(ndev);
1407 struct net_device_stats *nstats, *stats0, *stats1;
1408
1409 nstats = &ndev->stats;
1410 stats0 = &priv->stats[RAVB_BE];
1411 stats1 = &priv->stats[RAVB_NC];
1412
1413 nstats->tx_dropped += ravb_read(ndev, TROCR);
1414 ravb_write(ndev, 0, TROCR); /* (write clear) */
1415 nstats->collisions += ravb_read(ndev, CDCR);
1416 ravb_write(ndev, 0, CDCR); /* (write clear) */
1417 nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
1418 ravb_write(ndev, 0, LCCR); /* (write clear) */
1419
1420 nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
1421 ravb_write(ndev, 0, CERCR); /* (write clear) */
1422 nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
1423 ravb_write(ndev, 0, CEECR); /* (write clear) */
1424
1425 nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
1426 nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
1427 nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
1428 nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
1429 nstats->multicast = stats0->multicast + stats1->multicast;
1430 nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
1431 nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
1432 nstats->rx_frame_errors =
1433 stats0->rx_frame_errors + stats1->rx_frame_errors;
1434 nstats->rx_length_errors =
1435 stats0->rx_length_errors + stats1->rx_length_errors;
1436 nstats->rx_missed_errors =
1437 stats0->rx_missed_errors + stats1->rx_missed_errors;
1438 nstats->rx_over_errors =
1439 stats0->rx_over_errors + stats1->rx_over_errors;
1440
1441 return nstats;
1442}
1443
1444/* Update promiscuous bit */
1445static void ravb_set_rx_mode(struct net_device *ndev)
1446{
1447 struct ravb_private *priv = netdev_priv(ndev);
1448 unsigned long flags;
1449 u32 ecmr;
1450
1451 spin_lock_irqsave(&priv->lock, flags);
1452 ecmr = ravb_read(ndev, ECMR);
1453 if (ndev->flags & IFF_PROMISC)
1454 ecmr |= ECMR_PRM;
1455 else
1456 ecmr &= ~ECMR_PRM;
1457 ravb_write(ndev, ecmr, ECMR);
1458 mmiowb();
1459 spin_unlock_irqrestore(&priv->lock, flags);
1460}
1461
1462/* Device close function for Ethernet AVB */
1463static int ravb_close(struct net_device *ndev)
1464{
1465 struct ravb_private *priv = netdev_priv(ndev);
1466 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
1467
1468 netif_tx_stop_all_queues(ndev);
1469
1470 /* Disable interrupts by clearing the interrupt masks. */
1471 ravb_write(ndev, 0, RIC0);
1472 ravb_write(ndev, 0, RIC1);
1473 ravb_write(ndev, 0, RIC2);
1474 ravb_write(ndev, 0, TIC);
1475
1476 /* Stop PTP Clock driver */
1477 ravb_ptp_stop(ndev);
1478
1479 /* Set the config mode to stop the AVB-DMAC's processes */
1480 if (ravb_stop_dma(ndev) < 0)
1481 netdev_err(ndev,
1482 "device will be stopped after h/w processes are done.\n");
1483
1484 /* Clear the timestamp list */
1485 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
1486 list_del(&ts_skb->list);
1487 kfree(ts_skb);
1488 }
1489
1490 /* PHY disconnect */
1491 if (priv->phydev) {
1492 phy_stop(priv->phydev);
1493 phy_disconnect(priv->phydev);
1494 priv->phydev = NULL;
1495 }
1496
1497 free_irq(ndev->irq, ndev);
1498
1499 napi_disable(&priv->napi[RAVB_NC]);
1500 napi_disable(&priv->napi[RAVB_BE]);
1501
1502 /* Free all the skb's in the RX queue and the DMA buffers. */
1503 ravb_ring_free(ndev, RAVB_BE);
1504 ravb_ring_free(ndev, RAVB_NC);
1505
1506 return 0;
1507}
1508
1509static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
1510{
1511 struct ravb_private *priv = netdev_priv(ndev);
1512 struct hwtstamp_config config;
1513
1514 config.flags = 0;
1515 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1516 HWTSTAMP_TX_OFF;
1517 if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
1518 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1519 else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
1520 config.rx_filter = HWTSTAMP_FILTER_ALL;
1521 else
1522 config.rx_filter = HWTSTAMP_FILTER_NONE;
1523
1524 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1525 -EFAULT : 0;
1526}
1527
1528/* Control hardware time stamping */
1529static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
1530{
1531 struct ravb_private *priv = netdev_priv(ndev);
1532 struct hwtstamp_config config;
1533 u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
1534 u32 tstamp_tx_ctrl;
1535
1536 if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1537 return -EFAULT;
1538
1539 /* Reserved for future extensions */
1540 if (config.flags)
1541 return -EINVAL;
1542
1543 switch (config.tx_type) {
1544 case HWTSTAMP_TX_OFF:
1545 tstamp_tx_ctrl = 0;
1546 break;
1547 case HWTSTAMP_TX_ON:
1548 tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
1549 break;
1550 default:
1551 return -ERANGE;
1552 }
1553
1554 switch (config.rx_filter) {
1555 case HWTSTAMP_FILTER_NONE:
1556 tstamp_rx_ctrl = 0;
1557 break;
1558 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1559 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
1560 break;
1561 default:
1562 config.rx_filter = HWTSTAMP_FILTER_ALL;
1563 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
1564 }
1565
1566 priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1567 priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1568
1569 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1570 -EFAULT : 0;
1571}
1572
1573/* ioctl to device function */
1574static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1575{
1576 struct ravb_private *priv = netdev_priv(ndev);
1577 struct phy_device *phydev = priv->phydev;
1578
1579 if (!netif_running(ndev))
1580 return -EINVAL;
1581
1582 if (!phydev)
1583 return -ENODEV;
1584
1585 switch (cmd) {
1586 case SIOCGHWTSTAMP:
1587 return ravb_hwtstamp_get(ndev, req);
1588 case SIOCSHWTSTAMP:
1589 return ravb_hwtstamp_set(ndev, req);
1590 }
1591
1592 return phy_mii_ioctl(phydev, req, cmd);
1593}
1594
1595static const struct net_device_ops ravb_netdev_ops = {
1596 .ndo_open = ravb_open,
1597 .ndo_stop = ravb_close,
1598 .ndo_start_xmit = ravb_start_xmit,
1599 .ndo_select_queue = ravb_select_queue,
1600 .ndo_get_stats = ravb_get_stats,
1601 .ndo_set_rx_mode = ravb_set_rx_mode,
1602 .ndo_tx_timeout = ravb_tx_timeout,
1603 .ndo_do_ioctl = ravb_do_ioctl,
1604 .ndo_validate_addr = eth_validate_addr,
1605 .ndo_set_mac_address = eth_mac_addr,
1606 .ndo_change_mtu = eth_change_mtu,
1607};
1608
1609/* MDIO bus init function */
1610static int ravb_mdio_init(struct ravb_private *priv)
1611{
1612 struct platform_device *pdev = priv->pdev;
1613 struct device *dev = &pdev->dev;
1614 int error;
1615
1616 /* Bitbang init */
1617 priv->mdiobb.ops = &bb_ops;
1618
1619 /* MII controller setting */
1620 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1621 if (!priv->mii_bus)
1622 return -ENOMEM;
1623
1624 /* Hook up MII support for ethtool */
1625 priv->mii_bus->name = "ravb_mii";
1626 priv->mii_bus->parent = dev;
1627 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1628 pdev->name, pdev->id);
1629
1630 /* Register MDIO bus */
1631 error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1632 if (error)
1633 goto out_free_bus;
1634
1635 return 0;
1636
1637out_free_bus:
1638 free_mdio_bitbang(priv->mii_bus);
1639 return error;
1640}
1641
1642/* MDIO bus release function */
1643static int ravb_mdio_release(struct ravb_private *priv)
1644{
1645 /* Unregister mdio bus */
1646 mdiobus_unregister(priv->mii_bus);
1647
1648 /* Free bitbang info */
1649 free_mdio_bitbang(priv->mii_bus);
1650
1651 return 0;
1652}
1653
1654static const struct of_device_id ravb_match_table[] = {
1655 { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
1656 { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
1657 { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
1658 { }
1659};
1660MODULE_DEVICE_TABLE(of, ravb_match_table);
1661
1662static int ravb_probe(struct platform_device *pdev)
1663{
1664 struct device_node *np = pdev->dev.of_node;
1665 const struct of_device_id *match;
1666 struct ravb_private *priv;
1667 enum ravb_chip_id chip_id;
1668 struct net_device *ndev;
1669 int error, irq, q;
1670 struct resource *res;
1671
1672 if (!np) {
1673 dev_err(&pdev->dev,
1674 "this driver is required to be instantiated from device tree\n");
1675 return -EINVAL;
1676 }
1677
1678 /* Get base address */
1679 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1680 if (!res) {
1681 dev_err(&pdev->dev, "invalid resource\n");
1682 return -EINVAL;
1683 }
1684
1685 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
1686 NUM_TX_QUEUE, NUM_RX_QUEUE);
1687 if (!ndev)
1688 return -ENOMEM;
1689
1690 pm_runtime_enable(&pdev->dev);
1691 pm_runtime_get_sync(&pdev->dev);
1692
1693 /* The Ether-specific entries in the device structure. */
1694 ndev->base_addr = res->start;
1695 ndev->dma = -1;
1696
1697 match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
1698 chip_id = (enum ravb_chip_id)match->data;
1699
1700 if (chip_id == RCAR_GEN3)
1701 irq = platform_get_irq_byname(pdev, "ch22");
1702 else
1703 irq = platform_get_irq(pdev, 0);
1704 if (irq < 0) {
1705 error = irq;
1706 goto out_release;
1707 }
1708 ndev->irq = irq;
1709
1710 SET_NETDEV_DEV(ndev, &pdev->dev);
1711
1712 priv = netdev_priv(ndev);
1713 priv->ndev = ndev;
1714 priv->pdev = pdev;
1715 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
1716 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
1717 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
1718 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
1719 priv->addr = devm_ioremap_resource(&pdev->dev, res);
1720 if (IS_ERR(priv->addr)) {
1721 error = PTR_ERR(priv->addr);
1722 goto out_release;
1723 }
1724
1725 spin_lock_init(&priv->lock);
1726 INIT_WORK(&priv->work, ravb_tx_timeout_work);
1727
1728 priv->phy_interface = of_get_phy_mode(np);
1729
1730 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
1731 priv->avb_link_active_low =
1732 of_property_read_bool(np, "renesas,ether-link-active-low");
1733
1734 if (chip_id == RCAR_GEN3) {
1735 irq = platform_get_irq_byname(pdev, "ch24");
1736 if (irq < 0) {
1737 error = irq;
1738 goto out_release;
1739 }
1740 priv->emac_irq = irq;
1741 }
1742
1743 priv->chip_id = chip_id;
1744
1745 /* Set function */
1746 ndev->netdev_ops = &ravb_netdev_ops;
1747 ndev->ethtool_ops = &ravb_ethtool_ops;
1748
1749 /* Set AVB config mode */
1750 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
1751 CCC);
1752
1753 /* Set CSEL value */
1754 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1755 CCC);
1756
1757 /* Set GTI value */
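/* GTI.TIV holds the gPTP timer increment in units of 2^-20 ns, i.e.
 * TIV = (10^9 << 20) / (clock rate in Hz). The hard-coded expression below,
 * (1000 << 20) / 130, is that formula evaluated for a 130 MHz clock, which
 * appears to assume the CSEL_HPB clock selected just above.
 */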
1758 ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
1759
1760 /* Request GTI loading */
1761 ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
1762
1763 /* Allocate descriptor base address table */
1764 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
1765 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
1766 &priv->desc_bat_dma, GFP_KERNEL);
1767 if (!priv->desc_bat) {
1768 dev_err(&ndev->dev,
1769 "Cannot allocate desc base address table (size %d bytes)\n",
1770 priv->desc_bat_size);
1771 error = -ENOMEM;
1772 goto out_release;
1773 }
1774 for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
1775 priv->desc_bat[q].die_dt = DT_EOS;
1776 ravb_write(ndev, priv->desc_bat_dma, DBAT);
1777
1778 /* Initialise HW timestamp list */
1779 INIT_LIST_HEAD(&priv->ts_skb_list);
1780
1781 /* Debug message level */
1782 priv->msg_enable = RAVB_DEF_MSG_ENABLE;
1783
1784 /* Read and set MAC address */
1785 ravb_read_mac_address(ndev, of_get_mac_address(np));
1786 if (!is_valid_ether_addr(ndev->dev_addr)) {
1787 dev_warn(&pdev->dev,
1788 "no valid MAC address supplied, using a random one\n");
1789 eth_hw_addr_random(ndev);
1790 }
1791
1792 /* MDIO bus init */
1793 error = ravb_mdio_init(priv);
1794 if (error) {
1795 dev_err(&ndev->dev, "failed to initialize MDIO\n");
1796 goto out_dma_free;
1797 }
1798
1799 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
1800 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
1801
1802 /* Network device register */
1803 error = register_netdev(ndev);
1804 if (error)
1805 goto out_napi_del;
1806
1807 /* Print device information */
1808 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
1809 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1810
1811 platform_set_drvdata(pdev, ndev);
1812
1813 return 0;
1814
1815out_napi_del:
1816 netif_napi_del(&priv->napi[RAVB_NC]);
1817 netif_napi_del(&priv->napi[RAVB_BE]);
1818 ravb_mdio_release(priv);
1819out_dma_free:
1820 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
1821 priv->desc_bat_dma);
1822out_release:
1823 if (ndev)
1824 free_netdev(ndev);
1825
1826 pm_runtime_put(&pdev->dev);
1827 pm_runtime_disable(&pdev->dev);
1828 return error;
1829}
1830
1831static int ravb_remove(struct platform_device *pdev)
1832{
1833 struct net_device *ndev = platform_get_drvdata(pdev);
1834 struct ravb_private *priv = netdev_priv(ndev);
1835
1836 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
1837 priv->desc_bat_dma);
1838 /* Set reset mode */
1839 ravb_write(ndev, CCC_OPC_RESET, CCC);
1840 pm_runtime_put_sync(&pdev->dev);
1841 unregister_netdev(ndev);
1842 netif_napi_del(&priv->napi[RAVB_NC]);
1843 netif_napi_del(&priv->napi[RAVB_BE]);
1844 ravb_mdio_release(priv);
1845 pm_runtime_disable(&pdev->dev);
1846 free_netdev(ndev);
1847 platform_set_drvdata(pdev, NULL);
1848
1849 return 0;
1850}
1851
1852#ifdef CONFIG_PM
1853static int ravb_runtime_nop(struct device *dev)
1854{
1855 /* Runtime PM callback shared between ->runtime_suspend()
1856 * and ->runtime_resume(). Simply returns success.
1857 *
1858 * This driver re-initializes all registers after
1859 * pm_runtime_get_sync() anyway so there is no need
1860 * to save and restore registers here.
1861 */
1862 return 0;
1863}
1864
1865static const struct dev_pm_ops ravb_dev_pm_ops = {
1866 .runtime_suspend = ravb_runtime_nop,
1867 .runtime_resume = ravb_runtime_nop,
1868};
1869
1870#define RAVB_PM_OPS (&ravb_dev_pm_ops)
1871#else
1872#define RAVB_PM_OPS NULL
1873#endif
1874
1875static struct platform_driver ravb_driver = {
1876 .probe = ravb_probe,
1877 .remove = ravb_remove,
1878 .driver = {
1879 .name = "ravb",
1880 .pm = RAVB_PM_OPS,
1881 .of_match_table = ravb_match_table,
1882 },
1883};
1884
1885module_platform_driver(ravb_driver);
1886
1887MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
1888MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
1889MODULE_LICENSE("GPL v2");