/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
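/* Illustrative note, not from the original source: GENET_TDMA_REG_OFF and
 * GENET_RDMA_REG_OFF skip the block of descriptor RAM that precedes the DMA
 * register file.  E.g. assuming TOTAL_DESC = 256 and words_per_bd = 2
 * (8-byte descriptors on older GENET revisions), the TDMA register block
 * would begin at tdma_offset + 256 * 8 = tdma_offset + 0x800.
 */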
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}
static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take a couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv, void __iomem *d,
			       dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register accesses to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, skip these expensive
	 * accesses unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
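/* Illustrative usage sketch, not from the original source: a transmit path
 * programs a descriptor with one combined call and the address can be read
 * back later, e.g.:
 *
 *	dmadesc_set(priv, cb->bd_addr, mapping,
 *		    (len << DMA_BUFLENGTH_SHIFT) | DMA_SOP | DMA_EOP);
 *	addr = dmadesc_get_addr(priv, cb->bd_addr);
 *
 * The HI word is only touched when GENET_HAS_40BITS is set, as guarded
 * above, so non-LPAE platforms avoid the extra GISB access per packet.
 */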
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}
static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}
static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}
static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
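/* Illustrative note, not from the original source: because the RDMA names
 * alias the TDMA enumerators above, a single offset table serves both
 * directions.  E.g. RDMA_PROD_INDEX == TDMA_CONS_INDEX, so on v4 hardware
 * both resolve to ring offset 0x08; whether that word means "producer" or
 * "consumer" depends only on whether it is accessed through the RDMA or the
 * TDMA register block.
 */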
/* GENET v4 supports 40-bit pointer addressing.
 * For obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};
static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};
/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}
static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}
static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}
static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
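/* Worked example, not from the original source: the timeout field counts
 * ticks of the 125 MHz system clock divided by 1024, i.e. ~8.192 us per
 * tick.  A requested rx_coalesce_usecs of 100 therefore programs
 * DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks (~106.5 us), and reading it
 * back through bcmgenet_get_coalesce() reports 13 * 8192 / 1000 = 106 us.
 */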
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}
/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
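/* Illustrative note, not from the original source: write_ptr walks this
 * ring's slice of the global control-block array.  E.g. a ring with
 * cb_ptr = 32 and end_ptr = 63 wraps write_ptr back to 32 once it reaches
 * 63, so the returned tx_cb_ptr always stays within tx_cbs[32..63].
 */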
/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
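/* Worked example, not from the original source: the hardware consumer index
 * is a free-running counter masked to DMA_C_INDEX_MASK, so the wraparound
 * branch above keeps the subtraction modulo DMA_C_INDEX_MASK + 1.  E.g.
 * assuming a 16-bit mask, ring->c_index = 0xFFFE and a hardware c_index of
 * 0x0003 yield txbds_ready = 0x10000 - 0xFFFE + 3 = 5 completed descriptors.
 */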
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
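/* Worked example, not from the original source: the offsets written into
 * the status block are relative to the start of the frame after the 64-byte
 * TSB has been pushed, hence the "- sizeof(*status)" above.  E.g. for an
 * IPv4/TCP frame with 34 bytes of Ethernet+IP headers, the checksum start
 * lands at byte 34 + 64 = 98 of skb->data, so offset = 98 - 64 = 34, and
 * with a TCP csum_offset of 16 the result is written at byte 34 + 16 = 50.
 */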
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
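/* Illustrative note, not from the original source: the discard count rides
 * in the upper bits of RDMA_PROD_INDEX, so it is read and accumulated here
 * rather than through a MIB counter.  Clearing the register at 0xC000 (75%
 * of the 16-bit maximum, per the comment above) keeps the
 * "discards > ring->old_discards" comparison from ever seeing the hardware
 * counter wrap past the driver's accumulator.
 */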
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete(napi);
		ring->int_enable(ring);
	}

	return work_done;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assignment */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg;
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	int i;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Enable Rx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_RXDMA_DONE;

	/* Enable Tx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_TXDMA_DONE;

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	/* Enable Rx priority queue interrupts */
	for (i = 0; i < priv->hw_params->rx_queues; ++i)
		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));

	/* Enable Tx priority queue interrupts */
	for (i = 0; i < priv->hw_params->tx_queues; ++i)
		int1_enable |= (1 << i);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
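/* Worked example, not from the original source: ring start/end pointers are
 * expressed in 32-bit words of descriptor RAM, hence the words_per_bd
 * scaling above.  E.g. assuming words_per_bd = 3 (GENET v3+), a ring
 * covering BDs 32..63 is programmed with DMA_START_ADDR = 32 * 3 = 96 and
 * DMA_END_ADDR = 64 * 3 - 1 = 191.
 */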
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
}
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
2099 /* Initialize Tx queues
2101 * Queues 0-3 are priority-based, each one has 32 descriptors,
2102 * with queue 0 being the highest priority queue.
2104 * Queue 16 is the default Tx queue with
2105 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2107 * The transmit control block pool is then partitioned as follows:
2108 * - Tx queue 0 uses tx_cbs[0..31]
2109 * - Tx queue 1 uses tx_cbs[32..63]
2110 * - Tx queue 2 uses tx_cbs[64..95]
2111 * - Tx queue 3 uses tx_cbs[96..127]
2112 * - Tx queue 16 uses tx_cbs[128..255]
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Initialize Tx NAPI */
	bcmgenet_init_tx_napi(priv);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
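/* Note (illustrative): queue i is assigned priority value
 * GENET_Q0_PRIORITY + i and default queue 16 gets
 * GENET_Q0_PRIORITY + tx_queues, so with the strict priority arbiter
 * programmed above, queue 0 is serviced first and queue 16 last.
 * DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() are assumed to spread the
 * per-ring priority fields across the DMA_PRIORITY_0..2 registers
 * written above.
 */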
static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
}
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Initialize Rx NAPI */
	bcmgenet_init_rx_napi(priv);

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop adding more frames to TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
		phy_mac_interrupt(priv->phydev,
				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
	}
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);

	/* Check Rx priority queue interrupts */
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
			continue;

		rx_ring = &priv->rx_rings[index];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule(&rx_ring->napi);
		}
	}

	/* Check Tx priority queue interrupts */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(priv->irq1_stat & BIT(index)))
			continue;

		tx_ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule(&tx_ring->napi);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule(&rx_ring->napi);
		}
	}

	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule(&tx_ring->napi);
		}
	}

	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_EVENT |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
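/* A typical usage sketch (mirroring bcmgenet_open() below): the value
 * returned by bcmgenet_dma_disable() records exactly the DMA_CTRL bits
 * that were cleared, so it can be handed back to bcmgenet_enable_dma()
 * once the rings have been reinitialized:
 *
 *	dma_ctrl = bcmgenet_dma_disable(priv);
 *	ret = bcmgenet_init_dma(priv);
 *	...
 *	bcmgenet_enable_dma(priv, dma_ctrl);
 */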
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
					   u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	return !!(reg & (1 << (f_index % 32)));
}
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}
static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}
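/* Worked example (illustrative): each filter owns a 4-bit queue field,
 * eight filters per register. For f_index = 10, offset = 10 / 8 = 1
 * selects DMA_INDEX2RING_1, and the queue number is written at bit
 * position 4 * (10 % 8) = 8 of that register.
 */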
static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}
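/* Worked example (illustrative): filter lengths are packed one byte each,
 * four filters per HFB_FLT_LEN register, indexed from the top of the
 * filter table. With hfb_filter_cnt = 48 (GENETv3/v4 in the hw_params
 * table below), f_index = 0 lands in word (48 - 1 - 0) / 4 = 11 of the
 * HFB_FLT_LEN_V3PLUS block, at byte lane 8 * (0 % 4) = 0.
 */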
static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
{
	u32 f_index;

	for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
		if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
			return f_index;

	return -ENOMEM;
}
/* bcmgenet_hfb_add_filter
 *
 * Add new filter to Hardware Filter Block to match and direct Rx traffic to
 * desired Rx queue.
 *
 * f_data is an array of unsigned 32-bit integers where each 32-bit integer
 * provides filter data for 2 bytes (4 nibbles) of Rx frame:
 *
 * bits 31:20 - unused
 * bit  19    - nibble 0 match enable
 * bit  18    - nibble 1 match enable
 * bit  17    - nibble 2 match enable
 * bit  16    - nibble 3 match enable
 * bits 15:12 - nibble 0 data
 * bits 11:8  - nibble 1 data
 * bits 7:4   - nibble 2 data
 * bits 3:0   - nibble 3 data
 *
 * Example:
 * In order to match:
 * - Ethernet frame type = 0x0800 (IP)
 * - IP version field = 4
 * - IP protocol field = 0x11 (UDP)
 *
 * The following filter is needed:
 * u32 hfb_filter_ipv4_udp[] = {
 *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
 *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
 *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
 * };
 *
 * To add the filter to HFB and direct the traffic to Rx queue 0, call:
 * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
 *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
 */
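/* To see how one of the example words above is built (illustrative
 * derivation): 0x000F0800 covers frame bytes 12-13, the EtherType field;
 * bits 19:16 = 0xF enable all four nibbles and bits 15:0 = 0x0800 carry
 * the nibble data, so the word matches EtherType 0x0800 exactly.
 * 0x00084000 enables only nibble 0 (bit 19) with data 0x4, matching the
 * IP version field. In 0x00030011 only bits 17:16 = 0x3 are set, enabling
 * nibbles 2 and 3, which match the low byte 0x11 (the IPv4 protocol
 * field, UDP).
 */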
int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
			    u32 f_length, u32 rx_queue)
{
	int f_index;
	u32 i;

	f_index = bcmgenet_hfb_find_unused_filter(priv);
	if (f_index < 0)
		return -ENOMEM;

	if (f_length > priv->hw_params->hfb_filter_size)
		return -EINVAL;

	for (i = 0; i < f_length; i++)
		bcmgenet_hfb_writel(priv, f_data[i],
			(f_index * priv->hw_params->hfb_filter_size + i) *
			sizeof(u32));

	bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
	bcmgenet_hfb_enable_filter(priv, f_index);
	bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);

	return 0;
}
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
			priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_clear(priv);
}
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_enable_rx_napi(priv);
	bcmgenet_enable_tx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	netif_tx_start_all_queues(dev);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	clk_prepare_enable(priv->clk);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	/* HFB init */
	bcmgenet_hfb_init(priv);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	ret = bcmgenet_mii_probe(dev);
	if (ret) {
		netdev_err(dev, "failed to connect to PHY\n");
		goto err_irq1;
	}

	bcmgenet_netif_start(dev);

	return 0;

err_irq1:
	free_irq(priv->irq1, priv);
err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	phy_stop(priv->phydev);
	bcmgenet_intr_disable(priv);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_disable_tx_napi(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}
static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	unsigned long flags;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock_irqrestore(&ring->lock, flags);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
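/* Worked example (illustrative): each call above consumes two
 * UMAC_MDF_ADDR words (hence *i advancing by 2) and one enable bit in
 * UMAC_MDF_CTRL (hence *mc advancing by 1). With MAX_MC_COUNT = 16 slots
 * in total, bcmgenet_set_rx_mode() below spends the first two on the
 * broadcast and device addresses, leaving 14 entries for unicast and
 * multicast filters.
 */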
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcmgenet_poll_controller,
#endif
};
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, so we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
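	/* Worked example (illustrative register values): an old-scheme read
	 * of 0x00a0 has a non-zero nibble in bits 7:4, so the first branch
	 * below stores it shifted up as 0xa000; a new-scheme read such as
	 * 0x1001 (major 0x10, patch 0x01) already carries the major revision
	 * in bits 15:8 and is stored unchanged; 0x0000 and 0x01ff are
	 * rejected as reserved values.
	 */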
	gphy_rev = reg & 0xffff;

	/* This is the good old scheme, just GPHY major, no minor nor patch */
	if ((gphy_rev & 0xf0) != 0)
		priv->gphy_rev = gphy_rev << 8;

	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	else if ((gphy_rev & 0xff00) != 0)
		priv->gphy_rev = gphy_rev;

	/* This is reserved so should require special treatment */
	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id)
			return -EINVAL;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	if (dn) {
		macaddr = of_get_mac_address(dn);
		if (!macaddr) {
			dev_err(&pdev->dev, "can't find MAC address\n");
			err = -EINVAL;
			goto err;
		}
	} else {
		macaddr = pd->mac_address;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (of_id)
		priv->version = (enum bcmgenet_version)of_id->data;
	else
		priv->version = pd->genet_version;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
		priv->clk = NULL;
	}

	clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
		priv->clk_wol = NULL;
	}

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue)
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
*pdev
)
3479 struct bcmgenet_priv
*priv
= dev_to_priv(&pdev
->dev
);
3481 dev_set_drvdata(&pdev
->dev
, NULL
);
3482 unregister_netdev(priv
->dev
);
3483 bcmgenet_mii_exit(priv
->dev
);
3484 free_netdev(priv
->dev
);
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	} else if (priv->internal_phy) {
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return ret;
}
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");