/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
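/* Each MTK_ETHTOOL_STAT() entry pairs a counter name with its u64-granular
 * index into struct mtk_hw_stats; for example MTK_ETHTOOL_STAT(tx_bytes)
 * expands to
 *
 *	{ "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) }
 *
 * so mtk_get_ethtool_stats() below can copy the counters out with plain
 * pointer arithmetic over the stats structure.
 */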
static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp1", "gp2"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}
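/* mtk_w32()/mtk_r32() are thin MMIO accessors over the frame engine
 * register window at eth->base. The __raw variants perform no byte
 * swapping and imply no memory barriers, so paths that hand descriptors
 * to the DMA engine order their stores explicitly with wmb() before
 * kicking the hardware (see mtk_tx_map() and the ring setup below).
 */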
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}
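/* The two helpers above drive the PHY Indirect Access Control (IAC)
 * register: the caller packs the opcode (PHY_IAC_READ or PHY_IAC_WRITE),
 * the register number and the PHY address into one 32-bit write, the
 * hardware clears PHY_IAC_ACCESS when the MDIO transaction completes,
 * and for reads the 16-bit result comes back in the low half of the
 * same register.
 */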
static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	switch (mac->phy_dev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->phy_dev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (mac->phy_dev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (mac->phy_dev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (mac->phy_dev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (mac->phy_dev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (mac->phy_dev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
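/* Flow control is resolved only for full-duplex links: the local and
 * remote pause advertisements are folded into the standard
 * ADVERTISE_PAUSE_CAP and LPA_PAUSE_CAP style bits and handed to
 * mii_resolve_flowctrl_fdx(). For example, when both ends advertise
 * symmetric pause the helper returns FLOW_CTRL_TX | FLOW_CTRL_RX and
 * both force bits get set in the MAC control register above.
 */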
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	const __be32 *_addr = NULL;
	struct phy_device *phydev;
	int phy_mode, addr;

	_addr = of_get_property(phy_node, "reg", NULL);

	if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
		pr_err("%s: invalid phy address\n", phy_node->name);
		return -EINVAL;
	}
	addr = be32_to_cpu(*_addr);
	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	mac->phy_dev = phydev;

	return 0;
}

static int mtk_phy_connect(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	struct device_node *np;
	u32 val, ge_mode;

	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	mtk_phy_connect_node(eth, mac, np);
	mac->phy_dev->autoneg = AUTONEG_ENABLE;
	mac->phy_dev->speed = 0;
	mac->phy_dev->duplex = 0;

	if (of_phy_is_fixed_link(mac->of_node))
		mac->phy_dev->supported |=
		SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				   SUPPORTED_Asym_Pause;
	mac->phy_dev->advertising = mac->phy_dev->supported |
				    ADVERTISED_Autoneg;
	phy_start_aneg(mac->phy_dev);

	of_node_put(np);

	return 0;

err_phy:
	of_node_put(np);
	dev_err(eth->dev, "invalid phy_mode\n");
	return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int err;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		err = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = mdiobus_alloc();
	if (!eth->mii_bus) {
		err = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	err = of_mdiobus_register(eth->mii_bus, mii_np);
	if (err)
		goto err_free_bus;

	return 0;

err_free_bus:
	mdiobus_free(eth->mii_bus);

err_put_node:
	of_node_put(mii_np);
	eth->mii_bus = NULL;
	return err;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
	of_node_put(eth->mii_bus->dev.of_node);
	mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;
	unsigned long flags;

	if (ret)
		return ret;

	spin_lock_irqsave(&mac->hw->page_lock, flags);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_irqrestore(&mac->hw->page_lock, flags);

	return 0;
}
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}
static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}
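/* Counter reads follow the u64_stats seqcount pattern:
 * mtk_stats_update_mac() brackets its updates with
 * u64_stats_update_begin()/u64_stats_update_end(), and readers loop on
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() until they
 * observe a stable snapshot. On 64-bit machines this costs nothing; on
 * 32-bit machines it protects against torn 64-bit counter reads.
 */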
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}
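/* The two sizing helpers above are duals: mtk_max_frag_size() rounds an
 * MTU up to the page-fragment size needed to hold the frame plus the
 * skb_shared_info tail, and mtk_max_buf_size() recovers the usable DMA
 * buffer length from such a fragment size by subtracting the
 * NET_SKB_PAD + NET_IP_ALIGN headroom and the shared-info tail again.
 * The WARN_ON() guards the invariant that a maximum-sized RX frame
 * (MTK_MAX_RX_LENGTH) always fits in the resulting buffer.
 */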
static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
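/* Layout of the free-queue scratch ring set up above: each descriptor's
 * txd1 points at one MTK_QDMA_PAGE_SIZE chunk of the scratch buffer,
 * txd2 links to the physical address of the next descriptor (forming a
 * singly linked list from MTK_QDMA_FQ_HEAD to MTK_QDMA_FQ_TAIL), and
 * txd3 carries the chunk length. The QDMA engine draws from this pool
 * when it needs to stash or reorder TX data internally.
 */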
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}
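/* Both helpers rely on the TX ring being one physically contiguous
 * dma_alloc_coherent() allocation: a hardware descriptor pointer is
 * turned into a CPU pointer by adding its offset (desc - ring->phys)
 * to the ring base, and a descriptor's index doubles as the index into
 * the parallel ring->buf[] bookkeeping array. For example, with a
 * four-word (16-byte) descriptor, ring->phys == 0x1000 and
 * desc == 0x1040 map to ring->dma + 0x40, i.e. ring->buf[4].
 */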
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}
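/* Worst-case descriptor demand for one skb: the head always takes one
 * descriptor, and for GSO packets each fragment may have to be split
 * into MTK_TX_DMA_BUF_LEN sized pieces because that is all one
 * descriptor's PLEN0 field can express. E.g. assuming the usual 0x3fff
 * (14-bit) limit, a single 64kB TSO fragment costs
 * DIV_ROUND_UP(65536, 16383) == 5 descriptors.
 */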
static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	unsigned long flags;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock_irqsave(&eth->page_lock, flags);

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock_irqrestore(&eth->page_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock_irqrestore(&eth->page_lock, flags);

	return NETDEV_TX_OK;

drop:
	spin_unlock_irqrestore(&eth->page_lock, flags);
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int idx = ring->calc_idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
		done++;
	}

	if (done < budget)
		mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);

	return done;
}
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}
static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
	rx_done = mtk_poll_rx(napi, budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}

	if (rx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_RX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done;
}
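/* mtk_tx_alloc() below builds the TX ring as a fixed circle of
 * MTK_DMA_SIZE descriptors chained through txd2, all initially owned by
 * the CPU (TX_DMA_OWNER_CPU). free_count starts at MTK_DMA_SIZE - 2
 * rather than MTK_DMA_SIZE so that next_free can never advance onto
 * last_free, which keeps the full and empty ring states distinguishable.
 */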
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * start using it
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
*eth
)
1111 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1115 for (i
= 0; i
< MTK_DMA_SIZE
; i
++)
1116 mtk_tx_unmap(eth
, &ring
->buf
[i
]);
1122 dma_free_coherent(eth
->dev
,
1123 MTK_DMA_SIZE
* sizeof(*ring
->dma
),
1130 static int mtk_rx_alloc(struct mtk_eth
*eth
)
1132 struct mtk_rx_ring
*ring
= ð
->rx_ring
;
1135 ring
->frag_size
= mtk_max_frag_size(ETH_DATA_LEN
);
1136 ring
->buf_size
= mtk_max_buf_size(ring
->frag_size
);
1137 ring
->data
= kcalloc(MTK_DMA_SIZE
, sizeof(*ring
->data
),
1142 for (i
= 0; i
< MTK_DMA_SIZE
; i
++) {
1143 ring
->data
[i
] = netdev_alloc_frag(ring
->frag_size
);
1148 ring
->dma
= dma_alloc_coherent(eth
->dev
,
1149 MTK_DMA_SIZE
* sizeof(*ring
->dma
),
1151 GFP_ATOMIC
| __GFP_ZERO
);
1155 for (i
= 0; i
< MTK_DMA_SIZE
; i
++) {
1156 dma_addr_t dma_addr
= dma_map_single(eth
->dev
,
1157 ring
->data
[i
] + NET_SKB_PAD
,
1160 if (unlikely(dma_mapping_error(eth
->dev
, dma_addr
)))
1162 ring
->dma
[i
].rxd1
= (unsigned int)dma_addr
;
1164 ring
->dma
[i
].rxd2
= RX_DMA_PLEN0(ring
->buf_size
);
1166 ring
->calc_idx
= MTK_DMA_SIZE
- 1;
1167 /* make sure that all changes to the dma ring are flushed before we
1172 mtk_w32(eth
, eth
->rx_ring
.phys
, MTK_QRX_BASE_PTR0
);
1173 mtk_w32(eth
, MTK_DMA_SIZE
, MTK_QRX_MAX_CNT0
);
1174 mtk_w32(eth
, eth
->rx_ring
.calc_idx
, MTK_QRX_CRX_IDX0
);
1175 mtk_w32(eth
, MTK_PST_DRX_IDX0
, MTK_QDMA_RST_IDX
);
1176 mtk_w32(eth
, (QDMA_RES_THRES
<< 8) | QDMA_RES_THRES
, MTK_QTX_CFG(0));
static void mtk_rx_clean(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < MTK_DMA_SIZE; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth);
	if (err)
		return err;

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}
static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth);
	kfree(eth->scratch_head);
}
static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}
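/* Interrupt fan-out: the hard IRQ handlers above only acknowledge the
 * event by masking it (mtk_irq_disable()) and scheduling the matching
 * NAPI context. The actual work happens in mtk_napi_tx()/mtk_napi_rx(),
 * which poll the rings and re-enable the interrupt with
 * mtk_irq_enable() once a poll round completes under budget.
 */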
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;

	mtk_irq_disable(eth, int_mask);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_irq_enable(eth, int_mask);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
		MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
		MTK_QDMA_GLO_CFG);

	return 0;
}
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(mac->phy_dev);
	netif_start_queue(dev);

	return 0;
}
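/* Both GMACs share one set of DMA rings, so ring lifetime is reference
 * counted: the first mtk_open() brings up DMA, NAPI and interrupts, and
 * every open bumps dma_refcnt, while mtk_stop() below only tears the
 * shared state down when atomic_dec_and_test() says the last user is
 * gone.
 */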
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_irqsave(&eth->page_lock, flags);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_irqrestore(&eth->page_lock, flags);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(mac->phy_dev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}
static int __init mtk_hw_init(struct mtk_eth *eth)
{
	int err, i;

	/* reset the frame engine */
	reset_control_assert(eth->rstc);
	usleep_range(10, 20);
	reset_control_deassert(eth->rstc);
	usleep_range(10, 20);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* GE1, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

	/* GE2, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;
	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to QDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(mac);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(mac->phy_dev);
	mtk_mdio_cleanup(eth);
	mtk_irq_disable(eth, ~0);
	free_irq(eth->irq[1], dev);
	free_irq(eth->irq[2], dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}
	rtnl_unlock();
}
static int mtk_cleanup(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;

		unregister_netdev(eth->netdev[i]);
		free_netdev(eth->netdev[i]);
	}
	cancel_work_sync(&eth->pending_work);

	return 0;
}
static int mtk_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = phy_read_status(mac->phy_dev);
	if (err)
		return -ENODEV;

	return phy_ethtool_gset(mac->phy_dev, cmd);
}

static int mtk_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (cmd->phy_address != mac->phy_dev->mdio.addr) {
		mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
					       cmd->phy_address);
		if (!mac->phy_dev)
			return -ENODEV;
	}

	return phy_ethtool_sset(mac->phy_dev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return genphy_restart_aneg(mac->phy_dev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = (u64 *)hwstats;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
static struct ethtool_ops mtk_ethtool_ops = {
	.get_settings		= mtk_get_settings,
	.set_settings		= mtk_set_settings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
};
static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	err = register_netdev(eth->netdev[id]);
	if (err) {
		dev_err(eth->dev, "error bringing up device\n");
		goto free_netdev;
	}
	eth->netdev[id]->irq = eth->irq[0];
	netif_info(eth, probe, eth->netdev[id],
		   "mediatek frame engine at 0x%08lx, irq %d\n",
		   eth->netdev[id]->base_addr, eth->irq[0]);

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;
	int i;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
	if (IS_ERR(eth->rstc)) {
		dev_err(&pdev->dev, "no eth reset found\n");
		return PTR_ERR(eth->rstc);
	}

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			return -ENODEV;
		}
	}

	clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
	clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP2]);

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_free_dev;
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_free_dev:
	mtk_cleanup(eth);
	return err;
}
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
	clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
	clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");