/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
11 #include <linux/module.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/ktime.h>
16 #include <linux/of_address.h>
17 #include <linux/phy.h>
18 #include <linux/of_mdio.h>
19 #include <linux/of_net.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/regmap.h>
/* PPE (packet process engine) per-port registers, indexed via regmap */
#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

/* GMAC registers (relative to priv->base) */
#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#define PPE_HIS_RX_PKT_CNT		0x804

/* interrupt cause/enable bits */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#define TX_FINISH_CACHE_INV		BIT(2)
#define TX_CLEAR_WB			BIT(4)
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

/* RX error bits reported in the descriptor */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

/* port-mode speed encodings */
#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11

#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

/* ring sizes must stay powers of two: TX_NEXT/RX_NEXT mask-wrap */
#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

/* bounds accepted by the ethtool tx coalesce knobs */
#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100
157 unsigned int reg_inten
;
159 struct napi_struct napi
;
160 struct net_device
*ndev
;
162 struct tx_desc
*tx_desc
;
163 dma_addr_t tx_desc_dma
;
164 struct sk_buff
*tx_skb
[TX_DESC_NUM
];
165 dma_addr_t tx_phys
[TX_DESC_NUM
];
166 unsigned int tx_head
;
168 int tx_coalesce_frames
;
169 int tx_coalesce_usecs
;
170 struct hrtimer tx_coalesce_timer
;
172 unsigned char *rx_buf
[RX_DESC_NUM
];
173 dma_addr_t rx_phys
[RX_DESC_NUM
];
174 unsigned int rx_head
;
175 unsigned int rx_buf_size
;
177 struct device_node
*phy_node
;
178 struct phy_device
*phy
;
180 struct work_struct tx_timeout_task
;
182 /* written only by tx cleanup */
183 unsigned int tx_tail ____cacheline_aligned_in_smp
;
186 static inline unsigned int tx_count(unsigned int head
, unsigned int tail
)
188 return (head
- tail
) % (TX_DESC_NUM
- 1);
191 static void hip04_config_port(struct net_device
*ndev
, u32 speed
, u32 duplex
)
193 struct hip04_priv
*priv
= netdev_priv(ndev
);
197 priv
->duplex
= duplex
;
199 switch (priv
->phy_mode
) {
200 case PHY_INTERFACE_MODE_SGMII
:
201 if (speed
== SPEED_1000
)
202 val
= SGMII_SPEED_1000
;
203 else if (speed
== SPEED_100
)
204 val
= SGMII_SPEED_100
;
206 val
= SGMII_SPEED_10
;
208 case PHY_INTERFACE_MODE_MII
:
209 if (speed
== SPEED_100
)
215 netdev_warn(ndev
, "not supported mode\n");
219 writel_relaxed(val
, priv
->base
+ GE_PORT_MODE
);
221 val
= duplex
? GE_DUPLEX_FULL
: GE_DUPLEX_HALF
;
222 writel_relaxed(val
, priv
->base
+ GE_DUPLEX_TYPE
);
224 val
= GE_MODE_CHANGE_EN
;
225 writel_relaxed(val
, priv
->base
+ GE_MODE_CHANGE_REG
);
228 static void hip04_reset_ppe(struct hip04_priv
*priv
)
230 u32 val
, tmp
, timeout
= 0;
233 regmap_read(priv
->map
, priv
->port
* 4 + PPE_CURR_BUF_CNT
, &val
);
234 regmap_read(priv
->map
, priv
->port
* 4 + PPE_CFG_RX_ADDR
, &tmp
);
235 if (timeout
++ > RESET_TIMEOUT
)
237 } while (val
& 0xfff);
240 static void hip04_config_fifo(struct hip04_priv
*priv
)
244 val
= readl_relaxed(priv
->base
+ PPE_CFG_STS_MODE
);
245 val
|= PPE_CFG_STS_RX_PKT_CNT_RC
;
246 writel_relaxed(val
, priv
->base
+ PPE_CFG_STS_MODE
);
248 val
= BIT(priv
->port
);
249 regmap_write(priv
->map
, priv
->port
* 4 + PPE_CFG_POOL_GRP
, val
);
251 val
= priv
->port
<< PPE_CFG_QOS_VMID_GRP_SHIFT
;
252 val
|= PPE_CFG_QOS_VMID_MODE
;
253 writel_relaxed(val
, priv
->base
+ PPE_CFG_QOS_VMID_GEN
);
256 regmap_write(priv
->map
, priv
->port
* 4 + PPE_CFG_RX_BUF_SIZE
, val
);
258 val
= RX_DESC_NUM
<< PPE_CFG_RX_DEPTH_SHIFT
;
259 val
|= PPE_CFG_RX_FIFO_FSFU
;
260 val
|= priv
->chan
<< PPE_CFG_RX_START_SHIFT
;
261 regmap_write(priv
->map
, priv
->port
* 4 + PPE_CFG_RX_FIFO_SIZE
, val
);
263 val
= NET_IP_ALIGN
<< PPE_CFG_RX_CTRL_ALIGN_SHIFT
;
264 writel_relaxed(val
, priv
->base
+ PPE_CFG_RX_CTRL_REG
);
266 val
= PPE_CFG_RX_PKT_ALIGN
;
267 writel_relaxed(val
, priv
->base
+ PPE_CFG_RX_PKT_MODE_REG
);
269 val
= PPE_CFG_BUS_LOCAL_REL
| PPE_CFG_BUS_BIG_ENDIEN
;
270 writel_relaxed(val
, priv
->base
+ PPE_CFG_BUS_CTRL_REG
);
272 val
= GMAC_PPE_RX_PKT_MAX_LEN
;
273 writel_relaxed(val
, priv
->base
+ PPE_CFG_MAX_FRAME_LEN_REG
);
275 val
= GMAC_MAX_PKT_LEN
;
276 writel_relaxed(val
, priv
->base
+ GE_MAX_FRM_SIZE_REG
);
278 val
= GMAC_MIN_PKT_LEN
;
279 writel_relaxed(val
, priv
->base
+ GE_SHORT_RUNTS_THR_REG
);
281 val
= readl_relaxed(priv
->base
+ GE_TRANSMIT_CONTROL_REG
);
282 val
|= GE_TX_AUTO_NEG
| GE_TX_ADD_CRC
| GE_TX_SHORT_PAD_THROUGH
;
283 writel_relaxed(val
, priv
->base
+ GE_TRANSMIT_CONTROL_REG
);
285 val
= GE_RX_STRIP_CRC
;
286 writel_relaxed(val
, priv
->base
+ GE_CF_CRC_STRIP_REG
);
288 val
= readl_relaxed(priv
->base
+ GE_RECV_CONTROL_REG
);
289 val
|= GE_RX_STRIP_PAD
| GE_RX_PAD_EN
;
290 writel_relaxed(val
, priv
->base
+ GE_RECV_CONTROL_REG
);
292 val
= GE_AUTO_NEG_CTL
;
293 writel_relaxed(val
, priv
->base
+ GE_TX_LOCAL_PAGE_REG
);
296 static void hip04_mac_enable(struct net_device
*ndev
)
298 struct hip04_priv
*priv
= netdev_priv(ndev
);
302 val
= readl_relaxed(priv
->base
+ GE_PORT_EN
);
303 val
|= GE_RX_PORT_EN
| GE_TX_PORT_EN
;
304 writel_relaxed(val
, priv
->base
+ GE_PORT_EN
);
308 writel_relaxed(val
, priv
->base
+ PPE_RINT
);
310 /* config recv int */
311 val
= GE_RX_INT_THRESHOLD
| GE_RX_TIMEOUT
;
312 writel_relaxed(val
, priv
->base
+ PPE_CFG_RX_PKT_INT
);
314 /* enable interrupt */
315 priv
->reg_inten
= DEF_INT_MASK
;
316 writel_relaxed(priv
->reg_inten
, priv
->base
+ PPE_INTEN
);
319 static void hip04_mac_disable(struct net_device
*ndev
)
321 struct hip04_priv
*priv
= netdev_priv(ndev
);
325 priv
->reg_inten
&= ~(DEF_INT_MASK
);
326 writel_relaxed(priv
->reg_inten
, priv
->base
+ PPE_INTEN
);
328 /* disable tx & rx */
329 val
= readl_relaxed(priv
->base
+ GE_PORT_EN
);
330 val
&= ~(GE_RX_PORT_EN
| GE_TX_PORT_EN
);
331 writel_relaxed(val
, priv
->base
+ GE_PORT_EN
);
334 static void hip04_set_xmit_desc(struct hip04_priv
*priv
, dma_addr_t phys
)
336 writel(phys
, priv
->base
+ PPE_CFG_CPU_ADD_ADDR
);
339 static void hip04_set_recv_desc(struct hip04_priv
*priv
, dma_addr_t phys
)
341 regmap_write(priv
->map
, priv
->port
* 4 + PPE_CFG_RX_ADDR
, phys
);
344 static u32
hip04_recv_cnt(struct hip04_priv
*priv
)
346 return readl(priv
->base
+ PPE_HIS_RX_PKT_CNT
);
349 static void hip04_update_mac_address(struct net_device
*ndev
)
351 struct hip04_priv
*priv
= netdev_priv(ndev
);
353 writel_relaxed(((ndev
->dev_addr
[0] << 8) | (ndev
->dev_addr
[1])),
354 priv
->base
+ GE_STATION_MAC_ADDRESS
);
355 writel_relaxed(((ndev
->dev_addr
[2] << 24) | (ndev
->dev_addr
[3] << 16) |
356 (ndev
->dev_addr
[4] << 8) | (ndev
->dev_addr
[5])),
357 priv
->base
+ GE_STATION_MAC_ADDRESS
+ 4);
/* ndo_set_mac_address: validate/store via eth_mac_addr(), then push the
 * new address to the hardware registers.
 */
static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	hip04_update_mac_address(ndev);
	return 0;
}
367 static int hip04_tx_reclaim(struct net_device
*ndev
, bool force
)
369 struct hip04_priv
*priv
= netdev_priv(ndev
);
370 unsigned tx_tail
= priv
->tx_tail
;
371 struct tx_desc
*desc
;
372 unsigned int bytes_compl
= 0, pkts_compl
= 0;
376 count
= tx_count(ACCESS_ONCE(priv
->tx_head
), tx_tail
);
381 desc
= &priv
->tx_desc
[tx_tail
];
382 if (desc
->send_addr
!= 0) {
389 if (priv
->tx_phys
[tx_tail
]) {
390 dma_unmap_single(&ndev
->dev
, priv
->tx_phys
[tx_tail
],
391 priv
->tx_skb
[tx_tail
]->len
,
393 priv
->tx_phys
[tx_tail
] = 0;
396 bytes_compl
+= priv
->tx_skb
[tx_tail
]->len
;
397 dev_kfree_skb(priv
->tx_skb
[tx_tail
]);
398 priv
->tx_skb
[tx_tail
] = NULL
;
399 tx_tail
= TX_NEXT(tx_tail
);
403 priv
->tx_tail
= tx_tail
;
404 smp_wmb(); /* Ensure tx_tail visible to xmit */
407 if (pkts_compl
|| bytes_compl
)
408 netdev_completed_queue(ndev
, pkts_compl
, bytes_compl
);
410 if (unlikely(netif_queue_stopped(ndev
)) && (count
< (TX_DESC_NUM
- 1)))
411 netif_wake_queue(ndev
);
416 static int hip04_mac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
418 struct hip04_priv
*priv
= netdev_priv(ndev
);
419 struct net_device_stats
*stats
= &ndev
->stats
;
420 unsigned int tx_head
= priv
->tx_head
, count
;
421 struct tx_desc
*desc
= &priv
->tx_desc
[tx_head
];
425 count
= tx_count(tx_head
, ACCESS_ONCE(priv
->tx_tail
));
426 if (count
== (TX_DESC_NUM
- 1)) {
427 netif_stop_queue(ndev
);
428 return NETDEV_TX_BUSY
;
431 phys
= dma_map_single(&ndev
->dev
, skb
->data
, skb
->len
, DMA_TO_DEVICE
);
432 if (dma_mapping_error(&ndev
->dev
, phys
)) {
437 priv
->tx_skb
[tx_head
] = skb
;
438 priv
->tx_phys
[tx_head
] = phys
;
439 desc
->send_addr
= cpu_to_be32(phys
);
440 desc
->send_size
= cpu_to_be32(skb
->len
);
441 desc
->cfg
= cpu_to_be32(TX_CLEAR_WB
| TX_FINISH_CACHE_INV
);
442 phys
= priv
->tx_desc_dma
+ tx_head
* sizeof(struct tx_desc
);
443 desc
->wb_addr
= cpu_to_be32(phys
);
444 skb_tx_timestamp(skb
);
446 hip04_set_xmit_desc(priv
, phys
);
447 priv
->tx_head
= TX_NEXT(tx_head
);
449 netdev_sent_queue(ndev
, skb
->len
);
451 stats
->tx_bytes
+= skb
->len
;
454 /* Ensure tx_head update visible to tx reclaim */
457 /* queue is getting full, better start cleaning up now */
458 if (count
>= priv
->tx_coalesce_frames
) {
459 if (napi_schedule_prep(&priv
->napi
)) {
460 /* disable rx interrupt and timer */
461 priv
->reg_inten
&= ~(RCV_INT
);
462 writel_relaxed(DEF_INT_MASK
& ~RCV_INT
,
463 priv
->base
+ PPE_INTEN
);
464 hrtimer_cancel(&priv
->tx_coalesce_timer
);
465 __napi_schedule(&priv
->napi
);
467 } else if (!hrtimer_is_queued(&priv
->tx_coalesce_timer
)) {
468 /* cleanup not pending yet, start a new timer */
469 hrtimer_start_expires(&priv
->tx_coalesce_timer
,
476 static int hip04_rx_poll(struct napi_struct
*napi
, int budget
)
478 struct hip04_priv
*priv
= container_of(napi
, struct hip04_priv
, napi
);
479 struct net_device
*ndev
= priv
->ndev
;
480 struct net_device_stats
*stats
= &ndev
->stats
;
481 unsigned int cnt
= hip04_recv_cnt(priv
);
482 struct rx_desc
*desc
;
492 while (cnt
&& !last
) {
493 buf
= priv
->rx_buf
[priv
->rx_head
];
494 skb
= build_skb(buf
, priv
->rx_buf_size
);
496 net_dbg_ratelimited("build_skb failed\n");
498 dma_unmap_single(&ndev
->dev
, priv
->rx_phys
[priv
->rx_head
],
499 RX_BUF_SIZE
, DMA_FROM_DEVICE
);
500 priv
->rx_phys
[priv
->rx_head
] = 0;
502 desc
= (struct rx_desc
*)skb
->data
;
503 len
= be16_to_cpu(desc
->pkt_len
);
504 err
= be32_to_cpu(desc
->pkt_err
);
507 dev_kfree_skb_any(skb
);
509 } else if ((err
& RX_PKT_ERR
) || (len
>= GMAC_MAX_PKT_LEN
)) {
510 dev_kfree_skb_any(skb
);
514 skb_reserve(skb
, NET_SKB_PAD
+ NET_IP_ALIGN
);
516 skb
->protocol
= eth_type_trans(skb
, ndev
);
517 napi_gro_receive(&priv
->napi
, skb
);
519 stats
->rx_bytes
+= len
;
523 buf
= netdev_alloc_frag(priv
->rx_buf_size
);
526 phys
= dma_map_single(&ndev
->dev
, buf
,
527 RX_BUF_SIZE
, DMA_FROM_DEVICE
);
528 if (dma_mapping_error(&ndev
->dev
, phys
))
530 priv
->rx_buf
[priv
->rx_head
] = buf
;
531 priv
->rx_phys
[priv
->rx_head
] = phys
;
532 hip04_set_recv_desc(priv
, phys
);
534 priv
->rx_head
= RX_NEXT(priv
->rx_head
);
539 cnt
= hip04_recv_cnt(priv
);
542 if (!(priv
->reg_inten
& RCV_INT
)) {
543 /* enable rx interrupt */
544 priv
->reg_inten
|= RCV_INT
;
545 writel_relaxed(priv
->reg_inten
, priv
->base
+ PPE_INTEN
);
549 /* clean up tx descriptors and start a new timer if necessary */
550 tx_remaining
= hip04_tx_reclaim(ndev
, false);
551 if (rx
< budget
&& tx_remaining
)
552 hrtimer_start_expires(&priv
->tx_coalesce_timer
, HRTIMER_MODE_REL
);
557 static irqreturn_t
hip04_mac_interrupt(int irq
, void *dev_id
)
559 struct net_device
*ndev
= (struct net_device
*)dev_id
;
560 struct hip04_priv
*priv
= netdev_priv(ndev
);
561 struct net_device_stats
*stats
= &ndev
->stats
;
562 u32 ists
= readl_relaxed(priv
->base
+ PPE_INTSTS
);
567 writel_relaxed(DEF_INT_MASK
, priv
->base
+ PPE_RINT
);
569 if (unlikely(ists
& DEF_INT_ERR
)) {
570 if (ists
& (RCV_NOBUF
| RCV_DROP
)) {
573 netdev_err(ndev
, "rx drop\n");
575 if (ists
& TX_DROP
) {
577 netdev_err(ndev
, "tx drop\n");
581 if (ists
& RCV_INT
&& napi_schedule_prep(&priv
->napi
)) {
582 /* disable rx interrupt */
583 priv
->reg_inten
&= ~(RCV_INT
);
584 writel_relaxed(DEF_INT_MASK
& ~RCV_INT
, priv
->base
+ PPE_INTEN
);
585 hrtimer_cancel(&priv
->tx_coalesce_timer
);
586 __napi_schedule(&priv
->napi
);
592 enum hrtimer_restart
tx_done(struct hrtimer
*hrtimer
)
594 struct hip04_priv
*priv
;
596 priv
= container_of(hrtimer
, struct hip04_priv
, tx_coalesce_timer
);
598 if (napi_schedule_prep(&priv
->napi
)) {
599 /* disable rx interrupt */
600 priv
->reg_inten
&= ~(RCV_INT
);
601 writel_relaxed(DEF_INT_MASK
& ~RCV_INT
, priv
->base
+ PPE_INTEN
);
602 __napi_schedule(&priv
->napi
);
605 return HRTIMER_NORESTART
;
608 static void hip04_adjust_link(struct net_device
*ndev
)
610 struct hip04_priv
*priv
= netdev_priv(ndev
);
611 struct phy_device
*phy
= priv
->phy
;
613 if ((priv
->speed
!= phy
->speed
) || (priv
->duplex
!= phy
->duplex
)) {
614 hip04_config_port(ndev
, phy
->speed
, phy
->duplex
);
615 phy_print_status(phy
);
619 static int hip04_mac_open(struct net_device
*ndev
)
621 struct hip04_priv
*priv
= netdev_priv(ndev
);
627 hip04_reset_ppe(priv
);
629 for (i
= 0; i
< RX_DESC_NUM
; i
++) {
632 phys
= dma_map_single(&ndev
->dev
, priv
->rx_buf
[i
],
633 RX_BUF_SIZE
, DMA_FROM_DEVICE
);
634 if (dma_mapping_error(&ndev
->dev
, phys
))
637 priv
->rx_phys
[i
] = phys
;
638 hip04_set_recv_desc(priv
, phys
);
642 phy_start(priv
->phy
);
644 netdev_reset_queue(ndev
);
645 netif_start_queue(ndev
);
646 hip04_mac_enable(ndev
);
647 napi_enable(&priv
->napi
);
652 static int hip04_mac_stop(struct net_device
*ndev
)
654 struct hip04_priv
*priv
= netdev_priv(ndev
);
657 napi_disable(&priv
->napi
);
658 netif_stop_queue(ndev
);
659 hip04_mac_disable(ndev
);
660 hip04_tx_reclaim(ndev
, true);
661 hip04_reset_ppe(priv
);
666 for (i
= 0; i
< RX_DESC_NUM
; i
++) {
667 if (priv
->rx_phys
[i
]) {
668 dma_unmap_single(&ndev
->dev
, priv
->rx_phys
[i
],
669 RX_BUF_SIZE
, DMA_FROM_DEVICE
);
670 priv
->rx_phys
[i
] = 0;
677 static void hip04_timeout(struct net_device
*ndev
)
679 struct hip04_priv
*priv
= netdev_priv(ndev
);
681 schedule_work(&priv
->tx_timeout_task
);
684 static void hip04_tx_timeout_task(struct work_struct
*work
)
686 struct hip04_priv
*priv
;
688 priv
= container_of(work
, struct hip04_priv
, tx_timeout_task
);
689 hip04_mac_stop(priv
->ndev
);
690 hip04_mac_open(priv
->ndev
);
693 static struct net_device_stats
*hip04_get_stats(struct net_device
*ndev
)
698 static int hip04_get_coalesce(struct net_device
*netdev
,
699 struct ethtool_coalesce
*ec
)
701 struct hip04_priv
*priv
= netdev_priv(netdev
);
703 ec
->tx_coalesce_usecs
= priv
->tx_coalesce_usecs
;
704 ec
->tx_max_coalesced_frames
= priv
->tx_coalesce_frames
;
709 static int hip04_set_coalesce(struct net_device
*netdev
,
710 struct ethtool_coalesce
*ec
)
712 struct hip04_priv
*priv
= netdev_priv(netdev
);
714 /* Check not supported parameters */
715 if ((ec
->rx_max_coalesced_frames
) || (ec
->rx_coalesce_usecs_irq
) ||
716 (ec
->rx_max_coalesced_frames_irq
) || (ec
->tx_coalesce_usecs_irq
) ||
717 (ec
->use_adaptive_rx_coalesce
) || (ec
->use_adaptive_tx_coalesce
) ||
718 (ec
->pkt_rate_low
) || (ec
->rx_coalesce_usecs_low
) ||
719 (ec
->rx_max_coalesced_frames_low
) || (ec
->tx_coalesce_usecs_high
) ||
720 (ec
->tx_max_coalesced_frames_low
) || (ec
->pkt_rate_high
) ||
721 (ec
->tx_coalesce_usecs_low
) || (ec
->rx_coalesce_usecs_high
) ||
722 (ec
->rx_max_coalesced_frames_high
) || (ec
->rx_coalesce_usecs
) ||
723 (ec
->tx_max_coalesced_frames_irq
) ||
724 (ec
->stats_block_coalesce_usecs
) ||
725 (ec
->tx_max_coalesced_frames_high
) || (ec
->rate_sample_interval
))
728 if ((ec
->tx_coalesce_usecs
> HIP04_MAX_TX_COALESCE_USECS
||
729 ec
->tx_coalesce_usecs
< HIP04_MIN_TX_COALESCE_USECS
) ||
730 (ec
->tx_max_coalesced_frames
> HIP04_MAX_TX_COALESCE_FRAMES
||
731 ec
->tx_max_coalesced_frames
< HIP04_MIN_TX_COALESCE_FRAMES
))
734 priv
->tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
735 priv
->tx_coalesce_frames
= ec
->tx_max_coalesced_frames
;
740 static void hip04_get_drvinfo(struct net_device
*netdev
,
741 struct ethtool_drvinfo
*drvinfo
)
743 strlcpy(drvinfo
->driver
, DRV_NAME
, sizeof(drvinfo
->driver
));
744 strlcpy(drvinfo
->version
, DRV_VERSION
, sizeof(drvinfo
->version
));
747 static struct ethtool_ops hip04_ethtool_ops
= {
748 .get_coalesce
= hip04_get_coalesce
,
749 .set_coalesce
= hip04_set_coalesce
,
750 .get_drvinfo
= hip04_get_drvinfo
,
753 static struct net_device_ops hip04_netdev_ops
= {
754 .ndo_open
= hip04_mac_open
,
755 .ndo_stop
= hip04_mac_stop
,
756 .ndo_get_stats
= hip04_get_stats
,
757 .ndo_start_xmit
= hip04_mac_start_xmit
,
758 .ndo_set_mac_address
= hip04_set_mac_address
,
759 .ndo_tx_timeout
= hip04_timeout
,
760 .ndo_validate_addr
= eth_validate_addr
,
761 .ndo_change_mtu
= eth_change_mtu
,
764 static int hip04_alloc_ring(struct net_device
*ndev
, struct device
*d
)
766 struct hip04_priv
*priv
= netdev_priv(ndev
);
769 priv
->tx_desc
= dma_alloc_coherent(d
,
770 TX_DESC_NUM
* sizeof(struct tx_desc
),
771 &priv
->tx_desc_dma
, GFP_KERNEL
);
775 priv
->rx_buf_size
= RX_BUF_SIZE
+
776 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
777 for (i
= 0; i
< RX_DESC_NUM
; i
++) {
778 priv
->rx_buf
[i
] = netdev_alloc_frag(priv
->rx_buf_size
);
779 if (!priv
->rx_buf
[i
])
786 static void hip04_free_ring(struct net_device
*ndev
, struct device
*d
)
788 struct hip04_priv
*priv
= netdev_priv(ndev
);
791 for (i
= 0; i
< RX_DESC_NUM
; i
++)
793 put_page(virt_to_head_page(priv
->rx_buf
[i
]));
795 for (i
= 0; i
< TX_DESC_NUM
; i
++)
797 dev_kfree_skb_any(priv
->tx_skb
[i
]);
799 dma_free_coherent(d
, TX_DESC_NUM
* sizeof(struct tx_desc
),
800 priv
->tx_desc
, priv
->tx_desc_dma
);
803 static int hip04_mac_probe(struct platform_device
*pdev
)
805 struct device
*d
= &pdev
->dev
;
806 struct device_node
*node
= d
->of_node
;
807 struct of_phandle_args arg
;
808 struct net_device
*ndev
;
809 struct hip04_priv
*priv
;
810 struct resource
*res
;
815 ndev
= alloc_etherdev(sizeof(struct hip04_priv
));
819 priv
= netdev_priv(ndev
);
821 platform_set_drvdata(pdev
, ndev
);
823 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
824 priv
->base
= devm_ioremap_resource(d
, res
);
825 if (IS_ERR(priv
->base
)) {
826 ret
= PTR_ERR(priv
->base
);
830 ret
= of_parse_phandle_with_fixed_args(node
, "port-handle", 2, 0, &arg
);
832 dev_warn(d
, "no port-handle\n");
836 priv
->port
= arg
.args
[0];
837 priv
->chan
= arg
.args
[1] * RX_DESC_NUM
;
839 hrtimer_init(&priv
->tx_coalesce_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
841 /* BQL will try to keep the TX queue as short as possible, but it can't
842 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
843 * but also long enough to gather up enough frames to ensure we don't
844 * get more interrupts than necessary.
845 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
847 priv
->tx_coalesce_frames
= TX_DESC_NUM
* 3 / 4;
848 priv
->tx_coalesce_usecs
= 200;
849 /* allow timer to fire after half the time at the earliest */
850 txtime
= ktime_set(0, priv
->tx_coalesce_usecs
* NSEC_PER_USEC
/ 2);
851 hrtimer_set_expires_range(&priv
->tx_coalesce_timer
, txtime
, txtime
);
852 priv
->tx_coalesce_timer
.function
= tx_done
;
854 priv
->map
= syscon_node_to_regmap(arg
.np
);
855 if (IS_ERR(priv
->map
)) {
856 dev_warn(d
, "no syscon hisilicon,hip04-ppe\n");
857 ret
= PTR_ERR(priv
->map
);
861 priv
->phy_mode
= of_get_phy_mode(node
);
862 if (priv
->phy_mode
< 0) {
863 dev_warn(d
, "not find phy-mode\n");
868 irq
= platform_get_irq(pdev
, 0);
874 ret
= devm_request_irq(d
, irq
, hip04_mac_interrupt
,
875 0, pdev
->name
, ndev
);
877 netdev_err(ndev
, "devm_request_irq failed\n");
881 priv
->phy_node
= of_parse_phandle(node
, "phy-handle", 0);
882 if (priv
->phy_node
) {
883 priv
->phy
= of_phy_connect(ndev
, priv
->phy_node
,
892 INIT_WORK(&priv
->tx_timeout_task
, hip04_tx_timeout_task
);
895 ndev
->netdev_ops
= &hip04_netdev_ops
;
896 ndev
->ethtool_ops
= &hip04_ethtool_ops
;
897 ndev
->watchdog_timeo
= TX_TIMEOUT
;
898 ndev
->priv_flags
|= IFF_UNICAST_FLT
;
900 netif_napi_add(ndev
, &priv
->napi
, hip04_rx_poll
, NAPI_POLL_WEIGHT
);
901 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
903 hip04_reset_ppe(priv
);
904 if (priv
->phy_mode
== PHY_INTERFACE_MODE_MII
)
905 hip04_config_port(ndev
, SPEED_100
, DUPLEX_FULL
);
907 hip04_config_fifo(priv
);
908 random_ether_addr(ndev
->dev_addr
);
909 hip04_update_mac_address(ndev
);
911 ret
= hip04_alloc_ring(ndev
, d
);
913 netdev_err(ndev
, "alloc ring fail\n");
917 ret
= register_netdev(ndev
);
926 hip04_free_ring(ndev
, d
);
928 of_node_put(priv
->phy_node
);
933 static int hip04_remove(struct platform_device
*pdev
)
935 struct net_device
*ndev
= platform_get_drvdata(pdev
);
936 struct hip04_priv
*priv
= netdev_priv(ndev
);
937 struct device
*d
= &pdev
->dev
;
940 phy_disconnect(priv
->phy
);
942 hip04_free_ring(ndev
, d
);
943 unregister_netdev(ndev
);
944 free_irq(ndev
->irq
, ndev
);
945 of_node_put(priv
->phy_node
);
946 cancel_work_sync(&priv
->tx_timeout_task
);
952 static const struct of_device_id hip04_mac_match
[] = {
953 { .compatible
= "hisilicon,hip04-mac" },
957 MODULE_DEVICE_TABLE(of
, hip04_mac_match
);
959 static struct platform_driver hip04_mac_driver
= {
960 .probe
= hip04_mac_probe
,
961 .remove
= hip04_remove
,
964 .owner
= THIS_MODULE
,
965 .of_match_table
= hip04_mac_match
,
968 module_platform_driver(hip04_mac_driver
);
970 MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
971 MODULE_LICENSE("GPL");