1 /* Applied Micro X-Gene SoC Ethernet Driver
3 * Copyright (c) 2014, Applied Micro Circuits Corporation
4 * Authors: Iyappan Subramanian <isubramanian@apm.com>
5 * Ravi Patel <rapatel@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "xgene_enet_main.h"
23 #include "xgene_enet_hw.h"
25 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring
*buf_pool
)
27 struct xgene_enet_raw_desc16
*raw_desc
;
30 for (i
= 0; i
< buf_pool
->slots
; i
++) {
31 raw_desc
= &buf_pool
->raw_desc16
[i
];
33 /* Hardware expects descriptor in little endian format */
34 raw_desc
->m0
= cpu_to_le64(i
|
35 SET_VAL(FPQNUM
, buf_pool
->dst_ring_num
) |
40 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring
*buf_pool
,
44 struct xgene_enet_raw_desc16
*raw_desc
;
45 struct net_device
*ndev
;
48 u32 tail
= buf_pool
->tail
;
49 u32 slots
= buf_pool
->slots
- 1;
53 ndev
= buf_pool
->ndev
;
54 dev
= ndev_to_dev(buf_pool
->ndev
);
55 bufdatalen
= BUF_LEN_CODE_2K
| (SKB_BUFFER_SIZE
& GENMASK(11, 0));
56 len
= XGENE_ENET_MAX_MTU
;
58 for (i
= 0; i
< nbuf
; i
++) {
59 raw_desc
= &buf_pool
->raw_desc16
[tail
];
61 skb
= netdev_alloc_skb_ip_align(ndev
, len
);
64 buf_pool
->rx_skb
[tail
] = skb
;
66 dma_addr
= dma_map_single(dev
, skb
->data
, len
, DMA_FROM_DEVICE
);
67 if (dma_mapping_error(dev
, dma_addr
)) {
68 netdev_err(ndev
, "DMA mapping error\n");
69 dev_kfree_skb_any(skb
);
73 raw_desc
->m1
= cpu_to_le64(SET_VAL(DATAADDR
, dma_addr
) |
74 SET_VAL(BUFDATALEN
, bufdatalen
) |
76 tail
= (tail
+ 1) & slots
;
79 iowrite32(nbuf
, buf_pool
->cmd
);
80 buf_pool
->tail
= tail
;
85 static u16
xgene_enet_dst_ring_num(struct xgene_enet_desc_ring
*ring
)
87 struct xgene_enet_pdata
*pdata
= netdev_priv(ring
->ndev
);
89 return ((u16
)pdata
->rm
<< 10) | ring
->num
;
92 static u8
xgene_enet_hdr_len(const void *data
)
94 const struct ethhdr
*eth
= data
;
96 return (eth
->h_proto
== htons(ETH_P_8021Q
)) ? VLAN_ETH_HLEN
: ETH_HLEN
;
99 static u32
xgene_enet_ring_len(struct xgene_enet_desc_ring
*ring
)
101 u32 __iomem
*cmd_base
= ring
->cmd_base
;
102 u32 ring_state
, num_msgs
;
104 ring_state
= ioread32(&cmd_base
[1]);
105 num_msgs
= ring_state
& CREATE_MASK(NUMMSGSINQ_POS
, NUMMSGSINQ_LEN
);
107 return num_msgs
>> NUMMSGSINQ_POS
;
110 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring
*buf_pool
)
112 struct xgene_enet_raw_desc16
*raw_desc
;
113 u32 slots
= buf_pool
->slots
- 1;
114 u32 tail
= buf_pool
->tail
;
118 len
= xgene_enet_ring_len(buf_pool
);
119 for (i
= 0; i
< len
; i
++) {
120 tail
= (tail
- 1) & slots
;
121 raw_desc
= &buf_pool
->raw_desc16
[tail
];
123 /* Hardware stores descriptor in little endian format */
124 userinfo
= GET_VAL(USERINFO
, le64_to_cpu(raw_desc
->m0
));
125 dev_kfree_skb_any(buf_pool
->rx_skb
[userinfo
]);
128 iowrite32(-len
, buf_pool
->cmd
);
129 buf_pool
->tail
= tail
;
132 static irqreturn_t
xgene_enet_rx_irq(const int irq
, void *data
)
134 struct xgene_enet_desc_ring
*rx_ring
= data
;
136 if (napi_schedule_prep(&rx_ring
->napi
)) {
137 disable_irq_nosync(irq
);
138 __napi_schedule(&rx_ring
->napi
);
144 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring
*cp_ring
,
145 struct xgene_enet_raw_desc
*raw_desc
)
153 skb_index
= GET_VAL(USERINFO
, le64_to_cpu(raw_desc
->m0
));
154 skb
= cp_ring
->cp_skb
[skb_index
];
156 dev
= ndev_to_dev(cp_ring
->ndev
);
157 dma_unmap_single(dev
, GET_VAL(DATAADDR
, le64_to_cpu(raw_desc
->m1
)),
158 GET_VAL(BUFDATALEN
, le64_to_cpu(raw_desc
->m1
)),
161 /* Checking for error */
162 status
= GET_VAL(LERR
, le64_to_cpu(raw_desc
->m0
));
163 if (unlikely(status
> 2)) {
164 xgene_enet_parse_error(cp_ring
, netdev_priv(cp_ring
->ndev
),
170 dev_kfree_skb_any(skb
);
172 netdev_err(cp_ring
->ndev
, "completion skb is NULL\n");
179 static u64
xgene_enet_work_msg(struct sk_buff
*skb
)
182 u8 l3hlen
, l4hlen
= 0;
188 if (unlikely(skb
->protocol
!= htons(ETH_P_IP
)) &&
189 unlikely(skb
->protocol
!= htons(ETH_P_8021Q
)))
192 if (unlikely(!(skb
->dev
->features
& NETIF_F_IP_CSUM
)))
196 if (unlikely(ip_is_fragment(iph
)))
199 if (likely(iph
->protocol
== IPPROTO_TCP
)) {
200 l4hlen
= tcp_hdrlen(skb
) >> 2;
202 proto
= TSO_IPPROTO_TCP
;
203 } else if (iph
->protocol
== IPPROTO_UDP
) {
204 l4hlen
= UDP_HDR_SIZE
;
208 l3hlen
= ip_hdrlen(skb
) >> 2;
209 ethhdr
= xgene_enet_hdr_len(skb
->data
);
210 hopinfo
= SET_VAL(TCPHDR
, l4hlen
) |
211 SET_VAL(IPHDR
, l3hlen
) |
212 SET_VAL(ETHHDR
, ethhdr
) |
213 SET_VAL(EC
, csum_enable
) |
216 SET_BIT(TYPE_ETH_WORK_MESSAGE
);
221 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring
*tx_ring
,
224 struct device
*dev
= ndev_to_dev(tx_ring
->ndev
);
225 struct xgene_enet_raw_desc
*raw_desc
;
227 u16 tail
= tx_ring
->tail
;
230 raw_desc
= &tx_ring
->raw_desc
[tail
];
231 memset(raw_desc
, 0, sizeof(struct xgene_enet_raw_desc
));
233 dma_addr
= dma_map_single(dev
, skb
->data
, skb
->len
, DMA_TO_DEVICE
);
234 if (dma_mapping_error(dev
, dma_addr
)) {
235 netdev_err(tx_ring
->ndev
, "DMA mapping error\n");
239 /* Hardware expects descriptor in little endian format */
240 raw_desc
->m0
= cpu_to_le64(tail
);
241 raw_desc
->m1
= cpu_to_le64(SET_VAL(DATAADDR
, dma_addr
) |
242 SET_VAL(BUFDATALEN
, skb
->len
) |
244 hopinfo
= xgene_enet_work_msg(skb
);
245 raw_desc
->m3
= cpu_to_le64(SET_VAL(HENQNUM
, tx_ring
->dst_ring_num
) |
247 tx_ring
->cp_ring
->cp_skb
[tail
] = skb
;
252 static netdev_tx_t
xgene_enet_start_xmit(struct sk_buff
*skb
,
253 struct net_device
*ndev
)
255 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
256 struct xgene_enet_desc_ring
*tx_ring
= pdata
->tx_ring
;
257 struct xgene_enet_desc_ring
*cp_ring
= tx_ring
->cp_ring
;
258 u32 tx_level
, cq_level
;
260 tx_level
= xgene_enet_ring_len(tx_ring
);
261 cq_level
= xgene_enet_ring_len(cp_ring
);
262 if (unlikely(tx_level
> pdata
->tx_qcnt_hi
||
263 cq_level
> pdata
->cp_qcnt_hi
)) {
264 netif_stop_queue(ndev
);
265 return NETDEV_TX_BUSY
;
268 if (xgene_enet_setup_tx_desc(tx_ring
, skb
)) {
269 dev_kfree_skb_any(skb
);
273 iowrite32(1, tx_ring
->cmd
);
274 skb_tx_timestamp(skb
);
275 tx_ring
->tail
= (tx_ring
->tail
+ 1) & (tx_ring
->slots
- 1);
277 pdata
->stats
.tx_packets
++;
278 pdata
->stats
.tx_bytes
+= skb
->len
;
283 static void xgene_enet_skip_csum(struct sk_buff
*skb
)
285 struct iphdr
*iph
= ip_hdr(skb
);
287 if (!ip_is_fragment(iph
) ||
288 (iph
->protocol
!= IPPROTO_TCP
&& iph
->protocol
!= IPPROTO_UDP
)) {
289 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
293 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring
*rx_ring
,
294 struct xgene_enet_raw_desc
*raw_desc
)
296 struct net_device
*ndev
;
297 struct xgene_enet_pdata
*pdata
;
299 struct xgene_enet_desc_ring
*buf_pool
;
300 u32 datalen
, skb_index
;
305 ndev
= rx_ring
->ndev
;
306 pdata
= netdev_priv(ndev
);
307 dev
= ndev_to_dev(rx_ring
->ndev
);
308 buf_pool
= rx_ring
->buf_pool
;
310 dma_unmap_single(dev
, GET_VAL(DATAADDR
, le64_to_cpu(raw_desc
->m1
)),
311 XGENE_ENET_MAX_MTU
, DMA_FROM_DEVICE
);
312 skb_index
= GET_VAL(USERINFO
, le64_to_cpu(raw_desc
->m0
));
313 skb
= buf_pool
->rx_skb
[skb_index
];
315 /* checking for error */
316 status
= GET_VAL(LERR
, le64_to_cpu(raw_desc
->m0
));
317 if (unlikely(status
> 2)) {
318 dev_kfree_skb_any(skb
);
319 xgene_enet_parse_error(rx_ring
, netdev_priv(rx_ring
->ndev
),
321 pdata
->stats
.rx_dropped
++;
326 /* strip off CRC as HW isn't doing this */
327 datalen
= GET_VAL(BUFDATALEN
, le64_to_cpu(raw_desc
->m1
));
329 prefetch(skb
->data
- NET_IP_ALIGN
);
330 skb_put(skb
, datalen
);
332 skb_checksum_none_assert(skb
);
333 skb
->protocol
= eth_type_trans(skb
, ndev
);
334 if (likely((ndev
->features
& NETIF_F_IP_CSUM
) &&
335 skb
->protocol
== htons(ETH_P_IP
))) {
336 xgene_enet_skip_csum(skb
);
339 pdata
->stats
.rx_packets
++;
340 pdata
->stats
.rx_bytes
+= datalen
;
341 napi_gro_receive(&rx_ring
->napi
, skb
);
343 if (--rx_ring
->nbufpool
== 0) {
344 ret
= xgene_enet_refill_bufpool(buf_pool
, NUM_BUFPOOL
);
345 rx_ring
->nbufpool
= NUM_BUFPOOL
;
351 static bool is_rx_desc(struct xgene_enet_raw_desc
*raw_desc
)
353 return GET_VAL(FPQNUM
, le64_to_cpu(raw_desc
->m0
)) ? true : false;
356 static int xgene_enet_process_ring(struct xgene_enet_desc_ring
*ring
,
359 struct xgene_enet_pdata
*pdata
= netdev_priv(ring
->ndev
);
360 struct xgene_enet_raw_desc
*raw_desc
;
361 u16 head
= ring
->head
;
362 u16 slots
= ring
->slots
- 1;
366 raw_desc
= &ring
->raw_desc
[head
];
367 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc
)))
370 if (is_rx_desc(raw_desc
))
371 ret
= xgene_enet_rx_frame(ring
, raw_desc
);
373 ret
= xgene_enet_tx_completion(ring
, raw_desc
);
374 xgene_enet_mark_desc_slot_empty(raw_desc
);
376 head
= (head
+ 1) & slots
;
384 iowrite32(-count
, ring
->cmd
);
387 if (netif_queue_stopped(ring
->ndev
)) {
388 if (xgene_enet_ring_len(ring
) < pdata
->cp_qcnt_low
)
389 netif_wake_queue(ring
->ndev
);
396 static int xgene_enet_napi(struct napi_struct
*napi
, const int budget
)
398 struct xgene_enet_desc_ring
*ring
;
401 ring
= container_of(napi
, struct xgene_enet_desc_ring
, napi
);
402 processed
= xgene_enet_process_ring(ring
, budget
);
404 if (processed
!= budget
) {
406 enable_irq(ring
->irq
);
/* .ndo_tx_timeout: recover a stuck transmitter by resetting the MAC. */
static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	xgene_gmac_reset(pdata);
}
419 static int xgene_enet_register_irq(struct net_device
*ndev
)
421 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
422 struct device
*dev
= ndev_to_dev(ndev
);
425 ret
= devm_request_irq(dev
, pdata
->rx_ring
->irq
, xgene_enet_rx_irq
,
426 IRQF_SHARED
, ndev
->name
, pdata
->rx_ring
);
428 netdev_err(ndev
, "rx%d interrupt request failed\n",
429 pdata
->rx_ring
->irq
);
435 static void xgene_enet_free_irq(struct net_device
*ndev
)
437 struct xgene_enet_pdata
*pdata
;
440 pdata
= netdev_priv(ndev
);
441 dev
= ndev_to_dev(ndev
);
442 devm_free_irq(dev
, pdata
->rx_ring
->irq
, pdata
->rx_ring
);
445 static int xgene_enet_open(struct net_device
*ndev
)
447 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
450 xgene_gmac_tx_enable(pdata
);
451 xgene_gmac_rx_enable(pdata
);
453 ret
= xgene_enet_register_irq(ndev
);
456 napi_enable(&pdata
->rx_ring
->napi
);
459 phy_start(pdata
->phy_dev
);
461 netif_start_queue(ndev
);
466 static int xgene_enet_close(struct net_device
*ndev
)
468 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
470 netif_stop_queue(ndev
);
473 phy_stop(pdata
->phy_dev
);
475 napi_disable(&pdata
->rx_ring
->napi
);
476 xgene_enet_free_irq(ndev
);
477 xgene_enet_process_ring(pdata
->rx_ring
, -1);
479 xgene_gmac_tx_disable(pdata
);
480 xgene_gmac_rx_disable(pdata
);
485 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring
*ring
)
487 struct xgene_enet_pdata
*pdata
;
490 pdata
= netdev_priv(ring
->ndev
);
491 dev
= ndev_to_dev(ring
->ndev
);
493 xgene_enet_clear_ring(ring
);
494 dma_free_coherent(dev
, ring
->size
, ring
->desc_addr
, ring
->dma
);
497 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata
*pdata
)
499 struct xgene_enet_desc_ring
*buf_pool
;
501 if (pdata
->tx_ring
) {
502 xgene_enet_delete_ring(pdata
->tx_ring
);
503 pdata
->tx_ring
= NULL
;
506 if (pdata
->rx_ring
) {
507 buf_pool
= pdata
->rx_ring
->buf_pool
;
508 xgene_enet_delete_bufpool(buf_pool
);
509 xgene_enet_delete_ring(buf_pool
);
510 xgene_enet_delete_ring(pdata
->rx_ring
);
511 pdata
->rx_ring
= NULL
;
515 static int xgene_enet_get_ring_size(struct device
*dev
,
516 enum xgene_enet_ring_cfgsize cfgsize
)
521 case RING_CFGSIZE_512B
:
524 case RING_CFGSIZE_2KB
:
527 case RING_CFGSIZE_16KB
:
530 case RING_CFGSIZE_64KB
:
533 case RING_CFGSIZE_512KB
:
537 dev_err(dev
, "Unsupported cfg ring size %d\n", cfgsize
);
544 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring
*ring
)
551 dev
= ndev_to_dev(ring
->ndev
);
553 if (ring
->desc_addr
) {
554 xgene_enet_clear_ring(ring
);
555 dma_free_coherent(dev
, ring
->size
, ring
->desc_addr
, ring
->dma
);
557 devm_kfree(dev
, ring
);
560 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata
*pdata
)
562 struct device
*dev
= &pdata
->pdev
->dev
;
563 struct xgene_enet_desc_ring
*ring
;
565 ring
= pdata
->tx_ring
;
566 if (ring
&& ring
->cp_ring
&& ring
->cp_ring
->cp_skb
)
567 devm_kfree(dev
, ring
->cp_ring
->cp_skb
);
568 xgene_enet_free_desc_ring(ring
);
570 ring
= pdata
->rx_ring
;
571 if (ring
&& ring
->buf_pool
&& ring
->buf_pool
->rx_skb
)
572 devm_kfree(dev
, ring
->buf_pool
->rx_skb
);
573 xgene_enet_free_desc_ring(ring
->buf_pool
);
574 xgene_enet_free_desc_ring(ring
);
577 static struct xgene_enet_desc_ring
*xgene_enet_create_desc_ring(
578 struct net_device
*ndev
, u32 ring_num
,
579 enum xgene_enet_ring_cfgsize cfgsize
, u32 ring_id
)
581 struct xgene_enet_desc_ring
*ring
;
582 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
583 struct device
*dev
= ndev_to_dev(ndev
);
586 ring
= devm_kzalloc(dev
, sizeof(struct xgene_enet_desc_ring
),
592 ring
->num
= ring_num
;
593 ring
->cfgsize
= cfgsize
;
596 size
= xgene_enet_get_ring_size(dev
, cfgsize
);
597 ring
->desc_addr
= dma_zalloc_coherent(dev
, size
, &ring
->dma
,
599 if (!ring
->desc_addr
) {
600 devm_kfree(dev
, ring
);
605 ring
->cmd_base
= pdata
->ring_cmd_addr
+ (ring
->num
<< 6);
606 ring
->cmd
= ring
->cmd_base
+ INC_DEC_CMD_ADDR
;
608 ring
= xgene_enet_setup_ring(ring
);
609 netdev_dbg(ndev
, "ring info: num=%d size=%d id=%d slots=%d\n",
610 ring
->num
, ring
->size
, ring
->id
, ring
->slots
);
615 static u16
xgene_enet_get_ring_id(enum xgene_ring_owner owner
, u8 bufnum
)
617 return (owner
<< 6) | (bufnum
& GENMASK(5, 0));
620 static int xgene_enet_create_desc_rings(struct net_device
*ndev
)
622 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
623 struct device
*dev
= ndev_to_dev(ndev
);
624 struct xgene_enet_desc_ring
*rx_ring
, *tx_ring
, *cp_ring
;
625 struct xgene_enet_desc_ring
*buf_pool
= NULL
;
626 u8 cpu_bufnum
= 0, eth_bufnum
= 0;
628 u16 ring_id
, ring_num
= 0;
631 /* allocate rx descriptor ring */
632 ring_id
= xgene_enet_get_ring_id(RING_OWNER_CPU
, cpu_bufnum
++);
633 rx_ring
= xgene_enet_create_desc_ring(ndev
, ring_num
++,
634 RING_CFGSIZE_16KB
, ring_id
);
640 /* allocate buffer pool for receiving packets */
641 ring_id
= xgene_enet_get_ring_id(RING_OWNER_ETH0
, bp_bufnum
++);
642 buf_pool
= xgene_enet_create_desc_ring(ndev
, ring_num
++,
643 RING_CFGSIZE_2KB
, ring_id
);
649 rx_ring
->nbufpool
= NUM_BUFPOOL
;
650 rx_ring
->buf_pool
= buf_pool
;
651 rx_ring
->irq
= pdata
->rx_irq
;
652 buf_pool
->rx_skb
= devm_kcalloc(dev
, buf_pool
->slots
,
653 sizeof(struct sk_buff
*), GFP_KERNEL
);
654 if (!buf_pool
->rx_skb
) {
659 buf_pool
->dst_ring_num
= xgene_enet_dst_ring_num(buf_pool
);
660 rx_ring
->buf_pool
= buf_pool
;
661 pdata
->rx_ring
= rx_ring
;
663 /* allocate tx descriptor ring */
664 ring_id
= xgene_enet_get_ring_id(RING_OWNER_ETH0
, eth_bufnum
++);
665 tx_ring
= xgene_enet_create_desc_ring(ndev
, ring_num
++,
666 RING_CFGSIZE_16KB
, ring_id
);
671 pdata
->tx_ring
= tx_ring
;
673 cp_ring
= pdata
->rx_ring
;
674 cp_ring
->cp_skb
= devm_kcalloc(dev
, tx_ring
->slots
,
675 sizeof(struct sk_buff
*), GFP_KERNEL
);
676 if (!cp_ring
->cp_skb
) {
680 pdata
->tx_ring
->cp_ring
= cp_ring
;
681 pdata
->tx_ring
->dst_ring_num
= xgene_enet_dst_ring_num(cp_ring
);
683 pdata
->tx_qcnt_hi
= pdata
->tx_ring
->slots
/ 2;
684 pdata
->cp_qcnt_hi
= pdata
->rx_ring
->slots
/ 2;
685 pdata
->cp_qcnt_low
= pdata
->cp_qcnt_hi
/ 2;
690 xgene_enet_free_desc_rings(pdata
);
694 static struct rtnl_link_stats64
*xgene_enet_get_stats64(
695 struct net_device
*ndev
,
696 struct rtnl_link_stats64
*storage
)
698 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
699 struct rtnl_link_stats64
*stats
= &pdata
->stats
;
701 stats
->rx_errors
+= stats
->rx_length_errors
+
702 stats
->rx_crc_errors
+
703 stats
->rx_frame_errors
+
704 stats
->rx_fifo_errors
;
705 memcpy(storage
, &pdata
->stats
, sizeof(struct rtnl_link_stats64
));
/* .ndo_set_mac_address: validate and store the new address via
 * eth_mac_addr(), then program it into the MAC registers.
 */
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	xgene_gmac_set_mac_addr(pdata);

	return ret;
}
723 static const struct net_device_ops xgene_ndev_ops
= {
724 .ndo_open
= xgene_enet_open
,
725 .ndo_stop
= xgene_enet_close
,
726 .ndo_start_xmit
= xgene_enet_start_xmit
,
727 .ndo_tx_timeout
= xgene_enet_timeout
,
728 .ndo_get_stats64
= xgene_enet_get_stats64
,
729 .ndo_change_mtu
= eth_change_mtu
,
730 .ndo_set_mac_address
= xgene_enet_set_mac_address
,
733 static int xgene_enet_get_resources(struct xgene_enet_pdata
*pdata
)
735 struct platform_device
*pdev
;
736 struct net_device
*ndev
;
738 struct resource
*res
;
739 void __iomem
*base_addr
;
747 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "enet_csr");
749 dev_err(dev
, "Resource enet_csr not defined\n");
752 pdata
->base_addr
= devm_ioremap_resource(dev
, res
);
753 if (IS_ERR(pdata
->base_addr
)) {
754 dev_err(dev
, "Unable to retrieve ENET Port CSR region\n");
755 return PTR_ERR(pdata
->base_addr
);
758 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "ring_csr");
760 dev_err(dev
, "Resource ring_csr not defined\n");
763 pdata
->ring_csr_addr
= devm_ioremap_resource(dev
, res
);
764 if (IS_ERR(pdata
->ring_csr_addr
)) {
765 dev_err(dev
, "Unable to retrieve ENET Ring CSR region\n");
766 return PTR_ERR(pdata
->ring_csr_addr
);
769 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "ring_cmd");
771 dev_err(dev
, "Resource ring_cmd not defined\n");
774 pdata
->ring_cmd_addr
= devm_ioremap_resource(dev
, res
);
775 if (IS_ERR(pdata
->ring_cmd_addr
)) {
776 dev_err(dev
, "Unable to retrieve ENET Ring command region\n");
777 return PTR_ERR(pdata
->ring_cmd_addr
);
780 ret
= platform_get_irq(pdev
, 0);
782 dev_err(dev
, "Unable to get ENET Rx IRQ\n");
783 ret
= ret
? : -ENXIO
;
788 mac
= of_get_mac_address(dev
->of_node
);
790 memcpy(ndev
->dev_addr
, mac
, ndev
->addr_len
);
792 eth_hw_addr_random(ndev
);
793 memcpy(ndev
->perm_addr
, ndev
->dev_addr
, ndev
->addr_len
);
795 pdata
->phy_mode
= of_get_phy_mode(pdev
->dev
.of_node
);
796 if (pdata
->phy_mode
< 0) {
797 dev_err(dev
, "Incorrect phy-connection-type in DTS\n");
801 pdata
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
802 ret
= IS_ERR(pdata
->clk
);
803 if (IS_ERR(pdata
->clk
)) {
804 dev_err(&pdev
->dev
, "can't get clock\n");
805 ret
= PTR_ERR(pdata
->clk
);
809 base_addr
= pdata
->base_addr
;
810 pdata
->eth_csr_addr
= base_addr
+ BLOCK_ETH_CSR_OFFSET
;
811 pdata
->eth_ring_if_addr
= base_addr
+ BLOCK_ETH_RING_IF_OFFSET
;
812 pdata
->eth_diag_csr_addr
= base_addr
+ BLOCK_ETH_DIAG_CSR_OFFSET
;
813 pdata
->mcx_mac_addr
= base_addr
+ BLOCK_ETH_MAC_OFFSET
;
814 pdata
->mcx_stats_addr
= base_addr
+ BLOCK_ETH_STATS_OFFSET
;
815 pdata
->mcx_mac_csr_addr
= base_addr
+ BLOCK_ETH_MAC_CSR_OFFSET
;
816 pdata
->rx_buff_cnt
= NUM_PKT_BUF
;
821 static int xgene_enet_init_hw(struct xgene_enet_pdata
*pdata
)
823 struct net_device
*ndev
= pdata
->ndev
;
824 struct xgene_enet_desc_ring
*buf_pool
;
828 xgene_gmac_tx_disable(pdata
);
829 xgene_gmac_rx_disable(pdata
);
831 ret
= xgene_enet_create_desc_rings(ndev
);
833 netdev_err(ndev
, "Error in ring configuration\n");
837 /* setup buffer pool */
838 buf_pool
= pdata
->rx_ring
->buf_pool
;
839 xgene_enet_init_bufpool(buf_pool
);
840 ret
= xgene_enet_refill_bufpool(buf_pool
, pdata
->rx_buff_cnt
);
842 xgene_enet_delete_desc_rings(pdata
);
846 dst_ring_num
= xgene_enet_dst_ring_num(pdata
->rx_ring
);
847 xgene_enet_cle_bypass(pdata
, dst_ring_num
, buf_pool
->id
);
852 static int xgene_enet_probe(struct platform_device
*pdev
)
854 struct net_device
*ndev
;
855 struct xgene_enet_pdata
*pdata
;
856 struct device
*dev
= &pdev
->dev
;
857 struct napi_struct
*napi
;
860 ndev
= alloc_etherdev(sizeof(struct xgene_enet_pdata
));
864 pdata
= netdev_priv(ndev
);
868 SET_NETDEV_DEV(ndev
, dev
);
869 platform_set_drvdata(pdev
, pdata
);
870 ndev
->netdev_ops
= &xgene_ndev_ops
;
871 xgene_enet_set_ethtool_ops(ndev
);
872 ndev
->features
|= NETIF_F_IP_CSUM
|
876 ret
= xgene_enet_get_resources(pdata
);
880 xgene_enet_reset(pdata
);
881 xgene_gmac_init(pdata
, SPEED_1000
);
883 ret
= register_netdev(ndev
);
885 netdev_err(ndev
, "Failed to register netdev\n");
889 ret
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64));
891 netdev_err(ndev
, "No usable DMA configuration\n");
895 ret
= xgene_enet_init_hw(pdata
);
899 napi
= &pdata
->rx_ring
->napi
;
900 netif_napi_add(ndev
, napi
, xgene_enet_napi
, NAPI_POLL_WEIGHT
);
901 ret
= xgene_enet_mdio_config(pdata
);
909 static int xgene_enet_remove(struct platform_device
*pdev
)
911 struct xgene_enet_pdata
*pdata
;
912 struct net_device
*ndev
;
914 pdata
= platform_get_drvdata(pdev
);
917 xgene_gmac_rx_disable(pdata
);
918 xgene_gmac_tx_disable(pdata
);
920 netif_napi_del(&pdata
->rx_ring
->napi
);
921 xgene_enet_mdio_remove(pdata
);
922 xgene_enet_delete_desc_rings(pdata
);
923 unregister_netdev(ndev
);
924 xgene_gport_shutdown(pdata
);
930 static struct of_device_id xgene_enet_match
[] = {
931 {.compatible
= "apm,xgene-enet",},
935 MODULE_DEVICE_TABLE(of
, xgene_enet_match
);
937 static struct platform_driver xgene_enet_driver
= {
939 .name
= "xgene-enet",
940 .of_match_table
= xgene_enet_match
,
942 .probe
= xgene_enet_probe
,
943 .remove
= xgene_enet_remove
,
946 module_platform_driver(xgene_enet_driver
);
948 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
949 MODULE_VERSION(XGENE_DRV_VERSION
);
950 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
951 MODULE_LICENSE("GPL");