/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR    0
#define RES_RING_CSR    1
#define RES_RING_CMD    2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

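/* Seed every 16-byte free-pool descriptor with its own slot index and the
 * destination free-pool queue number (FPQNUM), so the hardware can hand
 * receive buffers back to the right pool.
 */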
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        int i;

        for (i = 0; i < buf_pool->slots; i++) {
                raw_desc = &buf_pool->raw_desc16[i];

                /* Hardware expects descriptor in little endian format */
                raw_desc->m0 = cpu_to_le64(i |
                                SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
                                SET_VAL(STASH, 3));
        }
}

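/* Refill the free pool with freshly allocated skbs. The ring size is a
 * power of two, so masking the tail index with (buf_pool->slots - 1)
 * wraps it without a division; wr_cmd() then tells the hardware how many
 * new buffers were posted.
 */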
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                     u32 nbuf)
{
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
        u32 tail = buf_pool->tail;
        u32 slots = buf_pool->slots - 1;
        u16 bufdatalen, len;
        int i;

        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
        pdata = netdev_priv(ndev);
        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_MAX_MTU;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                skb = netdev_alloc_skb_ip_align(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                buf_pool->rx_skb[tail] = skb;

                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, bufdatalen) |
                                           SET_BIT(COHERENT));
                tail = (tail + 1) & slots;
        }

        pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;

        return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
        const struct ethhdr *eth = data;

        return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct device *dev = ndev_to_dev(buf_pool->ndev);
        struct xgene_enet_raw_desc16 *raw_desc;
        dma_addr_t dma_addr;
        int i;

        /* Free up the buffers held by hardware */
        for (i = 0; i < buf_pool->slots; i++) {
                if (buf_pool->rx_skb[i]) {
                        dev_kfree_skb_any(buf_pool->rx_skb[i]);

                        raw_desc = &buf_pool->raw_desc16[i];
                        dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
                        dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
                                         DMA_FROM_DEVICE);
                }
        }
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
        struct xgene_enet_desc_ring *rx_ring = data;

        if (napi_schedule_prep(&rx_ring->napi)) {
                disable_irq_nosync(irq);
                __napi_schedule(&rx_ring->napi);
        }

        return IRQ_HANDLED;
}

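/* Reclaim a transmitted skb. The USERINFO field of the completion
 * descriptor carries the slot index that xgene_enet_setup_tx_desc()
 * stored there, which locates both the skb and its per-fragment DMA
 * addresses for unmapping.
 */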
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
                                    struct xgene_enet_raw_desc *raw_desc)
{
        struct sk_buff *skb;
        struct device *dev;
        skb_frag_t *frag;
        dma_addr_t *frag_dma_addr;
        u16 skb_index;
        u8 status;
        int i, ret = 0;

        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = cp_ring->cp_skb[skb_index];
        frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

        dev = ndev_to_dev(cp_ring->ndev);
        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         skb_headlen(skb),
                         DMA_TO_DEVICE);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
                               DMA_TO_DEVICE);
        }

        /* Checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
                                       status);
                ret = -EIO;
        }

        if (likely(skb)) {
                dev_kfree_skb_any(skb);
        } else {
                netdev_err(cp_ring->ndev, "completion skb is NULL\n");
                ret = -EIO;
        }

        return ret;
}

static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
        struct net_device *ndev = skb->dev;
        struct iphdr *iph;
        u8 l3hlen = 0, l4hlen = 0;
        u8 ethhdr, proto = 0, csum_enable = 0;
        u64 hopinfo = 0;
        u32 hdr_len, mss = 0;
        u32 i, len, nr_frags;

        ethhdr = xgene_enet_hdr_len(skb->data);

        if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
            unlikely(skb->protocol != htons(ETH_P_8021Q)))
                goto out;

        if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
                goto out;

        iph = ip_hdr(skb);
        if (unlikely(ip_is_fragment(iph)))
                goto out;

        if (likely(iph->protocol == IPPROTO_TCP)) {
                l4hlen = tcp_hdrlen(skb) >> 2;
                csum_enable = 1;
                proto = TSO_IPPROTO_TCP;
                if (ndev->features & NETIF_F_TSO) {
                        hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
                        mss = skb_shinfo(skb)->gso_size;

                        if (skb_is_nonlinear(skb)) {
                                len = skb_headlen(skb);
                                nr_frags = skb_shinfo(skb)->nr_frags;

                                for (i = 0; i < 2 && i < nr_frags; i++)
                                        len += skb_shinfo(skb)->frags[i].size;

                                /* HW requires the header to reside within
                                 * the first 3 buffers
                                 */
                                if (unlikely(hdr_len > len)) {
                                        if (skb_linearize(skb))
                                                return 0;
                                }
                        }

                        if (!mss || ((skb->len - hdr_len) <= mss))
                                goto out;

                        hopinfo |= SET_BIT(ET);
                }
        } else if (iph->protocol == IPPROTO_UDP) {
                l4hlen = UDP_HDR_SIZE;
                csum_enable = 1;
        }
out:
        l3hlen = ip_hdrlen(skb) >> 2;
        hopinfo |= SET_VAL(TCPHDR, l4hlen) |
                   SET_VAL(IPHDR, l3hlen) |
                   SET_VAL(ETHHDR, ethhdr) |
                   SET_VAL(EC, csum_enable) |
                   SET_VAL(IS, proto) |
                   SET_BIT(IC) |
                   SET_BIT(TYPE_ETH_WORK_MESSAGE);

        return hopinfo;
}

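/* The hardware length field encodes a full 16K buffer as 0; every other
 * length is passed through unchanged.
 */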
static u16 xgene_enet_encode_len(u16 len)
{
        return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
        desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
                                    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
        __le64 *exp_bufs;

        exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
        memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
        ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

        return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
        return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

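/* Lay out one packet on the TX ring. The head of the packet goes in the
 * primary descriptor; up to four more buffers fit in the expanded
 * descriptor (exp_desc), and anything beyond that spills into a separate
 * exp_bufs list whose address and length are linked in via the LL
 * ("link list") fields. Fragments larger than 16K are split because a
 * single buffer pointer can cover at most BUFLEN_16K bytes.
 */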
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
                                    struct sk_buff *skb)
{
        struct device *dev = ndev_to_dev(tx_ring->ndev);
        struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        __le64 *exp_desc = NULL, *exp_bufs = NULL;
        dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
        skb_frag_t *frag;
        u16 tail = tx_ring->tail;
        u64 hopinfo;
        u32 len, hw_len;
        u8 ll = 0, nv = 0, idx = 0;
        bool split = false;
        u32 size, offset, ell_bytes = 0;
        u32 i, fidx, nr_frags, count = 1;

        raw_desc = &tx_ring->raw_desc[tail];
        tail = (tail + 1) & (tx_ring->slots - 1);
        memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

        hopinfo = xgene_enet_work_msg(skb);
        if (!hopinfo)
                return -EINVAL;
        raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
                                   hopinfo);

        len = skb_headlen(skb);
        hw_len = xgene_enet_encode_len(len);

        dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                netdev_err(tx_ring->ndev, "DMA mapping error\n");
                return -EINVAL;
        }

        /* Hardware expects descriptor in little endian format */
        raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                   SET_VAL(BUFDATALEN, hw_len) |
                                   SET_BIT(COHERENT));

        if (!skb_is_nonlinear(skb))
                goto out;

        /* scatter gather */
        nv = 1;
        exp_desc = (void *)&tx_ring->raw_desc[tail];
        tail = (tail + 1) & (tx_ring->slots - 1);
        memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
        count++;

        nr_frags = skb_shinfo(skb)->nr_frags;
        for (i = nr_frags; i < 4; i++)
                exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

        frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

        for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
                if (!split) {
                        frag = &skb_shinfo(skb)->frags[fidx];
                        size = skb_frag_size(frag);
                        offset = 0;

                        pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
                                                     DMA_TO_DEVICE);
                        if (dma_mapping_error(dev, pbuf_addr))
                                return -EINVAL;

                        frag_dma_addr[fidx] = pbuf_addr;
                        fidx++;

                        if (size > BUFLEN_16K)
                                split = true;
                }

                if (size > BUFLEN_16K) {
                        len = BUFLEN_16K;
                        size -= BUFLEN_16K;
                } else {
                        len = size;
                        split = false;
                }

                dma_addr = pbuf_addr + offset;
                hw_len = xgene_enet_encode_len(len);

                switch (i) {
                case 0:
                case 1:
                case 2:
                        xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
                        break;
                case 3:
                        if (split || (fidx != nr_frags)) {
                                exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
                                xgene_set_addr_len(exp_bufs, idx, dma_addr,
                                                   hw_len);
                                idx++;
                                ell_bytes = len;
                        } else {
                                xgene_set_addr_len(exp_desc, i, dma_addr,
                                                   hw_len);
                        }
                        break;
                default:
                        xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
                        idx++;
                        ell_bytes += len;
                        break;
                }

                if (split)
                        offset += BUFLEN_16K;
        }

        if (idx) {
                ll = 1;
                dma_addr = dma_map_single(dev, exp_bufs,
                                          sizeof(u64) * MAX_EXP_BUFFS,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }
                i = ell_bytes >> LL_BYTES_LSB_LEN;
                exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                          SET_VAL(LL_BYTES_MSB, i) |
                                          SET_VAL(LL_LEN, idx));
                raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
        }

out:
        raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
                                   SET_VAL(USERINFO, tx_ring->tail));
        tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
        pdata->tx_level[tx_ring->cp_ring->index] += count;
        tx_ring->tail = tail;

        return count;
}

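/* tx_level counts descriptors submitted and txc_level descriptors
 * completed per queue; both are free-running u32 counters, so when
 * tx_level has wrapped past txc_level the difference is corrected by
 * adding ~0U before comparing against the tx_qcnt_hi watermark.
 */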
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *tx_ring;
        int index = skb->queue_mapping;
        u32 tx_level = pdata->tx_level[index];
        int count;

        tx_ring = pdata->tx_ring[index];
        if (tx_level < pdata->txc_level[index])
                tx_level += ((typeof(pdata->tx_level[index]))~0U);

        if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
                netif_stop_subqueue(ndev, index);
                return NETDEV_TX_BUSY;
        }

        if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
                return NETDEV_TX_OK;

        count = xgene_enet_setup_tx_desc(tx_ring, skb);
        if (count <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        skb_tx_timestamp(skb);

        tx_ring->tx_packets++;
        tx_ring->tx_bytes += skb->len;

        pdata->ring_ops->wr_cmd(tx_ring, count);
        return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        if (!ip_is_fragment(iph) ||
            (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                               struct xgene_enet_raw_desc *raw_desc)
{
        struct net_device *ndev;
        struct device *dev;
        struct xgene_enet_desc_ring *buf_pool;
        u32 datalen, skb_index;
        struct sk_buff *skb;
        u8 status;
        int ret = 0;

        ndev = rx_ring->ndev;
        dev = ndev_to_dev(rx_ring->ndev);
        buf_pool = rx_ring->buf_pool;

        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = buf_pool->rx_skb[skb_index];
        buf_pool->rx_skb[skb_index] = NULL;

        /* checking for error */
        status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
                  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                dev_kfree_skb_any(skb);
                xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                       status);
                ret = -EIO;
                goto out;
        }

        /* strip off CRC as HW isn't doing this */
        datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
        datalen = (datalen & DATALEN_MASK) - 4;
        prefetch(skb->data - NET_IP_ALIGN);
        skb_put(skb, datalen);

        skb_checksum_none_assert(skb);
        skb->protocol = eth_type_trans(skb, ndev);
        if (likely((ndev->features & NETIF_F_IP_CSUM) &&
                   skb->protocol == htons(ETH_P_IP))) {
                xgene_enet_skip_csum(skb);
        }

        rx_ring->rx_packets++;
        rx_ring->rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);
out:
        if (--rx_ring->nbufpool == 0) {
                ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
                rx_ring->nbufpool = NUM_BUFPOOL;
        }

        return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
        return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

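/* Drain up to "budget" descriptors from a ring. A set NV bit means the
 * next slot holds the expanded descriptor for the same packet, so it is
 * consumed (and counted) together with the primary one. The negative
 * count written through wr_cmd() returns the processed slots to the
 * hardware.
 */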
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                                   int budget)
{
        struct net_device *ndev = ring->ndev;
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_raw_desc *raw_desc, *exp_desc;
        u16 head = ring->head;
        u16 slots = ring->slots - 1;
        int ret, desc_count, count = 0, processed = 0;
        bool is_completion;

        do {
                raw_desc = &ring->raw_desc[head];
                desc_count = 0;
                is_completion = false;
                exp_desc = NULL;
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;

                /* read fpqnum field after dataaddr field */
                dma_rmb();
                if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
                        head = (head + 1) & slots;
                        exp_desc = &ring->raw_desc[head];

                        if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
                                head = (head - 1) & slots;
                                break;
                        }
                        dma_rmb();
                        count++;
                        desc_count++;
                }
                if (is_rx_desc(raw_desc)) {
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                } else {
                        ret = xgene_enet_tx_completion(ring, raw_desc);
                        is_completion = true;
                }
                xgene_enet_mark_desc_slot_empty(raw_desc);
                if (exp_desc)
                        xgene_enet_mark_desc_slot_empty(exp_desc);

                head = (head + 1) & slots;
                count++;
                desc_count++;
                processed++;
                if (is_completion)
                        pdata->txc_level[ring->index] += desc_count;

                if (ret)
                        break;
        } while (--budget);

        if (likely(count)) {
                pdata->ring_ops->wr_cmd(ring, -count);
                ring->head = head;

                if (__netif_subqueue_stopped(ndev, ring->index))
                        netif_start_subqueue(ndev, ring->index);
        }

        return processed;
}

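/* NAPI poll handler: the RX hard IRQ was disabled by
 * xgene_enet_rx_irq(), so it is re-enabled here once the ring has been
 * polled below budget.
 */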
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
        struct xgene_enet_desc_ring *ring;
        int processed;

        ring = container_of(napi, struct xgene_enet_desc_ring, napi);
        processed = xgene_enet_process_ring(ring, budget);

        if (processed != budget) {
                napi_complete(napi);
                enable_irq(ring->irq);
        }

        return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct netdev_queue *txq;
        int i;

        pdata->mac_ops->reset(pdata);

        for (i = 0; i < pdata->txq_cnt; i++) {
                txq = netdev_get_tx_queue(ndev, i);
                txq->trans_start = jiffies;
                netif_tx_start_queue(txq);
        }
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (!pdata->cq_cnt) {
                        snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
                                 ndev->name);
                } else {
                        snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
                                 ndev->name, i);
                }
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
                         ndev->name, i);
        }
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        int ret = 0, i;

        xgene_enet_set_irq_name(ndev);
        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata;
        struct xgene_enet_desc_ring *ring;
        struct device *dev;
        int i;

        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                devm_free_irq(dev, ring->irq, ring);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                devm_free_irq(dev, ring->irq, ring);
        }
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                napi_enable(napi);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                napi_enable(napi);
        }
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                napi_disable(napi);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                napi_disable(napi);
        }
}

static int xgene_enet_open(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int ret;

        ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
        if (ret)
                return ret;

        ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
        if (ret)
                return ret;

        xgene_enet_napi_enable(pdata);
        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;

        if (pdata->phy_dev) {
                phy_start(pdata->phy_dev);
        } else {
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
                netif_carrier_off(ndev);
        }

        mac_ops->tx_enable(pdata);
        mac_ops->rx_enable(pdata);
        netif_tx_start_all_queues(ndev);

        return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int i;

        netif_tx_stop_all_queues(ndev);
        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);

        if (pdata->phy_dev)
                phy_stop(pdata->phy_dev);
        else
                cancel_delayed_work_sync(&pdata->link_work);

        xgene_enet_free_irq(ndev);
        xgene_enet_napi_disable(pdata);
        for (i = 0; i < pdata->rxq_cnt; i++)
                xgene_enet_process_ring(pdata->rx_ring[i], -1);

        return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ring->ndev);
        dev = ndev_to_dev(ring->ndev);

        pdata->ring_ops->clear(ring);
        dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_desc_ring *buf_pool;
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        xgene_enet_delete_ring(ring);
                        pdata->port_ops->clear(pdata, ring);
                        if (pdata->cq_cnt)
                                xgene_enet_delete_ring(ring->cp_ring);
                        pdata->tx_ring[i] = NULL;
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        buf_pool = ring->buf_pool;
                        xgene_enet_delete_bufpool(buf_pool);
                        xgene_enet_delete_ring(buf_pool);
                        pdata->port_ops->clear(pdata, buf_pool);
                        xgene_enet_delete_ring(ring);
                        pdata->rx_ring[i] = NULL;
                }
        }
}

static int xgene_enet_get_ring_size(struct device *dev,
                                    enum xgene_enet_ring_cfgsize cfgsize)
{
        int size = -EINVAL;

        switch (cfgsize) {
        case RING_CFGSIZE_512B:
                size = 0x200;
                break;
        case RING_CFGSIZE_2KB:
                size = 0x800;
                break;
        case RING_CFGSIZE_16KB:
                size = 0x4000;
                break;
        case RING_CFGSIZE_64KB:
                size = 0x10000;
                break;
        case RING_CFGSIZE_512KB:
                size = 0x80000;
                break;
        default:
                dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
                break;
        }

        return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        if (!ring)
                return;

        dev = ndev_to_dev(ring->ndev);
        pdata = netdev_priv(ring->ndev);

        if (ring->desc_addr) {
                pdata->ring_ops->clear(ring);
                dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
        }
        devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        if (ring->cp_ring && ring->cp_ring->cp_skb)
                                devm_kfree(dev, ring->cp_ring->cp_skb);
                        if (ring->cp_ring && pdata->cq_cnt)
                                xgene_enet_free_desc_ring(ring->cp_ring);
                        xgene_enet_free_desc_ring(ring);
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        if (ring->buf_pool) {
                                if (ring->buf_pool->rx_skb)
                                        devm_kfree(dev, ring->buf_pool->rx_skb);
                                xgene_enet_free_desc_ring(ring->buf_pool);
                        }
                        xgene_enet_free_desc_ring(ring);
                }
        }
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
                                 struct xgene_enet_desc_ring *ring)
{
        if ((pdata->enet_id == XGENE_ENET2) &&
            (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
                return true;
        }

        return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
                                              struct xgene_enet_desc_ring *ring)
{
        u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

        return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
                        struct net_device *ndev, u32 ring_num,
                        enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        void *irq_mbox_addr;
        int size;

        size = xgene_enet_get_ring_size(dev, cfgsize);
        if (size < 0)
                return NULL;

        ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
                            GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;
        ring->num = ring_num;
        ring->cfgsize = cfgsize;
        ring->id = ring_id;

        ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
                                              GFP_KERNEL | __GFP_ZERO);
        if (!ring->desc_addr) {
                devm_kfree(dev, ring);
                return NULL;
        }
        ring->size = size;

        if (is_irq_mbox_required(pdata, ring)) {
                irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
                                                    &ring->irq_mbox_dma,
                                                    GFP_KERNEL | __GFP_ZERO);
                if (!irq_mbox_addr) {
                        dmam_free_coherent(dev, size, ring->desc_addr,
                                           ring->dma);
                        devm_kfree(dev, ring);
                        return NULL;
                }
                ring->irq_mbox_addr = irq_mbox_addr;
        }

        ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
        ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
        ring = pdata->ring_ops->setup(ring);
        netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
                   ring->num, ring->size, ring->id, ring->slots);

        return ring;
}

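/* A ring id packs the owner in the upper bits and a 6-bit buffer number
 * in the lower bits: id = (owner << 6) | (bufnum & GENMASK(5, 0)).
 */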
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
        return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
        enum xgene_ring_owner owner;

        if (p->enet_id == XGENE_ENET1) {
                switch (p->phy_mode) {
                case PHY_INTERFACE_MODE_SGMII:
                        owner = RING_OWNER_ETH0;
                        break;
                default:
                        owner = (!p->port_id) ? RING_OWNER_ETH0 :
                                                RING_OWNER_ETH1;
                        break;
                }
        } else {
                owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
        }

        return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        u32 cpu_bufnum;
        int ret;

        ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

        return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
        enum xgene_ring_owner owner;
        dma_addr_t dma_exp_bufs;
        u8 cpu_bufnum;
        u8 eth_bufnum = pdata->eth_bufnum;
        u8 bp_bufnum = pdata->bp_bufnum;
        u16 ring_num = pdata->ring_num;
        __le64 *exp_bufs;
        u16 ring_id;
        int i, ret, size;

        cpu_bufnum = xgene_start_cpu_bufnum(pdata);

        for (i = 0; i < pdata->rxq_cnt; i++) {
                /* allocate rx descriptor ring */
                owner = xgene_derive_ring_owner(pdata);
                ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
                rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                      RING_CFGSIZE_16KB,
                                                      ring_id);
                if (!rx_ring) {
                        ret = -ENOMEM;
                        goto err;
                }

                /* allocate buffer pool for receiving packets */
                owner = xgene_derive_ring_owner(pdata);
                ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
                buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                       RING_CFGSIZE_2KB,
                                                       ring_id);
                if (!buf_pool) {
                        ret = -ENOMEM;
                        goto err;
                }

                rx_ring->nbufpool = NUM_BUFPOOL;
                rx_ring->buf_pool = buf_pool;
                rx_ring->irq = pdata->irqs[i];
                buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
                                                sizeof(struct sk_buff *),
                                                GFP_KERNEL);
                if (!buf_pool->rx_skb) {
                        ret = -ENOMEM;
                        goto err;
                }

                buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
                rx_ring->buf_pool = buf_pool;
                pdata->rx_ring[i] = rx_ring;
        }

        for (i = 0; i < pdata->txq_cnt; i++) {
                /* allocate tx descriptor ring */
                owner = xgene_derive_ring_owner(pdata);
                ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
                tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                      RING_CFGSIZE_16KB,
                                                      ring_id);
                if (!tx_ring) {
                        ret = -ENOMEM;
                        goto err;
                }

                size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
                exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
                                               GFP_KERNEL | __GFP_ZERO);
                if (!exp_bufs) {
                        ret = -ENOMEM;
                        goto err;
                }
                tx_ring->exp_bufs = exp_bufs;

                pdata->tx_ring[i] = tx_ring;

                if (!pdata->cq_cnt) {
                        cp_ring = pdata->rx_ring[i];
                } else {
                        /* allocate tx completion descriptor ring */
                        ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
                                                         cpu_bufnum++);
                        cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                              RING_CFGSIZE_16KB,
                                                              ring_id);
                        if (!cp_ring) {
                                ret = -ENOMEM;
                                goto err;
                        }

                        cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
                        cp_ring->index = i;
                }

                cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
                                               sizeof(struct sk_buff *),
                                               GFP_KERNEL);
                if (!cp_ring->cp_skb) {
                        ret = -ENOMEM;
                        goto err;
                }

                size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
                cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
                                                      size, GFP_KERNEL);
                if (!cp_ring->frag_dma_addr) {
                        devm_kfree(dev, cp_ring->cp_skb);
                        ret = -ENOMEM;
                        goto err;
                }

                tx_ring->cp_ring = cp_ring;
                tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
        }

        pdata->ring_ops->coalesce(pdata->tx_ring[0]);
        pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

        return 0;

err:
        xgene_enet_free_desc_rings(pdata);
        return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
                        struct net_device *ndev,
                        struct rtnl_link_stats64 *storage)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct rtnl_link_stats64 *stats = &pdata->stats;
        struct xgene_enet_desc_ring *ring;
        int i;

        memset(stats, 0, sizeof(struct rtnl_link_stats64));
        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        stats->tx_packets += ring->tx_packets;
                        stats->tx_bytes += ring->tx_bytes;
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        stats->rx_packets += ring->rx_packets;
                        stats->rx_bytes += ring->rx_bytes;
                        stats->rx_errors += ring->rx_length_errors +
                                ring->rx_crc_errors +
                                ring->rx_frame_errors +
                                ring->rx_fifo_errors;
                        stats->rx_dropped += ring->rx_dropped;
                }
        }
        memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

        return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = eth_mac_addr(ndev, addr);
        if (ret)
                return ret;
        pdata->mac_ops->set_mac_addr(pdata);

        return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
        .ndo_open = xgene_enet_open,
        .ndo_stop = xgene_enet_close,
        .ndo_start_xmit = xgene_enet_start_xmit,
        .ndo_tx_timeout = xgene_enet_timeout,
        .ndo_get_stats64 = xgene_enet_get_stats64,
        .ndo_change_mtu = eth_change_mtu,
        .ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
                                   struct xgene_enet_pdata *pdata)
{
        acpi_status status;
        u64 temp;

        status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
        if (ACPI_FAILURE(status)) {
                pdata->port_id = 0;
        } else {
                pdata->port_id = temp;
        }
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
        u32 id = 0;

        of_property_read_u32(dev->of_node, "port-id", &id);

        pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        u32 delay;
        int ret;

        ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
        if (ret) {
                pdata->tx_delay = 4;
                return 0;
        }

        if (delay > 7) {
                dev_err(dev, "Invalid tx-delay specified\n");
                return -EINVAL;
        }

        pdata->tx_delay = delay;

        return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        u32 delay;
        int ret;

        ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
        if (ret) {
                pdata->rx_delay = 2;
                return 0;
        }

        if (delay > 7) {
                dev_err(dev, "Invalid rx-delay specified\n");
                return -EINVAL;
        }

        pdata->rx_delay = delay;

        return 0;
}

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
        struct platform_device *pdev = pdata->pdev;
        struct device *dev = &pdev->dev;
        int i, ret, max_irqs;

        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                max_irqs = 1;
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
                max_irqs = 2;
        else
                max_irqs = XGENE_MAX_ENET_IRQ;

        for (i = 0; i < max_irqs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                max_irqs = i;
                                pdata->rxq_cnt = max_irqs / 2;
                                pdata->txq_cnt = max_irqs / 2;
                                pdata->cq_cnt = max_irqs / 2;
                                break;
                        }
                        dev_err(dev, "Unable to get ENET IRQ\n");
                        ret = ret ? : -ENXIO;
                        return ret;
                }
                pdata->irqs[i] = ret;
        }

        return 0;
}

static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
        int ret;

        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
                return 0;

        if (!IS_ENABLED(CONFIG_MDIO_XGENE))
                return 0;

        ret = xgene_enet_phy_connect(pdata->ndev);
        if (!ret)
                pdata->mdio_driver = true;

        return 0;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;

        if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
                return;

        pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
        if (IS_ERR(pdata->sfp_rdy))
                pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
        struct platform_device *pdev;
        struct net_device *ndev;
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
        u32 offset;
        int ret = 0;

        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
        if (!res) {
                dev_err(dev, "Resource enet_csr not defined\n");
                return -ENODEV;
        }
        pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
        if (!pdata->base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
                return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
        if (!res) {
                dev_err(dev, "Resource ring_csr not defined\n");
                return -ENODEV;
        }
        pdata->ring_csr_addr = devm_ioremap(dev, res->start,
                                            resource_size(res));
        if (!pdata->ring_csr_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
                return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
        if (!res) {
                dev_err(dev, "Resource ring_cmd not defined\n");
                return -ENODEV;
        }
        pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
                                            resource_size(res));
        if (!pdata->ring_cmd_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring command region\n");
                return -ENOMEM;
        }

        if (dev->of_node)
                xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
        else
                xgene_get_port_id_acpi(dev, pdata);
#endif

        if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
                eth_hw_addr_random(ndev);

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        pdata->phy_mode = device_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
        }
        if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
                dev_err(dev, "Incorrect phy-connection-type specified\n");
                return -ENODEV;
        }

        ret = xgene_get_tx_delay(pdata);
        if (ret)
                return ret;

        ret = xgene_get_rx_delay(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_get_irqs(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_check_phy_handle(pdata);
        if (ret)
                return ret;

        xgene_enet_gpiod_get(pdata);

        pdata->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk)) {
                /* Firmware may have set up the clock already. */
                dev_info(dev, "clocks have been setup already\n");
        }

        if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
                base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
        else
                base_addr = pdata->base_addr;
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
            pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
                pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
                offset = (pdata->enet_id == XGENE_ENET1) ?
                          BLOCK_ETH_MAC_CSR_OFFSET :
                          X2_BLOCK_ETH_MAC_CSR_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + offset;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
                pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
        }
        pdata->rx_buff_cnt = NUM_PKT_BUF;

        return 0;
}

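/* Bring the hardware up: reset the port, create the descriptor rings,
 * seed and fill the RX buffer pools, then either program the
 * pre-classifier tree (XGMII) or bypass the classifier straight to the
 * first RX ring.
 */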
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_cle *enet_cle = &pdata->cle;
        struct net_device *ndev = pdata->ndev;
        struct xgene_enet_desc_ring *buf_pool;
        u16 dst_ring_num;
        int i, ret;

        ret = pdata->port_ops->reset(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
                netdev_err(ndev, "Error in ring configuration\n");
                return ret;
        }

        /* setup buffer pool */
        for (i = 0; i < pdata->rxq_cnt; i++) {
                buf_pool = pdata->rx_ring[i]->buf_pool;
                xgene_enet_init_bufpool(buf_pool);
                ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
                if (ret)
                        goto err;
        }

        dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
        buf_pool = pdata->rx_ring[0]->buf_pool;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                /* Initialize and Enable PreClassifier Tree */
                enet_cle->max_nodes = 512;
                enet_cle->max_dbptrs = 1024;
                enet_cle->parsers = 3;
                enet_cle->active_parser = PARSER_ALL;
                enet_cle->ptree.start_node = 0;
                enet_cle->ptree.start_dbptr = 0;
                enet_cle->jump_bytes = 8;
                ret = pdata->cle_ops->cle_init(pdata);
                if (ret) {
                        netdev_err(ndev, "Preclass Tree init error\n");
                        goto err;
                }
        } else {
                pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
        }

        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->mac_ops->init(pdata);

        return ret;

err:
        xgene_enet_delete_desc_rings(pdata);
        return ret;
}

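/* Pick the MAC, port and ring ops for the configured PHY mode, plus the
 * per-port starting buffer and ring numbers, which differ between the
 * ENET1 and ENET2 generations of the hardware.
 */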
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
        switch (pdata->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                pdata->mac_ops = &xgene_gmac_ops;
                pdata->port_ops = &xgene_gport_ops;
                pdata->rm = RM3;
                pdata->rxq_cnt = 1;
                pdata->txq_cnt = 1;
                pdata->cq_cnt = 0;
                break;
        case PHY_INTERFACE_MODE_SGMII:
                pdata->mac_ops = &xgene_sgmac_ops;
                pdata->port_ops = &xgene_sgport_ops;
                pdata->rm = RM1;
                pdata->rxq_cnt = 1;
                pdata->txq_cnt = 1;
                pdata->cq_cnt = 1;
                break;
        default:
                pdata->mac_ops = &xgene_xgmac_ops;
                pdata->port_ops = &xgene_xgport_ops;
                pdata->cle_ops = &xgene_cle3in_ops;
                pdata->rm = RM0;
                if (!pdata->rxq_cnt) {
                        pdata->rxq_cnt = XGENE_NUM_RX_RING;
                        pdata->txq_cnt = XGENE_NUM_TX_RING;
                        pdata->cq_cnt = XGENE_NUM_TXC_RING;
                }
                break;
        }

        if (pdata->enet_id == XGENE_ENET1) {
                switch (pdata->port_id) {
                case 0:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                                pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                                pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                                pdata->ring_num = START_RING_NUM_0;
                        } else {
                                pdata->cpu_bufnum = START_CPU_BUFNUM_0;
                                pdata->eth_bufnum = START_ETH_BUFNUM_0;
                                pdata->bp_bufnum = START_BP_BUFNUM_0;
                                pdata->ring_num = START_RING_NUM_0;
                        }
                        break;
                case 1:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
                                pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
                                pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
                                pdata->ring_num = XG_START_RING_NUM_1;
                        } else {
                                pdata->cpu_bufnum = START_CPU_BUFNUM_1;
                                pdata->eth_bufnum = START_ETH_BUFNUM_1;
                                pdata->bp_bufnum = START_BP_BUFNUM_1;
                                pdata->ring_num = START_RING_NUM_1;
                        }
                        break;
                default:
                        break;
                }
                pdata->ring_ops = &xgene_ring1_ops;
        } else {
                switch (pdata->port_id) {
                case 0:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                        pdata->ring_num = X2_START_RING_NUM_0;
                        break;
                case 1:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
                        pdata->ring_num = X2_START_RING_NUM_1;
                        break;
                default:
                        break;
                }
                pdata->ring_ops = &xgene_ring2_ops;
        }
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }
}

static int xgene_enet_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
        void (*link_state)(struct work_struct *);
        const struct of_device_id *of_id;
        int ret = 0;

        ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
                                  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;
        xgene_enet_set_ethtool_ops(ndev);
        ndev->features |= NETIF_F_IP_CSUM |
                          NETIF_F_GSO |
                          NETIF_F_GRO |
                          NETIF_F_SG;

        of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
        if (of_id) {
                pdata->enet_id = (enum xgene_enet_id)of_id->data;
        }
#ifdef CONFIG_ACPI
        else {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
                if (acpi_id)
                        pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
        }
#endif
        if (!pdata->enet_id) {
                ret = -ENODEV;
                goto err;
        }

        ret = xgene_enet_get_resources(pdata);
        if (ret)
                goto err;

        xgene_enet_setup_ops(pdata);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                ndev->features |= NETIF_F_TSO;
                pdata->mss = XGENE_ENET_MSS;
        }
        ndev->hw_features = ndev->features;

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }

        ret = xgene_enet_init_hw(pdata);
        if (ret)
                goto err;

        link_state = pdata->mac_ops->link_state;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                INIT_DELAYED_WORK(&pdata->link_work, link_state);
        } else if (!pdata->mdio_driver) {
                if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                        ret = xgene_enet_mdio_config(pdata);
                else
                        INIT_DELAYED_WORK(&pdata->link_work, link_state);

                if (ret)
                        goto err1;
        }

        xgene_enet_napi_add(pdata);
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err2;
        }

        return 0;

err2:
        /*
         * If necessary, free_netdev() will call netif_napi_del() and undo
         * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
         */
        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                xgene_enet_mdio_remove(pdata);
err1:
        xgene_enet_delete_desc_rings(pdata);
err:
        free_netdev(ndev);
        return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        ndev = pdata->ndev;

        rtnl_lock();
        if (netif_running(ndev))
                dev_close(ndev);
        rtnl_unlock();

        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                xgene_enet_mdio_remove(pdata);

        unregister_netdev(ndev);
        pdata->port_ops->shutdown(pdata);
        xgene_enet_delete_desc_rings(pdata);
        free_netdev(ndev);

        return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;

        pdata = platform_get_drvdata(pdev);
        if (!pdata)
                return;

        if (!pdata->ndev)
                return;

        xgene_enet_remove(pdev);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
        { "APMC0D05", XGENE_ENET1},
        { "APMC0D30", XGENE_ENET1},
        { "APMC0D31", XGENE_ENET1},
        { "APMC0D3F", XGENE_ENET1},
        { "APMC0D26", XGENE_ENET2},
        { "APMC0D25", XGENE_ENET2},
        { }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
        {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
        {},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
                   .of_match_table = of_match_ptr(xgene_enet_of_match),
                   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
        .shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");