/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the DMA interrupt (offset 1) */
			ret = platform_get_irq(pdata->pdev, i + 1);
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "platform_get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}

			channel->dma_irq = ret;
		}

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
		      channel->name, channel->queue_index, channel->dma_regs,
		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}
static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}
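
/* A note on the ring accounting in the two helpers above: ->cur and
 * ->dirty are free-running unsigned counters, so (cur - dirty) is the
 * number of in-flight descriptors even after the counters wrap.  For
 * example, with rdesc_count = 512, cur = 700 and dirty = 650, 50
 * descriptors are outstanding and 462 are still available for new work.
 */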
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
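
/* Worked example for the sizing above (assuming a 64-byte
 * XGBE_RX_BUF_ALIGN): for mtu = 1500, rx_buf_size = 1500 + 14 + 4 + 4 =
 * 1522, which survives the clamp and rounds up to the next alignment
 * boundary, 1536.  The return value doubles as an error code, which is
 * why callers check for ret < 0 before storing it in pdata->rx_buf_size.
 */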
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->disable_int(channel, int_id);
	}
}
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("  DMA_ISR = %08x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				schedule_work(&pdata->tx_tstamp_work);
			}
		}
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

isr_done:
	return IRQ_HANDLED;
}
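
/* Orientation note: xgbe_isr() is the shared device-level handler.  It
 * only acknowledges DMA channel status and hands Rx/Tx work to NAPI; the
 * napi_schedule_prep()/__napi_schedule() pair guarantees the poll routine
 * is queued at most once while the Tx/Rx interrupts stay masked, so
 * packet processing always happens in softirq context, never here.
 */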
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule(&channel->napi);
	}

	return IRQ_HANDLED;
}
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}
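
/* The Tx timer is the coalescing back-stop: descriptors queued with
 * their interrupt bit suppressed would sit unreclaimed if traffic
 * stopped, so this hrtimer forces a NAPI pass when it expires (it is
 * armed from the transmit path in the hardware layer).  Clearing
 * tx_timer_active here lets the xmit path re-arm the timer only when
 * new work is actually posted.
 */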
static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}
static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}
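
/* Example of the decode above: hardware reporting RXQCNT = 15 means 16 Rx
 * queues after the increment, and a HASHTBLSZ field of 2 means a
 * 128-entry hash table, so every consumer of hw_feat sees real counts
 * rather than the raw register encodings.
 */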
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}
static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
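
/* The unwind loop above relies on unsigned wraparound instead of a signed
 * index: when the failure happens at channel i, the loop frees the IRQs
 * for i-1 down to 0, then i-- wraps to UINT_MAX, the `i < channel_count`
 * test fails, and the loop stops.  Only the channel IRQs that were
 * actually requested before the error get released.
 */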
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}
void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}
static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}
static void xgbe_adjust_link(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct phy_device *phydev = pdata->phydev;
	int new_state = 0;

	if (!phydev)
		return;

	if (phydev->link) {
		/* Flow control support */
		if (pdata->pause_autoneg) {
			if (phydev->pause || phydev->asym_pause) {
				pdata->tx_pause = 1;
				pdata->rx_pause = 1;
			} else {
				pdata->tx_pause = 0;
				pdata->rx_pause = 0;
			}
		}

		if (pdata->tx_pause != pdata->phy_tx_pause) {
			hw_if->config_tx_flow_control(pdata);
			pdata->phy_tx_pause = pdata->tx_pause;
		}

		if (pdata->rx_pause != pdata->phy_rx_pause) {
			hw_if->config_rx_flow_control(pdata);
			pdata->phy_rx_pause = pdata->rx_pause;
		}

		/* Speed support */
		if (phydev->speed != pdata->phy_speed) {
			new_state = 1;

			switch (phydev->speed) {
			case SPEED_10000:
				hw_if->set_xgmii_speed(pdata);
				break;

			case SPEED_2500:
				hw_if->set_gmii_2500_speed(pdata);
				break;

			case SPEED_1000:
				hw_if->set_gmii_speed(pdata);
				break;
			}
			pdata->phy_speed = phydev->speed;
		}

		if (phydev->link != pdata->phy_link) {
			new_state = 1;
			pdata->phy_link = 1;
		}
	} else if (pdata->phy_link) {
		new_state = 1;
		pdata->phy_link = 0;
		pdata->phy_speed = SPEED_UNKNOWN;
	}

	if (new_state)
		phy_print_status(phydev);
}
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct phy_device *phydev = pdata->phydev;
	int ret;

	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->phy_tx_pause = pdata->tx_pause;
	pdata->phy_rx_pause = pdata->rx_pause;

	ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
				 pdata->phy_mode);
	if (ret) {
		netdev_err(netdev, "phy_connect_direct failed\n");
		return ret;
	}

	if (!phydev->drv || (phydev->drv->phy_id == 0)) {
		netdev_err(netdev, "phy_id not valid\n");
		ret = -ENODEV;
		goto err_phy_connect;
	}
	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
	      dev_name(&phydev->dev), phydev->link);

	return 0;

err_phy_connect:
	phy_disconnect(phydev);

	return ret;
}

static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
	phy_disconnect(pdata->phydev);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	phy_stop(pdata->phydev);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}
int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;

err_napi:
	xgbe_napi_disable(pdata, 1);

	phy_stop(pdata->phydev);

	hw_if->exit(pdata);

	return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	phy_stop(pdata->phydev);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}
static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}
static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}
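
/* The rx_filter switch above deliberately falls through from each PTP v2
 * case into the matching v1 case: v2 only adds TSVER2ENA on top of the
 * shared v1 bits, so e.g. HWTSTAMP_FILTER_PTP_V2_L4_SYNC sets TSVER2ENA
 * and then picks up TSIPV4ENA/TSIPV6ENA/TSEVNTENA/TSENA from the v1 path.
 */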
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}
static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
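
/* TSO detection above rests on two stack invariants: a GSO skb always
 * carries CHECKSUM_PARTIAL (the hardware must fill in the checksum of
 * each generated segment), and skb_is_gso() confirms gso_size is set.
 * Anything else is transmitted as a normal, possibly checksum-offloaded,
 * frame.
 */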
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
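
/* Example of the estimate above: a non-TSO frame with a new VLAN tag, a
 * 300-byte linear area and two 4 KB fragments needs 1 context descriptor
 * + 1 for the head + 1 per fragment = 4 descriptors (each well under
 * XGBE_TX_MAX_BUF_SIZE).  xgbe_xmit() uses this count to decide whether
 * the ring can take the packet before any DMA mapping is attempted.
 */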
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Initialize the phy */
	ret = xgbe_phy_init(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_phy_init;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the device restart and Tx timestamp work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_phy_init:
	xgbe_phy_exit(pdata);

	return ret;
}
static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	/* Release the phy */
	xgbe_phy_exit(pdata);

	DBGPR("<--xgbe_close\n");

	return 0;
}
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}
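
/* Ordering in xgbe_xmit() matters: the descriptor count from
 * xgbe_packet_info() is checked against ring space before any DMA
 * mapping is done, and the queue is re-checked with XGBE_TX_MAX_DESCS
 * after hw_if->dev_xmit() so the stack is stopped early rather than
 * bouncing packets back with NETDEV_TX_BUSY on the next transmit.
 */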
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);

	hw_if->add_mac_addresses(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}
static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}
static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}
static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int offset, queue;
	u8 i;

	if (tc && (tc != pdata->hw_feat.tc_cnt))
		return -EINVAL;

	if (tc) {
		netdev_set_num_tc(netdev, tc);
		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
			while ((queue < pdata->tx_q_count) &&
			       (pdata->q2tc_map[queue] == i))
				queue++;

			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			offset = queue;
		}
	} else {
		netdev_reset_tc(netdev);
	}

	return 0;
}
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};
struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(rdata);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
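
/* The tail pointer write is what actually returns buffers to the DMA
 * engine: hardware owns descriptors up to the published tail, so writing
 * the last refilled entry only after the wmb() guarantees the device
 * never sees a descriptor whose buffer address is still being written.
 */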
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct xgbe_ring_data *rdata,
				       unsigned int *len)
{
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	u8 *packet;
	unsigned int copy_len;

	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	*len -= copy_len;

	return skb;
}
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != ring->cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}
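
/* The wake-up check above provides hysteresis: a queue stopped by
 * xgbe_maybe_stop_tx_queue() is only restarted once more than
 * XGBE_TX_DESC_MIN_FREE descriptors have been reclaimed, so a nearly
 * full ring does not cause rapid stop/wake flapping under load.
 */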
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, put_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			incomplete = rdata->state.incomplete;
			context_next = rdata->state.context_next;
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			incomplete = 0;
			context_next = 0;
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			put_len = rdata->rx.len - len;
			len += put_len;

			if (!skb) {
				dma_sync_single_for_cpu(pdata->dev,
							rdata->rx.hdr.dma,
							rdata->rx.hdr.dma_len,
							DMA_FROM_DEVICE);

				skb = xgbe_create_skb(pdata, rdata, &put_len);
				if (!skb) {
					error = 1;
					goto read_again;
				}
			}

			if (put_len) {
				dma_sync_single_for_cpu(pdata->dev,
							rdata->rx.buf.dma,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						put_len, rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, napi);

		netdev->last_rx = jiffies;
		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.incomplete = incomplete;
		rdata->state.context_next = context_next;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
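
/* Budget handling in xgbe_all_poll(): with budget = 64 and four Rx
 * rings, each ring initially gets 16 descriptors of work per pass; the
 * outer do/while keeps sweeping the rings until either the whole budget
 * is consumed or a full pass makes no progress, which keeps one busy
 * ring from starving the others.
 */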
void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}