/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>

#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
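
/* Worked example (illustrative values only): build_ctob(I40E_TX_DESC_CMD_EOP,
 * 0, 256, 0) produces, before the cpu_to_le64() conversion,
 *
 *	I40E_TX_DESC_DTYPE_DATA
 *		| ((u64)I40E_TX_DESC_CMD_EOP << I40E_TXD_QW1_CMD_SHIFT)
 *		| (256ULL << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
 *
 * i.e. the descriptor type, command bits, header offsets, buffer size and
 * VLAN tag are all packed into the single quad word the hardware parses,
 * so callers never have to know the QW1 bit layout themselves.
 */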

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The pf pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* set the timestamp */
	tx_buf->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	tx_buf->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
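
/* Typical call pattern (sketch, drawn from the helpers below): each
 * i40e_add_del_fdir_*() routine builds a dummy packet for one PCTYPE and
 * then calls i40e_program_fdir_filter(fd_data, raw_packet, pf, true) to
 * add the rule, or with add == false to remove it.  The caller owns
 * raw_packet; this function only maps it for DMA long enough to push the
 * filter-program descriptor and the dummy data descriptor to hardware.
 */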

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   u8 *raw_packet, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev,
			 "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   u8 *raw_packet, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;

	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    u8 *raw_packet, bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  u8 *raw_packet, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	int ret;
	int i;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

	ip->saddr = fd_data->src_ip[0];
	ip->daddr = fd_data->dst_ip[0];
	ip->protocol = 0;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
			err = true;
		} else {
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	/* Populate the Flow Director that we have at the moment
	 * and allocate the raw packet buffer for the calling functions
	 */
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
					      add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
					      add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
					       add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
					     add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input,
						      raw_packet, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input,
						      raw_packet, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input,
						       raw_packet, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input,
						     raw_packet, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	kfree(raw_packet);
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
			 rx_desc->wb.qword0.hi_dword.fd_id);

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_current_fd_count(pf);
		fcnt_avail = i40e_get_fd_cnt_all(pf);
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			/* Turn off ATR first */
			if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
				pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
				dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
				pf->auto_disable_flags |=
						       I40E_FLAG_FD_ATR_ENABLED;
				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
			} else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
			}
		} else {
			dev_info(&pdev->dev, "FD filter programming error\n");
		}
	} else if (error ==
		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @tx_ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
			? ring->next_to_use
			: ring->next_to_use + ring->count);

	return ntu - ring->next_to_clean;
}
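
/* Worked example (illustrative values): with count = 512,
 * next_to_clean = 500 and next_to_use = 10, the ring has wrapped, so
 * ntu = 10 + 512 = 522 and 522 - 500 = 22 descriptors are still pending.
 * Without a wrap, e.g. next_to_clean = 10 and next_to_use = 50, the
 * result is simply 50 - 10 = 40.
 */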
/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
	    tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and disarm the hang check */
		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
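
/* Layout note (sketch): the head write-back word lives immediately after
 * the last descriptor, so for a ring of count N the DMA area looks like
 *
 *	desc[0] ... desc[N-1] | u32 head
 *
 * which is why i40e_setup_tx_descriptors() adds sizeof(u32) to the ring
 * size before aligning it.  Hardware DMA-writes its current head index
 * there, and i40e_clean_tx_irq() reads it instead of issuing a (slower)
 * register read across PCIe.
 */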
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget:  how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
			 "  VSI                  <%d>\n"
			 "  Tx Queue             <%d>\n"
			 "  next_to_use          <%x>\n"
			 "  next_to_clean        <%x>\n",
			 tx_ring->vsi->seid,
			 tx_ring->queue_index,
			 tx_ring->next_to_use, i);
		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
			 "  time_stamp           <%lx>\n"
			 "  jiffies              <%lx>\n",
			 tx_ring->tx_bi[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		dev_info(tx_ring->dev,
			 "tx hang detected on queue %d, resetting adapter\n",
			 tx_ring->queue_index);

		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return budget > 0;
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_int;

	if (rc->total_packets == 0 || !rc->itr)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}
	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	if (new_itr != rc->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * rc->itr) /
			  ((9 * new_itr) + rc->itr);
		rc->itr = new_itr & I40E_MAX_ITR;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
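
/* Worked example (illustrative): when the target ITR changes, the
 * smoothing step
 *
 *	new_itr = (10 * new * old) / (9 * new + old)
 *
 * is a weighted harmonic mean that weights the old value 9:1, so the
 * programmed ITR drifts toward the new target over several interrupts
 * instead of jumping there in one step; this damps oscillation when
 * traffic hovers near a latency-range threshold.
 */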
/**
 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
 * @q_vector: the vector to adjust
 **/
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
{
	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
	struct i40e_hw *hw = &q_vector->vsi->back->hw;
	u32 reg_addr;
	u16 old_itr;

	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
	old_itr = q_vector->rx.itr;
	i40e_set_new_dynamic_itr(&q_vector->rx);
	if (old_itr != q_vector->rx.itr)
		wr32(hw, reg_addr, q_vector->rx.itr);

	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
	old_itr = q_vector->tx.itr;
	i40e_set_new_dynamic_itr(&q_vector->tx);
	if (old_itr != q_vector->tx.itr)
		wr32(hw, reg_addr, q_vector->tx.itr);
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
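
/* Ordering sketch: every producer in this file follows the same pattern
 * before bumping a tail register, e.g.
 *
 *	rx_desc->read.pkt_addr = cpu_to_le64(dma);	// fill descriptor
 *	wmb();						// order the writes
 *	writel(next_to_use, ring->tail);		// then tell hardware
 *
 * If the barrier were omitted, a weakly ordered CPU could let the tail
 * write reach the device before the descriptor contents do, and the
 * hardware would DMA from a stale descriptor.
 */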
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info.
			 */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;
	struct i40e_vsi *vsi = rx_ring->vsi;
	u64 flags = vsi->back->flags;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	if (flags & I40E_FLAG_IN_NETPOLL)
		netif_rx(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* likely incorrect csum if alternate IP extension headers found */
	if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		return;

	/* IP or L4 or outmost IP checksum error */
	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
			(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
		vsi->back->hw_csum_rx_error++;
		return;
	}

	if (ipv4_tunnel &&
	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
		/* If VXLAN traffic has an outer UDPv4 checksum we need to check
		 * it in the driver, hardware does not do it for us.
		 * Since L3L4P bit was set we assume a valid IHL value (>=5)
		 * so the total length of IPv4 header is IHL*4 bytes
		 */
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		rx_udp_csum = udp_csum(skb);
		iph = ip_hdr(skb);
		csum = csum_tcpudp_magic(
				iph->saddr, iph->daddr,
				(skb->len - skb_transport_offset(skb)),
				IPPROTO_UDP, rx_udp_csum);

		if (udp_hdr(skb)->check != csum) {
			vsi->back->hw_csum_rx_error++;
			return;
		}
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}
/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
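
/* Summary of the mapping above: an unknown ptype hashes as NONE; a known
 * ptype with an outer IP header hashes as L4 when the payload layer is
 * PAY4 (an L4 payload such as TCP/UDP/SCTP) and as L3 when it is PAY3;
 * everything else falls back to L2.  This is what lets the stack trust
 * skb_get_hash() for flow steering without re-dissecting the headers.
 */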
/**
 * i40e_clean_rx_irq - Reclaim resources after receive completes
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_node_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;

	rx_desc = I40E_RX_DESC(rx_ring, i);
	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;

	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		union i40e_rx_desc *next_rxd;
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
			goto next_desc;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * STATUS_DD bit is set
		 */
		rmb();

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		if (rx_bi->dma) {
			u16 len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else if (rx_sph)
				len = rx_header_len;
			else if (rx_packet_len)
				len = rx_packet_len;   /* 1buf/no split found */
			else
				len = rx_header_len;   /* split always mode */

			skb_put(skb, len);
			dma_unmap_single(rx_ring->dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}

		/* Get the rest of the data if this was a header split */
		if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);

		if (unlikely(
		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];

			if (ring_is_ps_enabled(rx_ring)) {
				rx_bi->skb = next_buffer->skb;
				rx_bi->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_ring->netdev->last_rx = jiffies;
		budget--;
next_desc:
		rx_desc->wb.qword1.status_error_len = 0;
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		i40e_alloc_rx_buffers(rx_ring, cleaned_count);

	return budget > 0;
}
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	int budget_per_ring;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx)
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx)
		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		i40e_update_dynamic_itr(q_vector);

	if (!test_bit(__I40E_DOWN, &vsi->state)) {
		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
			i40e_irq_dynamic_enable(vsi,
					q_vector->v_idx + vsi->base_vector);
		} else {
			struct i40e_hw *hw = &vsi->back->hw;
			/* We re-enable the queue 0 cause, but
			 * don't worry about dynamic_enable
			 * because we left it on for the other
			 * possible interrupts during napi
			 */
			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_RQCTL(0), qval);

			qval = rd32(hw, I40E_QINT_TQCTL(0));
			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_TQCTL(0), qval);

			i40e_irq_dynamic_enable_icr0(vsi->back);
		}
	}

	return 0;
}
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @flags:    send flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if (protocol == htons(ETH_P_IP)) {
		if (hdr.ipv4->protocol != IPPROTO_TCP)
			return;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
	} else if (protocol == htons(ETH_P_IPV6)) {
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return;

		hlen = sizeof(struct ipv6hdr);
	} else {
		return;
	}

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
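
/* Sampling cadence (from the checks above): every SYN/FIN/RST is always
 * considered, while ordinary data packets only program a filter once per
 * atr_sample_rate packets on this queue.  FIN/RST issue a REMOVE command,
 * so the filter table tracks live flows rather than accumulating entries
 * for connections that have already closed.
 */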
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:      send buffer
 * @tx_ring:  ring to send buffer on
 * @flags:    the tx flags to be set
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code if the frame should be dropped, otherwise 0 to
 * indicate the flags have been set properly.
 **/
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32  tx_flags = 0;

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	/* Insert 802.1p priority into VLAN header */
	if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}
	*flags = tx_flags;
	return 0;
}
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @protocol: the send protocol
 * @hdr_len:  ptr to the size of the packet header
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (protocol == htons(ETH_P_IP)) {
		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {

		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
					   : ipv6_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
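
/* Worked example (illustrative): for a non-encapsulated IPv4 TCP GSO skb
 * with a 14-byte Ethernet header, a 20-byte IP header and a 20-byte TCP
 * header, hdr_len = skb_transport_offset() + tcp_hdrlen() = 34 + 20 = 54,
 * so cd_tso_len = skb->len - 54 and the hardware replicates those 54
 * header bytes in front of every gso_size-sized segment it cuts.
 */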
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (pf->ptp_tx && !pf->ptp_tx_skb) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;

	if (skb->encapsulation) {
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (tx_flags & I40E_TX_FLAGS_IPV4) {

			if (tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
			if (tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
					I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				   I40E_TXD_CTX_UDP_TUNNELING            |
				   ((skb_inner_network_offset(skb) -
					skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;

	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* Place RS bit on last descriptor of any packet that spans across the
	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
	 */
#define WB_STRIDE 0x3
	if (((i & WB_STRIDE) != WB_STRIDE) &&
	    (first <= &tx_ring->tx_bi[i]) &&
	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
					 I40E_TXD_QW1_CMD_SHIFT);
	} else {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TXD_CMD <<
					 I40E_TXD_QW1_CMD_SHIFT);
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	writel(i, tx_ring->tail);

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
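
/* Illustrative note on the WB_STRIDE test above: with WB_STRIDE = 0x3,
 * descriptors 3, 7, 11, ... are "stride" slots.  A packet whose last
 * descriptor index i is not a stride slot, and which fits entirely inside
 * the current group of four (first lies between i & ~WB_STRIDE and i),
 * gets EOP only; its completion write-back piggybacks on a later packet
 * that does set RS, reducing descriptor write-back traffic on PCIe.
 */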
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb.  Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
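
/* Worked example (illustrative, and assuming 4 KB <= I40E_MAX_DATA_PER_TXD):
 * an skb with a 200-byte linear head and three 4 KB page frags needs
 * TXD_USE_COUNT(200) = 1 plus 1 per frag, so count = 4; the ring must then
 * have at least 4 + 4 + 1 = 9 free descriptors (data descriptors, the
 * head-cacheline gap, and one context descriptor) or the queue is stopped
 * and tx_busy is incremented.
 */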
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = skb->protocol;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	skb_tx_timestamp(skb);

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = I40E_MIN_TX_LEN;
		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
	}

	return i40e_xmit_frame_ring(skb, tx_ring);
}