/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
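
/* Illustration: i40e_program_fdir_filter() below calls
 * build_ctob(I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY, 0,
 * I40E_FDIR_MAX_RAW_PACKET_SIZE, 0), which packs the command bits and
 * buffer size into the descriptor's second quad-word while the offset
 * and L2 tag fields stay zero.
 */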

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
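	/* Example of the wraparound above (illustrative numbers): with
	 * count = 512 and i = 511, next_to_use becomes 0; for any smaller i
	 * it simply advances to i + 1.
	 */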
	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
				     I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @tx_ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
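
/* Worked example (illustrative numbers): with ring->count = 512, head = 500
 * and tail = 10 the ring has wrapped, so 10 + 512 - 500 = 22 descriptors are
 * still pending; with head = 10 and tail = 500 it is simply 500 - 10 = 490.
 */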

#define WB_STRIDE 0x3

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;
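
	/* Worked example (illustrative, assumes ITR_COUNTDOWN_START == 100):
	 * rc->itr = 25 (in 2 usec units, i.e. 50 us per interrupt) gives
	 * usecs = 25 * 2 * 100 = 5000; 100 kB received in that window gives
	 * bytes_per_int = 20, which the thresholds below treat as ~20 MB/s.
	 */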
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
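	/* Sizing example (illustrative): a 512-entry ring of 16-byte Tx
	 * descriptors is 8192 bytes; adding the head writeback u32 and
	 * aligning rounds the allocation up to 12288 bytes (three 4K pages).
	 */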
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (!rx_bi->page)
			continue;

		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_bi->page, 0);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		return -ENOMEM;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
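	/* Sizing example (illustrative): a 512-entry ring of 32-byte Rx
	 * descriptors is exactly 16384 bytes, already a multiple of 4K, so
	 * the ALIGN above leaves it unchanged.
	 */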
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
*rx_ring
,
1153 struct i40e_rx_buffer
*bi
)
1155 struct page
*page
= bi
->page
;
1158 /* since we are recycling buffers we should seldom need to alloc */
1160 rx_ring
->rx_stats
.page_reuse_count
++;
1164 /* alloc new page for storage */
1165 page
= dev_alloc_page();
1166 if (unlikely(!page
)) {
1167 rx_ring
->rx_stats
.alloc_page_failed
++;
1171 /* map page for use */
1172 dma
= dma_map_page(rx_ring
->dev
, page
, 0, PAGE_SIZE
, DMA_FROM_DEVICE
);
1174 /* if mapping failed free memory back to system since
1175 * there isn't much point in holding memory we can't use
1177 if (dma_mapping_error(rx_ring
->dev
, dma
)) {
1178 __free_pages(page
, 0);
1179 rx_ring
->rx_stats
.alloc_page_failed
++;
1185 bi
->page_offset
= 0;

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 *
 * skb->protocol must be set before this function is called
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;

	if (unlikely(rsyn)) {
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
		rx_ring->last_rx_timestamp = jiffies;
	}

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);
}

/**
 * i40e_pull_tail - i40e specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an i40e specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 **/
static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		i40e_pull_tail(rx_ring, skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;
}

/**
 * i40e_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 **/
static inline bool i40e_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     union i40e_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
			    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = I40E_RXBUFFER_2048;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!i40e_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, 0);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(i40e_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;
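	/* Example: with 4K pages each page backs two 2K buffers, so the XOR
	 * above just toggles page_offset between 0 and 2048 on every reuse.
	 */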
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	get_page(rx_buffer->page);

	return true;
}

/**
 * i40e_fetch_rx_buffer - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_desc: descriptor containing info written by hardware
 *
 * This function allocates an skb on the fly, and populates it with the page
 * data from the current receive descriptor, taking care to set up the skb
 * correctly, as well as handling calling the page recycle function if
 * necessary.
 **/
static inline
struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
				     union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) + rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       I40E_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	} else {
		rx_buffer->skb = NULL;
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      I40E_RXBUFFER_2048,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	return skb;
}

/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

#define staterrlen rx_desc->wb.qword1.status_error_len
	if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
		i40e_clean_programming_status(rx_ring, rx_desc);
		rx_ring->rx_bi[ntc].skb = skb;
		return true;
	}
	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_bi[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < budget)) {
		union i40e_rx_desc *rx_desc;
		struct sk_buff *skb;
		u32 rx_status;
		u16 vlan_tag;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		if (!rx_desc->wb.qword1.status_error_len)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
		if (!skb)
			break;

		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (i40e_cleanup_headers(rx_ring, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

#ifdef I40E_FCOE
		if (unlikely(
		    i40e_rx_is_fcoe(rx_ptype) &&
		    !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif

		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;

		i40e_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_packets++;
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : total_rx_packets;
}
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      /* Don't clear PBA because that can cause lost interrupts that
	       * came in while we were cleaning/polling
	       */
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN

/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;
	int idx = q_vector->v_idx;

	vector = (q_vector->v_idx + vsi->base_vector);

	/* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}

	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);

	if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Clear hung_detected bit */
	clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
			clean_complete = false;
			continue;
		}
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
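	/* Example (illustrative): with a NAPI budget of 64 and 4 ring pairs
	 * on this vector, each Rx ring is polled with a budget of 16.
	 */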
	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);

		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		i40e_irq_dynamic_enable_icr0(vsi->back, false);
	}
	return 0;
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @tx_flags: send tx flags
 **/
2029 static void i40e_atr(struct i40e_ring
*tx_ring
, struct sk_buff
*skb
,
2032 struct i40e_filter_program_desc
*fdir_desc
;
2033 struct i40e_pf
*pf
= tx_ring
->vsi
->back
;
2035 unsigned char *network
;
2037 struct ipv6hdr
*ipv6
;
2041 u32 flex_ptype
, dtype_cmd
;
2045 /* make sure ATR is enabled */
2046 if (!(pf
->flags
& I40E_FLAG_FD_ATR_ENABLED
))
2049 if ((pf
->auto_disable_flags
& I40E_FLAG_FD_ATR_ENABLED
))
2052 /* if sampling is disabled do nothing */
2053 if (!tx_ring
->atr_sample_rate
)
2056 /* Currently only IPv4/IPv6 with TCP is supported */
2057 if (!(tx_flags
& (I40E_TX_FLAGS_IPV4
| I40E_TX_FLAGS_IPV6
)))
2060 /* snag network header to get L4 type and address */
2061 hdr
.network
= (tx_flags
& I40E_TX_FLAGS_UDP_TUNNEL
) ?
2062 skb_inner_network_header(skb
) : skb_network_header(skb
);
2064 /* Note: tx_flags gets modified to reflect inner protocols in
2065 * tx_enable_csum function if encap is enabled.
2067 if (tx_flags
& I40E_TX_FLAGS_IPV4
) {
2068 /* access ihl as u8 to avoid unaligned access on ia64 */
2069 hlen
= (hdr
.network
[0] & 0x0F) << 2;
2070 l4_proto
= hdr
.ipv4
->protocol
;
2072 hlen
= hdr
.network
- skb
->data
;
2073 l4_proto
= ipv6_find_hdr(skb
, &hlen
, IPPROTO_TCP
, NULL
, NULL
);
2074 hlen
-= hdr
.network
- skb
->data
;
2077 if (l4_proto
!= IPPROTO_TCP
)
2080 th
= (struct tcphdr
*)(hdr
.network
+ hlen
);
2082 /* Due to lack of space, no more new filters can be programmed */
2083 if (th
->syn
&& (pf
->auto_disable_flags
& I40E_FLAG_FD_ATR_ENABLED
))
2085 if ((pf
->flags
& I40E_FLAG_HW_ATR_EVICT_CAPABLE
) &&
2086 (!(pf
->auto_disable_flags
& I40E_FLAG_HW_ATR_EVICT_CAPABLE
))) {
2087 /* HW ATR eviction will take care of removing filters on FIN
2090 if (th
->fin
|| th
->rst
)
2094 tx_ring
->atr_count
++;
2096 /* sample on all syn/fin/rst packets or once every atr sample rate */
2100 (tx_ring
->atr_count
< tx_ring
->atr_sample_rate
))
2103 tx_ring
->atr_count
= 0;
2105 /* grab the next descriptor */
2106 i
= tx_ring
->next_to_use
;
2107 fdir_desc
= I40E_TX_FDIRDESC(tx_ring
, i
);
2110 tx_ring
->next_to_use
= (i
< tx_ring
->count
) ? i
: 0;
2112 flex_ptype
= (tx_ring
->queue_index
<< I40E_TXD_FLTR_QW0_QINDEX_SHIFT
) &
2113 I40E_TXD_FLTR_QW0_QINDEX_MASK
;
2114 flex_ptype
|= (tx_flags
& I40E_TX_FLAGS_IPV4
) ?
2115 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP
<<
2116 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT
) :
2117 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP
<<
2118 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT
);
2120 flex_ptype
|= tx_ring
->vsi
->id
<< I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT
;
2122 dtype_cmd
= I40E_TX_DESC_DTYPE_FILTER_PROG
;
2124 dtype_cmd
|= (th
->fin
|| th
->rst
) ?
2125 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
<<
2126 I40E_TXD_FLTR_QW1_PCMD_SHIFT
) :
2127 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
<<
2128 I40E_TXD_FLTR_QW1_PCMD_SHIFT
);
2130 dtype_cmd
|= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX
<<
2131 I40E_TXD_FLTR_QW1_DEST_SHIFT
;
2133 dtype_cmd
|= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID
<<
2134 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT
;
2136 dtype_cmd
|= I40E_TXD_FLTR_QW1_CNT_ENA_MASK
;
2137 if (!(tx_flags
& I40E_TX_FLAGS_UDP_TUNNEL
))
2139 ((u32
)I40E_FD_ATR_STAT_IDX(pf
->hw
.pf_id
) <<
2140 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT
) &
2141 I40E_TXD_FLTR_QW1_CNTINDEX_MASK
;
2144 ((u32
)I40E_FD_ATR_TUNNEL_STAT_IDX(pf
->hw
.pf_id
) <<
2145 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT
) &
2146 I40E_TXD_FLTR_QW1_CNTINDEX_MASK
;
2148 if ((pf
->flags
& I40E_FLAG_HW_ATR_EVICT_CAPABLE
) &&
2149 (!(pf
->auto_disable_flags
& I40E_FLAG_HW_ATR_EVICT_CAPABLE
)))
2150 dtype_cmd
|= I40E_TXD_FLTR_QW1_ATR_MASK
;
2152 fdir_desc
->qindex_flex_ptype_vsi
= cpu_to_le32(flex_ptype
);
2153 fdir_desc
->rsvd
= cpu_to_le32(0);
2154 fdir_desc
->dtype_cmd_cntindex
= cpu_to_le32(dtype_cmd
);
2155 fdir_desc
->fd_id
= cpu_to_le32(0);
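
/* Sampling example: with atr_sample_rate == 20, the function above emits a
 * filter programming descriptor for roughly every 20th TCP packet on this
 * queue, and unconditionally for SYN/FIN/RST so connection setup installs
 * the steering entry immediately and teardown removes it.
 */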
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
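
/* Layout note: the 16-bit VLAN TCI is carried in the upper bits of tx_flags
 * (shifted by I40E_TX_FLAGS_VLAN_SHIFT), which is why the DCB path above can
 * splice in the 3-bit 802.1p priority with the _VLAN_PRIO_MASK/_SHIFT pair
 * and i40e_tx_map() can later recover the whole tag with one shift down.
 */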
/**
 * i40e_tso - set up the tso context descriptor
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPIP |
					 SKB_GSO_SIT |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_offset = l4.hdr - skb->data;

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_offset;
			csum_replace_by_diff(&l4.udp->check, htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
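
/* Worked example for the TSO fields above (sizes assumed for illustration):
 * an skb with skb->len == 7306 and 14-byte Ethernet + 20-byte IPv4 +
 * 32-byte TCP headers gives l4_offset == 34, *hdr_len == 66 and
 * cd_tso_len == 7240; with gso_size == 1448 the hardware then cuts five
 * full MSS segments, replicating the 66-byte header on each.
 */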
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
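
/* Note: __I40E_PTP_TX_IN_PROGRESS acts as a one-deep lock; only a single
 * skb is held in pf->ptp_tx_skb until its hardware timestamp is read back,
 * and any other packet requesting a timestamp in the meantime is simply
 * sent without one (the function returns 0 for it).
 */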
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct i40e_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		u32 tunnel = 0;

		/* define outer network header type */
		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
				  I40E_TX_CTX_EXT_IP_IPV4 :
				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= I40E_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= I40E_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;

		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}
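
/* Units example for the *td_offset fields built above: MACLEN is in 2-byte
 * words and IPLEN in 4-byte words, hence the divisions.  A plain TCP/IPv4
 * frame yields MACLEN == 14 / 2 == 7, IPLEN == 20 / 4 == 5, and L4LEN taken
 * straight from l4.tcp->doff, which is already expressed in 4-byte words.
 */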
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
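
/* Fast-path note: a context descriptor costs a ring slot, so the early
 * return above skips writing one for the common untagged, non-TSO,
 * non-tunneled packet whose QW1 still equals the bare
 * I40E_TX_DESC_DTYPE_CONTEXT value it was initialized with.
 */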
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
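
/* The stop-then-recheck sequence above closes a race with the Tx cleanup
 * path: the queue is stopped first, smp_mb() orders the stop before the
 * free count is re-read, and if a completion freed descriptors in between,
 * the queue restarts immediately instead of stalling until the next clean.
 */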
/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.  However we don't need
	 * to perform such validation on the last 6 since the last 6 cannot
	 * inherit any data from a descriptor after them.
	 */
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		/* use pre-decrement to avoid processing last fragment */
		if (!--nr_frags)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}
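
/* Numeric sketch of the sliding window above (sizes invented for the
 * example): with gso_size == 4000 and seven 1000-byte frags, sum starts at
 * 1 - 4000 == -3999, reaches +1001 after frags 0-4, and never goes negative
 * as the window slides, so no linearization is needed.  Shrink the frags to
 * 600 bytes each and the first window leaves sum at -399, the function
 * returns true, and the caller linearizes the skb.
 */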
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
						 first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are less than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump) {
		prefetchw(tx_desc + 1);
	} else {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
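
/* Chunking example for the mapping loop above, assuming the driver's usual
 * 16K - 1 per-descriptor limit and 4K read-request alignment: a 32K linear
 * buffer whose DMA address starts 2K into a 4K page is emitted as 14K
 * (the 12K aligned limit plus 2K of padding so the chunk ends on a 4K
 * boundary), then 12K, then the 6K remainder - three data descriptors.
 */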
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
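
/* Descriptor budgeting example for the stop check above: an skb needing
 * count == 4 data descriptors reserves count + 4 + 1 == 9 free slots, i.e.
 * four for data, one for a possible context descriptor, and a four-slot gap
 * so the tail never advances onto the cache line holding the head.
 */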
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}