i40e/i40evf: initialize context descriptor
[deliverable/linux.git] / drivers / net / ethernet / intel / i40e / i40e_txrx.c
CommitLineData
fd0a05ce
JB
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
dc641b73 4 * Copyright(c) 2013 - 2014 Intel Corporation.
fd0a05ce
JB
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
dc641b73
GR
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
fd0a05ce
JB
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
1c112a64 27#include <linux/prefetch.h>
fd0a05ce 28#include "i40e.h"
206812b5 29#include "i40e_prototype.h"
fd0a05ce
JB
30
31static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
32 u32 td_tag)
33{
34 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
35 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
36 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
37 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
38 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
39}
40
eaefbd06 41#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
fd0a05ce
JB
42/**
43 * i40e_program_fdir_filter - Program a Flow Director filter
17a73f6b
JG
44 * @fdir_data: Packet data that will be filter parameters
45 * @raw_packet: the pre-allocated packet buffer for FDir
fd0a05ce
JB
46 * @pf: The pf pointer
47 * @add: True for add/update, False for remove
48 **/
17a73f6b 49int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
fd0a05ce
JB
50 struct i40e_pf *pf, bool add)
51{
52 struct i40e_filter_program_desc *fdir_desc;
53 struct i40e_tx_buffer *tx_buf;
54 struct i40e_tx_desc *tx_desc;
55 struct i40e_ring *tx_ring;
eaefbd06 56 unsigned int fpt, dcc;
fd0a05ce
JB
57 struct i40e_vsi *vsi;
58 struct device *dev;
59 dma_addr_t dma;
60 u32 td_cmd = 0;
61 u16 i;
62
63 /* find existing FDIR VSI */
64 vsi = NULL;
505682cd 65 for (i = 0; i < pf->num_alloc_vsi; i++)
fd0a05ce
JB
66 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
67 vsi = pf->vsi[i];
68 if (!vsi)
69 return -ENOENT;
70
9f65e15b 71 tx_ring = vsi->tx_rings[0];
fd0a05ce
JB
72 dev = tx_ring->dev;
73
17a73f6b
JG
74 dma = dma_map_single(dev, raw_packet,
75 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
fd0a05ce
JB
76 if (dma_mapping_error(dev, dma))
77 goto dma_fail;
78
79 /* grab the next descriptor */
fc4ac67b
AD
80 i = tx_ring->next_to_use;
81 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
fc4ac67b 82
eaefbd06 83 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
fd0a05ce 84
eaefbd06
JB
85 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
86 I40E_TXD_FLTR_QW0_QINDEX_MASK;
fd0a05ce 87
eaefbd06
JB
88 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
89 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
fd0a05ce 90
eaefbd06
JB
91 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
92 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
fd0a05ce
JB
93
94 /* Use LAN VSI Id if not programmed by user */
95 if (fdir_data->dest_vsi == 0)
eaefbd06
JB
96 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
97 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
fd0a05ce 98 else
eaefbd06
JB
99 fpt |= ((u32)fdir_data->dest_vsi <<
100 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
101 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
102
eaefbd06 103 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
fd0a05ce
JB
104
105 if (add)
eaefbd06
JB
106 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
107 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
fd0a05ce 108 else
eaefbd06
JB
109 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
110 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
fd0a05ce 111
eaefbd06
JB
112 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
113 I40E_TXD_FLTR_QW1_DEST_MASK;
fd0a05ce 114
eaefbd06
JB
115 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
116 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
fd0a05ce
JB
117
118 if (fdir_data->cnt_index != 0) {
eaefbd06
JB
119 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
120 dcc |= ((u32)fdir_data->cnt_index <<
121 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
433c47de 122 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
fd0a05ce
JB
123 }
124
99753ea6
JB
125 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
126 fdir_desc->rsvd = cpu_to_le32(0);
eaefbd06 127 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fd0a05ce
JB
128 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
129
130 /* Now program a dummy descriptor */
fc4ac67b
AD
131 i = tx_ring->next_to_use;
132 tx_desc = I40E_TX_DESC(tx_ring, i);
298deef1 133 tx_buf = &tx_ring->tx_bi[i];
fc4ac67b 134
eaefbd06 135 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
fd0a05ce 136
298deef1 137 /* record length, and DMA address */
17a73f6b 138 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
298deef1
ASJ
139 dma_unmap_addr_set(tx_buf, dma, dma);
140
fd0a05ce 141 tx_desc->buffer_addr = cpu_to_le64(dma);
eaefbd06 142 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
fd0a05ce
JB
143
144 tx_desc->cmd_type_offset_bsz =
17a73f6b 145 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
fd0a05ce 146
298deef1
ASJ
147 /* set the timestamp */
148 tx_buf->time_stamp = jiffies;
149
fd0a05ce
JB
150 /* Force memory writes to complete before letting h/w
151 * know there are new descriptors to fetch. (Only
152 * applicable for weak-ordered memory model archs,
153 * such as IA-64).
154 */
155 wmb();
156
fc4ac67b
AD
157 /* Mark the data descriptor to be watched */
158 tx_buf->next_to_watch = tx_desc;
159
fd0a05ce
JB
160 writel(tx_ring->next_to_use, tx_ring->tail);
161 return 0;
162
163dma_fail:
164 return -1;
165}
166
17a73f6b
JG
167#define IP_HEADER_OFFSET 14
168#define I40E_UDPIP_DUMMY_PACKET_LEN 42
169/**
170 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
171 * @vsi: pointer to the targeted VSI
172 * @fd_data: the flow director data required for the FDir descriptor
173 * @raw_packet: the pre-allocated packet buffer for FDir
174 * @add: true adds a filter, false removes it
175 *
176 * Returns 0 if the filters were successfully added or removed
177 **/
178static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
179 struct i40e_fdir_filter *fd_data,
180 u8 *raw_packet, bool add)
181{
182 struct i40e_pf *pf = vsi->back;
183 struct udphdr *udp;
184 struct iphdr *ip;
185 bool err = false;
186 int ret;
17a73f6b
JG
187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
190
191 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
192
193 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
194 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
195 + sizeof(struct iphdr));
196
197 ip->daddr = fd_data->dst_ip[0];
198 udp->dest = fd_data->dst_port;
199 ip->saddr = fd_data->src_ip[0];
200 udp->source = fd_data->src_port;
201
b2d36c03
KS
202 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
203 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
204 if (ret) {
205 dev_info(&pf->pdev->dev,
206 "Filter command send failed for PCTYPE %d (ret = %d)\n",
207 fd_data->pctype, ret);
208 err = true;
209 } else {
210 dev_info(&pf->pdev->dev,
211 "Filter OK for PCTYPE %d (ret = %d)\n",
212 fd_data->pctype, ret);
17a73f6b
JG
213 }
214
215 return err ? -EOPNOTSUPP : 0;
216}
217
218#define I40E_TCPIP_DUMMY_PACKET_LEN 54
219/**
220 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
221 * @vsi: pointer to the targeted VSI
222 * @fd_data: the flow director data required for the FDir descriptor
223 * @raw_packet: the pre-allocated packet buffer for FDir
224 * @add: true adds a filter, false removes it
225 *
226 * Returns 0 if the filters were successfully added or removed
227 **/
228static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
229 struct i40e_fdir_filter *fd_data,
230 u8 *raw_packet, bool add)
231{
232 struct i40e_pf *pf = vsi->back;
233 struct tcphdr *tcp;
234 struct iphdr *ip;
235 bool err = false;
236 int ret;
237 /* Dummy packet */
238 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
239 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
240 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
241 0x0, 0x72, 0, 0, 0, 0};
242
243 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
244
245 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
246 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
247 + sizeof(struct iphdr));
248
249 ip->daddr = fd_data->dst_ip[0];
250 tcp->dest = fd_data->dst_port;
251 ip->saddr = fd_data->src_ip[0];
252 tcp->source = fd_data->src_port;
253
254 if (add) {
255 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
256 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
257 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
258 }
259 }
260
b2d36c03 261 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
17a73f6b
JG
262 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
263
264 if (ret) {
265 dev_info(&pf->pdev->dev,
266 "Filter command send failed for PCTYPE %d (ret = %d)\n",
267 fd_data->pctype, ret);
268 err = true;
269 } else {
270 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
271 fd_data->pctype, ret);
272 }
273
17a73f6b
JG
274 return err ? -EOPNOTSUPP : 0;
275}
276
277/**
278 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
279 * a specific flow spec
280 * @vsi: pointer to the targeted VSI
281 * @fd_data: the flow director data required for the FDir descriptor
282 * @raw_packet: the pre-allocated packet buffer for FDir
283 * @add: true adds a filter, false removes it
284 *
21d3efdc 285 * Always returns -EOPNOTSUPP
17a73f6b
JG
286 **/
287static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
288 struct i40e_fdir_filter *fd_data,
289 u8 *raw_packet, bool add)
290{
291 return -EOPNOTSUPP;
292}
293
294#define I40E_IP_DUMMY_PACKET_LEN 34
295/**
296 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
297 * a specific flow spec
298 * @vsi: pointer to the targeted VSI
299 * @fd_data: the flow director data required for the FDir descriptor
300 * @raw_packet: the pre-allocated packet buffer for FDir
301 * @add: true adds a filter, false removes it
302 *
303 * Returns 0 if the filters were successfully added or removed
304 **/
305static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
306 struct i40e_fdir_filter *fd_data,
307 u8 *raw_packet, bool add)
308{
309 struct i40e_pf *pf = vsi->back;
310 struct iphdr *ip;
311 bool err = false;
312 int ret;
313 int i;
314 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
315 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
316 0, 0, 0, 0};
317
318 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
319 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
320
321 ip->saddr = fd_data->src_ip[0];
322 ip->daddr = fd_data->dst_ip[0];
323 ip->protocol = 0;
324
325 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
326 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
327 fd_data->pctype = i;
328 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
329
330 if (ret) {
331 dev_info(&pf->pdev->dev,
332 "Filter command send failed for PCTYPE %d (ret = %d)\n",
333 fd_data->pctype, ret);
334 err = true;
335 } else {
336 dev_info(&pf->pdev->dev,
337 "Filter OK for PCTYPE %d (ret = %d)\n",
338 fd_data->pctype, ret);
339 }
340 }
341
342 return err ? -EOPNOTSUPP : 0;
343}
344
345/**
346 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
347 * @vsi: pointer to the targeted VSI
348 * @cmd: command to get or set RX flow classification rules
349 * @add: true adds a filter, false removes it
350 *
351 **/
352int i40e_add_del_fdir(struct i40e_vsi *vsi,
353 struct i40e_fdir_filter *input, bool add)
354{
355 struct i40e_pf *pf = vsi->back;
356 u8 *raw_packet;
357 int ret;
358
359 /* Populate the Flow Director that we have at the moment
360 * and allocate the raw packet buffer for the calling functions
361 */
362 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
363 if (!raw_packet)
364 return -ENOMEM;
365
366 switch (input->flow_type & ~FLOW_EXT) {
367 case TCP_V4_FLOW:
368 ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
369 add);
370 break;
371 case UDP_V4_FLOW:
372 ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
373 add);
374 break;
375 case SCTP_V4_FLOW:
376 ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
377 add);
378 break;
379 case IPV4_FLOW:
380 ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
381 add);
382 break;
383 case IP_USER_FLOW:
384 switch (input->ip4_proto) {
385 case IPPROTO_TCP:
386 ret = i40e_add_del_fdir_tcpv4(vsi, input,
387 raw_packet, add);
388 break;
389 case IPPROTO_UDP:
390 ret = i40e_add_del_fdir_udpv4(vsi, input,
391 raw_packet, add);
392 break;
393 case IPPROTO_SCTP:
394 ret = i40e_add_del_fdir_sctpv4(vsi, input,
395 raw_packet, add);
396 break;
397 default:
398 ret = i40e_add_del_fdir_ipv4(vsi, input,
399 raw_packet, add);
400 break;
401 }
402 break;
403 default:
c5ffe7e1 404 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
17a73f6b
JG
405 input->flow_type);
406 ret = -EINVAL;
407 }
408
409 kfree(raw_packet);
410 return ret;
411}
412
fd0a05ce
JB
413/**
414 * i40e_fd_handle_status - check the Programming Status for FD
415 * @rx_ring: the Rx ring for this descriptor
55a5e60b 416 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
fd0a05ce
JB
417 * @prog_id: the id originally used for programming
418 *
419 * This is used to verify if the FD programming or invalidation
420 * requested by SW to the HW is successful or not and take actions accordingly.
421 **/
55a5e60b
ASJ
422static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
423 union i40e_rx_desc *rx_desc, u8 prog_id)
fd0a05ce 424{
55a5e60b
ASJ
425 struct i40e_pf *pf = rx_ring->vsi->back;
426 struct pci_dev *pdev = pf->pdev;
427 u32 fcnt_prog, fcnt_avail;
fd0a05ce 428 u32 error;
55a5e60b 429 u64 qw;
fd0a05ce 430
55a5e60b 431 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
fd0a05ce
JB
432 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
433 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
434
55a5e60b
ASJ
435 if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
436 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
437 rx_desc->wb.qword0.hi_dword.fd_id);
438
439 /* filter programming failed most likely due to table full */
12957388
ASJ
440 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
441 fcnt_avail = pf->fdir_pf_filter_count;
55a5e60b
ASJ
442 /* If ATR is running fcnt_prog can quickly change,
443 * if we are very close to full, it makes sense to disable
444 * FD ATR/SB and then re-enable it when there is room.
445 */
446 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
447 /* Turn off ATR first */
b814ba65
ASJ
448 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
449 !(pf->auto_disable_flags &
450 I40E_FLAG_FD_ATR_ENABLED)) {
55a5e60b
ASJ
451 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
452 pf->auto_disable_flags |=
453 I40E_FLAG_FD_ATR_ENABLED;
454 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
b814ba65
ASJ
455 } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
456 !(pf->auto_disable_flags &
457 I40E_FLAG_FD_SB_ENABLED)) {
55a5e60b
ASJ
458 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
459 pf->auto_disable_flags |=
460 I40E_FLAG_FD_SB_ENABLED;
461 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
462 }
463 } else {
c5ffe7e1 464 dev_info(&pdev->dev, "FD filter programming error\n");
55a5e60b
ASJ
465 }
466 } else if (error ==
467 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
13c2884f
ASJ
468 if (I40E_DEBUG_FD & pf->hw.debug_mask)
469 dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
470 rx_desc->wb.qword0.hi_dword.fd_id);
55a5e60b 471 }
fd0a05ce
JB
472}
473
474/**
a5e9c572 475 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
fd0a05ce
JB
476 * @ring: the ring that owns the buffer
477 * @tx_buffer: the buffer to free
478 **/
a5e9c572
AD
479static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
480 struct i40e_tx_buffer *tx_buffer)
fd0a05ce 481{
a5e9c572
AD
482 if (tx_buffer->skb) {
483 dev_kfree_skb_any(tx_buffer->skb);
484 if (dma_unmap_len(tx_buffer, len))
fd0a05ce 485 dma_unmap_single(ring->dev,
35a1e2ad
AD
486 dma_unmap_addr(tx_buffer, dma),
487 dma_unmap_len(tx_buffer, len),
fd0a05ce 488 DMA_TO_DEVICE);
a5e9c572
AD
489 } else if (dma_unmap_len(tx_buffer, len)) {
490 dma_unmap_page(ring->dev,
491 dma_unmap_addr(tx_buffer, dma),
492 dma_unmap_len(tx_buffer, len),
493 DMA_TO_DEVICE);
fd0a05ce 494 }
a5e9c572
AD
495 tx_buffer->next_to_watch = NULL;
496 tx_buffer->skb = NULL;
35a1e2ad 497 dma_unmap_len_set(tx_buffer, len, 0);
a5e9c572 498 /* tx_buffer must be completely set up in the transmit path */
fd0a05ce
JB
499}
500
501/**
502 * i40e_clean_tx_ring - Free any empty Tx buffers
503 * @tx_ring: ring to be cleaned
504 **/
505void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
506{
fd0a05ce
JB
507 unsigned long bi_size;
508 u16 i;
509
510 /* ring already cleared, nothing to do */
511 if (!tx_ring->tx_bi)
512 return;
513
514 /* Free all the Tx ring sk_buffs */
a5e9c572
AD
515 for (i = 0; i < tx_ring->count; i++)
516 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
fd0a05ce
JB
517
518 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
519 memset(tx_ring->tx_bi, 0, bi_size);
520
521 /* Zero out the descriptor ring */
522 memset(tx_ring->desc, 0, tx_ring->size);
523
524 tx_ring->next_to_use = 0;
525 tx_ring->next_to_clean = 0;
7070ce0a
AD
526
527 if (!tx_ring->netdev)
528 return;
529
530 /* cleanup Tx queue statistics */
531 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
532 tx_ring->queue_index));
fd0a05ce
JB
533}
534
535/**
536 * i40e_free_tx_resources - Free Tx resources per queue
537 * @tx_ring: Tx descriptor ring for a specific queue
538 *
539 * Free all transmit software resources
540 **/
541void i40e_free_tx_resources(struct i40e_ring *tx_ring)
542{
543 i40e_clean_tx_ring(tx_ring);
544 kfree(tx_ring->tx_bi);
545 tx_ring->tx_bi = NULL;
546
547 if (tx_ring->desc) {
548 dma_free_coherent(tx_ring->dev, tx_ring->size,
549 tx_ring->desc, tx_ring->dma);
550 tx_ring->desc = NULL;
551 }
552}
553
554/**
555 * i40e_get_tx_pending - how many tx descriptors not processed
556 * @tx_ring: the ring of descriptors
557 *
558 * Since there is no access to the ring head register
559 * in XL710, we need to use our local copies
560 **/
561static u32 i40e_get_tx_pending(struct i40e_ring *ring)
562{
563 u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
564 ? ring->next_to_use
565 : ring->next_to_use + ring->count);
566 return ntu - ring->next_to_clean;
567}
568
569/**
570 * i40e_check_tx_hang - Is there a hang in the Tx queue
571 * @tx_ring: the ring of descriptors
572 **/
573static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
574{
575 u32 tx_pending = i40e_get_tx_pending(tx_ring);
576 bool ret = false;
577
578 clear_check_for_tx_hang(tx_ring);
579
580 /* Check for a hung queue, but be thorough. This verifies
581 * that a transmit has been completed since the previous
582 * check AND there is at least one packet pending. The
583 * ARMED bit is set to indicate a potential hang. The
584 * bit is cleared if a pause frame is received to remove
585 * false hang detection due to PFC or 802.3x frames. By
586 * requiring this to fail twice we avoid races with
587 * PFC clearing the ARMED bit and conditions where we
588 * run the check_tx_hang logic with a transmit completion
589 * pending but without time to complete it yet.
590 */
a114d0a6 591 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
fd0a05ce
JB
592 tx_pending) {
593 /* make sure it is true for two checks in a row */
594 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
595 &tx_ring->state);
596 } else {
597 /* update completed stats and disarm the hang check */
a114d0a6 598 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
fd0a05ce
JB
599 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
600 }
601
602 return ret;
603}
604
1943d8ba
JB
605/**
606 * i40e_get_head - Retrieve head from head writeback
607 * @tx_ring: tx ring to fetch head of
608 *
609 * Returns value of Tx ring head based on value stored
610 * in head write-back location
611 **/
612static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
613{
614 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
615
616 return le32_to_cpu(*(volatile __le32 *)head);
617}
618
fd0a05ce
JB
619/**
620 * i40e_clean_tx_irq - Reclaim resources after transmit completes
621 * @tx_ring: tx ring to clean
622 * @budget: how many cleans we're allowed
623 *
624 * Returns true if there's any budget left (e.g. the clean is finished)
625 **/
626static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
627{
628 u16 i = tx_ring->next_to_clean;
629 struct i40e_tx_buffer *tx_buf;
1943d8ba 630 struct i40e_tx_desc *tx_head;
fd0a05ce
JB
631 struct i40e_tx_desc *tx_desc;
632 unsigned int total_packets = 0;
633 unsigned int total_bytes = 0;
634
635 tx_buf = &tx_ring->tx_bi[i];
636 tx_desc = I40E_TX_DESC(tx_ring, i);
a5e9c572 637 i -= tx_ring->count;
fd0a05ce 638
1943d8ba
JB
639 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
640
a5e9c572
AD
641 do {
642 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
fd0a05ce
JB
643
644 /* if next_to_watch is not set then there is no work pending */
645 if (!eop_desc)
646 break;
647
a5e9c572
AD
648 /* prevent any other reads prior to eop_desc */
649 read_barrier_depends();
650
1943d8ba
JB
651 /* we have caught up to head, no work left to do */
652 if (tx_head == tx_desc)
fd0a05ce
JB
653 break;
654
c304fdac 655 /* clear next_to_watch to prevent false hangs */
fd0a05ce 656 tx_buf->next_to_watch = NULL;
fd0a05ce 657
a5e9c572
AD
658 /* update the statistics for this packet */
659 total_bytes += tx_buf->bytecount;
660 total_packets += tx_buf->gso_segs;
fd0a05ce 661
a5e9c572
AD
662 /* free the skb */
663 dev_kfree_skb_any(tx_buf->skb);
fd0a05ce 664
a5e9c572
AD
665 /* unmap skb header data */
666 dma_unmap_single(tx_ring->dev,
667 dma_unmap_addr(tx_buf, dma),
668 dma_unmap_len(tx_buf, len),
669 DMA_TO_DEVICE);
fd0a05ce 670
a5e9c572
AD
671 /* clear tx_buffer data */
672 tx_buf->skb = NULL;
673 dma_unmap_len_set(tx_buf, len, 0);
fd0a05ce 674
a5e9c572
AD
675 /* unmap remaining buffers */
676 while (tx_desc != eop_desc) {
fd0a05ce
JB
677
678 tx_buf++;
679 tx_desc++;
680 i++;
a5e9c572
AD
681 if (unlikely(!i)) {
682 i -= tx_ring->count;
fd0a05ce
JB
683 tx_buf = tx_ring->tx_bi;
684 tx_desc = I40E_TX_DESC(tx_ring, 0);
685 }
fd0a05ce 686
a5e9c572
AD
687 /* unmap any remaining paged data */
688 if (dma_unmap_len(tx_buf, len)) {
689 dma_unmap_page(tx_ring->dev,
690 dma_unmap_addr(tx_buf, dma),
691 dma_unmap_len(tx_buf, len),
692 DMA_TO_DEVICE);
693 dma_unmap_len_set(tx_buf, len, 0);
694 }
695 }
696
697 /* move us one more past the eop_desc for start of next pkt */
698 tx_buf++;
699 tx_desc++;
700 i++;
701 if (unlikely(!i)) {
702 i -= tx_ring->count;
703 tx_buf = tx_ring->tx_bi;
704 tx_desc = I40E_TX_DESC(tx_ring, 0);
705 }
706
707 /* update budget accounting */
708 budget--;
709 } while (likely(budget));
710
711 i += tx_ring->count;
fd0a05ce 712 tx_ring->next_to_clean = i;
980e9b11 713 u64_stats_update_begin(&tx_ring->syncp);
a114d0a6
AD
714 tx_ring->stats.bytes += total_bytes;
715 tx_ring->stats.packets += total_packets;
980e9b11 716 u64_stats_update_end(&tx_ring->syncp);
fd0a05ce
JB
717 tx_ring->q_vector->tx.total_bytes += total_bytes;
718 tx_ring->q_vector->tx.total_packets += total_packets;
a5e9c572 719
fd0a05ce
JB
720 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
721 /* schedule immediate reset if we believe we hung */
722 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
723 " VSI <%d>\n"
724 " Tx Queue <%d>\n"
725 " next_to_use <%x>\n"
726 " next_to_clean <%x>\n",
727 tx_ring->vsi->seid,
728 tx_ring->queue_index,
729 tx_ring->next_to_use, i);
730 dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
731 " time_stamp <%lx>\n"
732 " jiffies <%lx>\n",
733 tx_ring->tx_bi[i].time_stamp, jiffies);
734
735 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
736
737 dev_info(tx_ring->dev,
738 "tx hang detected on queue %d, resetting adapter\n",
739 tx_ring->queue_index);
740
741 tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
742
743 /* the adapter is about to reset, no point in enabling stuff */
744 return true;
745 }
746
7070ce0a
AD
747 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
748 tx_ring->queue_index),
749 total_packets, total_bytes);
750
fd0a05ce
JB
751#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
752 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
753 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
754 /* Make sure that anybody stopping the queue after this
755 * sees the new next_to_clean.
756 */
757 smp_mb();
758 if (__netif_subqueue_stopped(tx_ring->netdev,
759 tx_ring->queue_index) &&
760 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
761 netif_wake_subqueue(tx_ring->netdev,
762 tx_ring->queue_index);
763 ++tx_ring->tx_stats.restart_queue;
764 }
765 }
766
767 return budget > 0;
768}
769
770/**
771 * i40e_set_new_dynamic_itr - Find new ITR level
772 * @rc: structure containing ring performance data
773 *
774 * Stores a new ITR value based on packets and byte counts during
775 * the last interrupt. The advantage of per interrupt computation
776 * is faster updates and more accurate ITR for the current traffic
777 * pattern. Constants in this function were computed based on
778 * theoretical maximum wire speed and thresholds were set based on
779 * testing data as well as attempting to minimize response time
780 * while increasing bulk throughput.
781 **/
782static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
783{
784 enum i40e_latency_range new_latency_range = rc->latency_range;
785 u32 new_itr = rc->itr;
786 int bytes_per_int;
787
788 if (rc->total_packets == 0 || !rc->itr)
789 return;
790
791 /* simple throttlerate management
792 * 0-10MB/s lowest (100000 ints/s)
793 * 10-20MB/s low (20000 ints/s)
794 * 20-1249MB/s bulk (8000 ints/s)
795 */
796 bytes_per_int = rc->total_bytes / rc->itr;
797 switch (rc->itr) {
798 case I40E_LOWEST_LATENCY:
799 if (bytes_per_int > 10)
800 new_latency_range = I40E_LOW_LATENCY;
801 break;
802 case I40E_LOW_LATENCY:
803 if (bytes_per_int > 20)
804 new_latency_range = I40E_BULK_LATENCY;
805 else if (bytes_per_int <= 10)
806 new_latency_range = I40E_LOWEST_LATENCY;
807 break;
808 case I40E_BULK_LATENCY:
809 if (bytes_per_int <= 20)
810 rc->latency_range = I40E_LOW_LATENCY;
811 break;
812 }
813
814 switch (new_latency_range) {
815 case I40E_LOWEST_LATENCY:
816 new_itr = I40E_ITR_100K;
817 break;
818 case I40E_LOW_LATENCY:
819 new_itr = I40E_ITR_20K;
820 break;
821 case I40E_BULK_LATENCY:
822 new_itr = I40E_ITR_8K;
823 break;
824 default:
825 break;
826 }
827
828 if (new_itr != rc->itr) {
829 /* do an exponential smoothing */
830 new_itr = (10 * new_itr * rc->itr) /
831 ((9 * new_itr) + rc->itr);
832 rc->itr = new_itr & I40E_MAX_ITR;
833 }
834
835 rc->total_bytes = 0;
836 rc->total_packets = 0;
837}
838
839/**
840 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
841 * @q_vector: the vector to adjust
842 **/
843static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
844{
845 u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
846 struct i40e_hw *hw = &q_vector->vsi->back->hw;
847 u32 reg_addr;
848 u16 old_itr;
849
850 reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
851 old_itr = q_vector->rx.itr;
852 i40e_set_new_dynamic_itr(&q_vector->rx);
853 if (old_itr != q_vector->rx.itr)
854 wr32(hw, reg_addr, q_vector->rx.itr);
855
856 reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
857 old_itr = q_vector->tx.itr;
858 i40e_set_new_dynamic_itr(&q_vector->tx);
859 if (old_itr != q_vector->tx.itr)
860 wr32(hw, reg_addr, q_vector->tx.itr);
fd0a05ce
JB
861}
862
863/**
864 * i40e_clean_programming_status - clean the programming status descriptor
865 * @rx_ring: the rx ring that has this descriptor
866 * @rx_desc: the rx descriptor written back by HW
867 *
868 * Flow director should handle FD_FILTER_STATUS to check its filter programming
869 * status being successful or not and take actions accordingly. FCoE should
870 * handle its context/filter programming/invalidation status and take actions.
871 *
872 **/
873static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
874 union i40e_rx_desc *rx_desc)
875{
876 u64 qw;
877 u8 id;
878
879 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
880 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
881 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
882
883 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
55a5e60b 884 i40e_fd_handle_status(rx_ring, rx_desc, id);
fd0a05ce
JB
885}
886
887/**
888 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
889 * @tx_ring: the tx ring to set up
890 *
891 * Return 0 on success, negative on error
892 **/
893int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
894{
895 struct device *dev = tx_ring->dev;
896 int bi_size;
897
898 if (!dev)
899 return -ENOMEM;
900
901 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
902 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
903 if (!tx_ring->tx_bi)
904 goto err;
905
906 /* round up to nearest 4K */
907 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1943d8ba
JB
908 /* add u32 for head writeback, align after this takes care of
909 * guaranteeing this is at least one cache line in size
910 */
911 tx_ring->size += sizeof(u32);
fd0a05ce
JB
912 tx_ring->size = ALIGN(tx_ring->size, 4096);
913 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
914 &tx_ring->dma, GFP_KERNEL);
915 if (!tx_ring->desc) {
916 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
917 tx_ring->size);
918 goto err;
919 }
920
921 tx_ring->next_to_use = 0;
922 tx_ring->next_to_clean = 0;
923 return 0;
924
925err:
926 kfree(tx_ring->tx_bi);
927 tx_ring->tx_bi = NULL;
928 return -ENOMEM;
929}
930
931/**
932 * i40e_clean_rx_ring - Free Rx buffers
933 * @rx_ring: ring to be cleaned
934 **/
935void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
936{
937 struct device *dev = rx_ring->dev;
938 struct i40e_rx_buffer *rx_bi;
939 unsigned long bi_size;
940 u16 i;
941
942 /* ring already cleared, nothing to do */
943 if (!rx_ring->rx_bi)
944 return;
945
946 /* Free all the Rx ring sk_buffs */
947 for (i = 0; i < rx_ring->count; i++) {
948 rx_bi = &rx_ring->rx_bi[i];
949 if (rx_bi->dma) {
950 dma_unmap_single(dev,
951 rx_bi->dma,
952 rx_ring->rx_buf_len,
953 DMA_FROM_DEVICE);
954 rx_bi->dma = 0;
955 }
956 if (rx_bi->skb) {
957 dev_kfree_skb(rx_bi->skb);
958 rx_bi->skb = NULL;
959 }
960 if (rx_bi->page) {
961 if (rx_bi->page_dma) {
962 dma_unmap_page(dev,
963 rx_bi->page_dma,
964 PAGE_SIZE / 2,
965 DMA_FROM_DEVICE);
966 rx_bi->page_dma = 0;
967 }
968 __free_page(rx_bi->page);
969 rx_bi->page = NULL;
970 rx_bi->page_offset = 0;
971 }
972 }
973
974 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
975 memset(rx_ring->rx_bi, 0, bi_size);
976
977 /* Zero out the descriptor ring */
978 memset(rx_ring->desc, 0, rx_ring->size);
979
980 rx_ring->next_to_clean = 0;
981 rx_ring->next_to_use = 0;
982}
983
984/**
985 * i40e_free_rx_resources - Free Rx resources
986 * @rx_ring: ring to clean the resources from
987 *
988 * Free all receive software resources
989 **/
990void i40e_free_rx_resources(struct i40e_ring *rx_ring)
991{
992 i40e_clean_rx_ring(rx_ring);
993 kfree(rx_ring->rx_bi);
994 rx_ring->rx_bi = NULL;
995
996 if (rx_ring->desc) {
997 dma_free_coherent(rx_ring->dev, rx_ring->size,
998 rx_ring->desc, rx_ring->dma);
999 rx_ring->desc = NULL;
1000 }
1001}
1002
1003/**
1004 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1005 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1006 *
1007 * Returns 0 on success, negative on failure
1008 **/
1009int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1010{
1011 struct device *dev = rx_ring->dev;
1012 int bi_size;
1013
1014 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1015 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1016 if (!rx_ring->rx_bi)
1017 goto err;
1018
1019 /* Round up to nearest 4K */
1020 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1021 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1022 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1023 rx_ring->size = ALIGN(rx_ring->size, 4096);
1024 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1025 &rx_ring->dma, GFP_KERNEL);
1026
1027 if (!rx_ring->desc) {
1028 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1029 rx_ring->size);
1030 goto err;
1031 }
1032
1033 rx_ring->next_to_clean = 0;
1034 rx_ring->next_to_use = 0;
1035
1036 return 0;
1037err:
1038 kfree(rx_ring->rx_bi);
1039 rx_ring->rx_bi = NULL;
1040 return -ENOMEM;
1041}
1042
1043/**
1044 * i40e_release_rx_desc - Store the new tail and head values
1045 * @rx_ring: ring to bump
1046 * @val: new head index
1047 **/
1048static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1049{
1050 rx_ring->next_to_use = val;
1051 /* Force memory writes to complete before letting h/w
1052 * know there are new descriptors to fetch. (Only
1053 * applicable for weak-ordered memory model archs,
1054 * such as IA-64).
1055 */
1056 wmb();
1057 writel(val, rx_ring->tail);
1058}
1059
1060/**
1061 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
1062 * @rx_ring: ring to place buffers on
1063 * @cleaned_count: number of buffers to replace
1064 **/
1065void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1066{
1067 u16 i = rx_ring->next_to_use;
1068 union i40e_rx_desc *rx_desc;
1069 struct i40e_rx_buffer *bi;
1070 struct sk_buff *skb;
1071
1072 /* do nothing if no valid netdev defined */
1073 if (!rx_ring->netdev || !cleaned_count)
1074 return;
1075
1076 while (cleaned_count--) {
1077 rx_desc = I40E_RX_DESC(rx_ring, i);
1078 bi = &rx_ring->rx_bi[i];
1079 skb = bi->skb;
1080
1081 if (!skb) {
1082 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1083 rx_ring->rx_buf_len);
1084 if (!skb) {
420136cc 1085 rx_ring->rx_stats.alloc_buff_failed++;
fd0a05ce
JB
1086 goto no_buffers;
1087 }
1088 /* initialize queue mapping */
1089 skb_record_rx_queue(skb, rx_ring->queue_index);
1090 bi->skb = skb;
1091 }
1092
1093 if (!bi->dma) {
1094 bi->dma = dma_map_single(rx_ring->dev,
1095 skb->data,
1096 rx_ring->rx_buf_len,
1097 DMA_FROM_DEVICE);
1098 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
420136cc 1099 rx_ring->rx_stats.alloc_buff_failed++;
fd0a05ce
JB
1100 bi->dma = 0;
1101 goto no_buffers;
1102 }
1103 }
1104
1105 if (ring_is_ps_enabled(rx_ring)) {
1106 if (!bi->page) {
1107 bi->page = alloc_page(GFP_ATOMIC);
1108 if (!bi->page) {
420136cc 1109 rx_ring->rx_stats.alloc_page_failed++;
fd0a05ce
JB
1110 goto no_buffers;
1111 }
1112 }
1113
1114 if (!bi->page_dma) {
1115 /* use a half page if we're re-using */
1116 bi->page_offset ^= PAGE_SIZE / 2;
1117 bi->page_dma = dma_map_page(rx_ring->dev,
1118 bi->page,
1119 bi->page_offset,
1120 PAGE_SIZE / 2,
1121 DMA_FROM_DEVICE);
1122 if (dma_mapping_error(rx_ring->dev,
1123 bi->page_dma)) {
420136cc 1124 rx_ring->rx_stats.alloc_page_failed++;
fd0a05ce
JB
1125 bi->page_dma = 0;
1126 goto no_buffers;
1127 }
1128 }
1129
1130 /* Refresh the desc even if buffer_addrs didn't change
1131 * because each write-back erases this info.
1132 */
1133 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1134 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1135 } else {
1136 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1137 rx_desc->read.hdr_addr = 0;
1138 }
1139 i++;
1140 if (i == rx_ring->count)
1141 i = 0;
1142 }
1143
1144no_buffers:
1145 if (rx_ring->next_to_use != i)
1146 i40e_release_rx_desc(rx_ring, i);
1147}
1148
1149/**
1150 * i40e_receive_skb - Send a completed packet up the stack
1151 * @rx_ring: rx ring in play
1152 * @skb: packet to send up
1153 * @vlan_tag: vlan tag for packet
1154 **/
1155static void i40e_receive_skb(struct i40e_ring *rx_ring,
1156 struct sk_buff *skb, u16 vlan_tag)
1157{
1158 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1159 struct i40e_vsi *vsi = rx_ring->vsi;
1160 u64 flags = vsi->back->flags;
1161
1162 if (vlan_tag & VLAN_VID_MASK)
1163 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1164
1165 if (flags & I40E_FLAG_IN_NETPOLL)
1166 netif_rx(skb);
1167 else
1168 napi_gro_receive(&q_vector->napi, skb);
1169}
1170
1171/**
1172 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1173 * @vsi: the VSI we care about
1174 * @skb: skb currently being received and modified
1175 * @rx_status: status value of last descriptor in packet
1176 * @rx_error: error value of last descriptor in packet
8144f0f7 1177 * @rx_ptype: ptype value of last descriptor in packet
fd0a05ce
JB
1178 **/
1179static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1180 struct sk_buff *skb,
1181 u32 rx_status,
8144f0f7
JG
1182 u32 rx_error,
1183 u16 rx_ptype)
fd0a05ce 1184{
8a3c91cc
JB
1185 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1186 bool ipv4 = false, ipv6 = false;
8144f0f7
JG
1187 bool ipv4_tunnel, ipv6_tunnel;
1188 __wsum rx_udp_csum;
8144f0f7 1189 struct iphdr *iph;
8a3c91cc 1190 __sum16 csum;
8144f0f7
JG
1191
1192 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1193 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1194 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1195 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1196
1197 skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
fd0a05ce
JB
1198 skb->ip_summed = CHECKSUM_NONE;
1199
1200 /* Rx csum enabled and ip headers found? */
8a3c91cc
JB
1201 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1202 return;
1203
1204 /* did the hardware decode the packet and checksum? */
1205 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1206 return;
1207
1208 /* both known and outer_ip must be set for the below code to work */
1209 if (!(decoded.known && decoded.outer_ip))
fd0a05ce
JB
1210 return;
1211
8a3c91cc
JB
1212 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1213 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1214 ipv4 = true;
1215 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1216 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1217 ipv6 = true;
1218
1219 if (ipv4 &&
1220 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1221 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1222 goto checksum_fail;
1223
ddf1d0d7 1224 /* likely incorrect csum if alternate IP extension headers found */
8a3c91cc
JB
1225 if (ipv6 &&
1226 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
1227 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
1228 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1229 /* don't increment checksum err here, non-fatal err */
8ee75a8e
SN
1230 return;
1231
8a3c91cc
JB
1232 /* there was some L4 error, count error and punt packet to the stack */
1233 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
1234 goto checksum_fail;
1235
1236 /* handle packets that were not able to be checksummed due
1237 * to arrival speed, in this case the stack can compute
1238 * the csum.
1239 */
1240 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
fd0a05ce 1241 return;
fd0a05ce 1242
8a3c91cc
JB
1243 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1244 * it in the driver, hardware does not do it for us.
1245 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1246 * so the total length of IPv4 header is IHL*4 bytes
1247 * The UDP_0 bit *may* bet set if the *inner* header is UDP
1248 */
8144f0f7 1249 if (ipv4_tunnel &&
8a3c91cc 1250 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
8144f0f7 1251 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
8144f0f7
JG
1252 skb->transport_header = skb->mac_header +
1253 sizeof(struct ethhdr) +
1254 (ip_hdr(skb)->ihl * 4);
1255
1256 /* Add 4 bytes for VLAN tagged packets */
1257 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1258 skb->protocol == htons(ETH_P_8021AD))
1259 ? VLAN_HLEN : 0;
1260
1261 rx_udp_csum = udp_csum(skb);
1262 iph = ip_hdr(skb);
1263 csum = csum_tcpudp_magic(
1264 iph->saddr, iph->daddr,
1265 (skb->len - skb_transport_offset(skb)),
1266 IPPROTO_UDP, rx_udp_csum);
1267
8a3c91cc
JB
1268 if (udp_hdr(skb)->check != csum)
1269 goto checksum_fail;
8144f0f7
JG
1270 }
1271
fd0a05ce 1272 skb->ip_summed = CHECKSUM_UNNECESSARY;
8a3c91cc
JB
1273
1274 return;
1275
1276checksum_fail:
1277 vsi->back->hw_csum_rx_error++;
fd0a05ce
JB
1278}
1279
1280/**
1281 * i40e_rx_hash - returns the hash value from the Rx descriptor
1282 * @ring: descriptor ring
1283 * @rx_desc: specific descriptor
1284 **/
1285static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1286 union i40e_rx_desc *rx_desc)
1287{
8a494920
JB
1288 const __le64 rss_mask =
1289 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1290 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1291
1292 if ((ring->netdev->features & NETIF_F_RXHASH) &&
1293 (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1294 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1295 else
1296 return 0;
fd0a05ce
JB
1297}
1298
206812b5
JB
1299/**
1300 * i40e_ptype_to_hash - get a hash type
1301 * @ptype: the ptype value from the descriptor
1302 *
1303 * Returns a hash type to be used by skb_set_hash
1304 **/
1305static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1306{
1307 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1308
1309 if (!decoded.known)
1310 return PKT_HASH_TYPE_NONE;
1311
1312 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1313 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1314 return PKT_HASH_TYPE_L4;
1315 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1316 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1317 return PKT_HASH_TYPE_L3;
1318 else
1319 return PKT_HASH_TYPE_L2;
1320}
1321
fd0a05ce
JB
1322/**
1323 * i40e_clean_rx_irq - Reclaim resources after receive completes
1324 * @rx_ring: rx ring to clean
1325 * @budget: how many cleans we're allowed
1326 *
1327 * Returns true if there's any budget left (e.g. the clean is finished)
1328 **/
1329static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1330{
1331 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1332 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1333 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1334 const int current_node = numa_node_id();
1335 struct i40e_vsi *vsi = rx_ring->vsi;
1336 u16 i = rx_ring->next_to_clean;
1337 union i40e_rx_desc *rx_desc;
1338 u32 rx_error, rx_status;
206812b5 1339 u8 rx_ptype;
fd0a05ce
JB
1340 u64 qword;
1341
390f86df
EB
1342 if (budget <= 0)
1343 return 0;
1344
fd0a05ce
JB
1345 rx_desc = I40E_RX_DESC(rx_ring, i);
1346 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
6838b535
JB
1347 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1348 I40E_RXD_QW1_STATUS_SHIFT;
fd0a05ce
JB
1349
1350 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1351 union i40e_rx_desc *next_rxd;
1352 struct i40e_rx_buffer *rx_bi;
1353 struct sk_buff *skb;
1354 u16 vlan_tag;
1355 if (i40e_rx_is_programming_status(qword)) {
1356 i40e_clean_programming_status(rx_ring, rx_desc);
1357 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1358 goto next_desc;
1359 }
1360 rx_bi = &rx_ring->rx_bi[i];
1361 skb = rx_bi->skb;
1362 prefetch(skb->data);
1363
829af3ac
MW
1364 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1365 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1366 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1367 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1368 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1369 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1370
1371 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1372 I40E_RXD_QW1_ERROR_SHIFT;
fd0a05ce
JB
1373 rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1374 rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1375
8144f0f7
JG
1376 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1377 I40E_RXD_QW1_PTYPE_SHIFT;
fd0a05ce
JB
1378 rx_bi->skb = NULL;
1379
1380 /* This memory barrier is needed to keep us from reading
1381 * any other fields out of the rx_desc until we know the
1382 * STATUS_DD bit is set
1383 */
1384 rmb();
1385
1386 /* Get the header and possibly the whole packet
1387 * If this is an skb from previous receive dma will be 0
1388 */
1389 if (rx_bi->dma) {
1390 u16 len;
1391
1392 if (rx_hbo)
1393 len = I40E_RX_HDR_SIZE;
1394 else if (rx_sph)
1395 len = rx_header_len;
1396 else if (rx_packet_len)
1397 len = rx_packet_len; /* 1buf/no split found */
1398 else
1399 len = rx_header_len; /* split always mode */
1400
1401 skb_put(skb, len);
1402 dma_unmap_single(rx_ring->dev,
1403 rx_bi->dma,
1404 rx_ring->rx_buf_len,
1405 DMA_FROM_DEVICE);
1406 rx_bi->dma = 0;
1407 }
1408
1409 /* Get the rest of the data if this was a header split */
1410 if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
1411
1412 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1413 rx_bi->page,
1414 rx_bi->page_offset,
1415 rx_packet_len);
1416
1417 skb->len += rx_packet_len;
1418 skb->data_len += rx_packet_len;
1419 skb->truesize += rx_packet_len;
1420
1421 if ((page_count(rx_bi->page) == 1) &&
1422 (page_to_nid(rx_bi->page) == current_node))
1423 get_page(rx_bi->page);
1424 else
1425 rx_bi->page = NULL;
1426
1427 dma_unmap_page(rx_ring->dev,
1428 rx_bi->page_dma,
1429 PAGE_SIZE / 2,
1430 DMA_FROM_DEVICE);
1431 rx_bi->page_dma = 0;
1432 }
1433 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1434
1435 if (unlikely(
1436 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1437 struct i40e_rx_buffer *next_buffer;
1438
1439 next_buffer = &rx_ring->rx_bi[i];
1440
1441 if (ring_is_ps_enabled(rx_ring)) {
1442 rx_bi->skb = next_buffer->skb;
1443 rx_bi->dma = next_buffer->dma;
1444 next_buffer->skb = skb;
1445 next_buffer->dma = 0;
1446 }
1447 rx_ring->rx_stats.non_eop_descs++;
1448 goto next_desc;
1449 }
1450
1451 /* ERR_MASK will only have valid bits if EOP set */
1452 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1453 dev_kfree_skb_any(skb);
8a3c91cc
JB
1454 /* TODO: shouldn't we increment a counter indicating the
1455 * drop?
1456 */
fd0a05ce
JB
1457 goto next_desc;
1458 }
1459
206812b5
JB
1460 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1461 i40e_ptype_to_hash(rx_ptype));
beb0dff1
JK
1462 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1463 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1464 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1465 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1466 rx_ring->last_rx_timestamp = jiffies;
1467 }
1468
fd0a05ce
JB
1469 /* probably a little skewed due to removing CRC */
1470 total_rx_bytes += skb->len;
1471 total_rx_packets++;
1472
1473 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8144f0f7
JG
1474
1475 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1476
fd0a05ce
JB
1477 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1478 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1479 : 0;
1480 i40e_receive_skb(rx_ring, skb, vlan_tag);
1481
1482 rx_ring->netdev->last_rx = jiffies;
1483 budget--;
1484next_desc:
1485 rx_desc->wb.qword1.status_error_len = 0;
1486 if (!budget)
1487 break;
1488
1489 cleaned_count++;
1490 /* return some buffers to hardware, one at a time is too slow */
1491 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1492 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1493 cleaned_count = 0;
1494 }
1495
1496 /* use prefetched values */
1497 rx_desc = next_rxd;
1498 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
829af3ac
MW
1499 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1500 I40E_RXD_QW1_STATUS_SHIFT;
fd0a05ce
JB
1501 }
1502
1503 rx_ring->next_to_clean = i;
980e9b11 1504 u64_stats_update_begin(&rx_ring->syncp);
a114d0a6
AD
1505 rx_ring->stats.packets += total_rx_packets;
1506 rx_ring->stats.bytes += total_rx_bytes;
980e9b11 1507 u64_stats_update_end(&rx_ring->syncp);
fd0a05ce
JB
1508 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1509 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1510
1511 if (cleaned_count)
1512 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1513
1514 return budget > 0;
1515}
1516
1517/**
1518 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1519 * @napi: napi struct with our devices info in it
1520 * @budget: amount of work driver is allowed to do this pass, in packets
1521 *
1522 * This function will clean all queues associated with a q_vector.
1523 *
1524 * Returns the amount of work done
1525 **/
1526int i40e_napi_poll(struct napi_struct *napi, int budget)
1527{
1528 struct i40e_q_vector *q_vector =
1529 container_of(napi, struct i40e_q_vector, napi);
1530 struct i40e_vsi *vsi = q_vector->vsi;
cd0b6fa6 1531 struct i40e_ring *ring;
fd0a05ce
JB
1532 bool clean_complete = true;
1533 int budget_per_ring;
fd0a05ce
JB
1534
1535 if (test_bit(__I40E_DOWN, &vsi->state)) {
1536 napi_complete(napi);
1537 return 0;
1538 }
1539
cd0b6fa6
AD
1540 /* Since the actual Tx work is minimal, we can give the Tx a larger
1541 * budget and be more aggressive about cleaning up the Tx descriptors.
1542 */
1543 i40e_for_each_ring(ring, q_vector->tx)
1544 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1545
fd0a05ce
JB
1546 /* We attempt to distribute budget to each Rx queue fairly, but don't
1547 * allow the budget to go below 1 because that would exit polling early.
fd0a05ce
JB
1548 */
1549 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
cd0b6fa6
AD
1550
1551 i40e_for_each_ring(ring, q_vector->rx)
1552 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
fd0a05ce
JB
1553
1554 /* If work not completed, return budget and polling will return */
1555 if (!clean_complete)
1556 return budget;
1557
1558 /* Work is done so exit the polling mode and re-enable the interrupt */
1559 napi_complete(napi);
1560 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
1561 ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1562 i40e_update_dynamic_itr(q_vector);
1563
1564 if (!test_bit(__I40E_DOWN, &vsi->state)) {
1565 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1566 i40e_irq_dynamic_enable(vsi,
1567 q_vector->v_idx + vsi->base_vector);
1568 } else {
1569 struct i40e_hw *hw = &vsi->back->hw;
1570 /* We re-enable the queue 0 cause, but
1571 * don't worry about dynamic_enable
1572 * because we left it on for the other
1573 * possible interrupts during napi
1574 */
1575 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
1576 qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1577 wr32(hw, I40E_QINT_RQCTL(0), qval);
1578
1579 qval = rd32(hw, I40E_QINT_TQCTL(0));
1580 qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1581 wr32(hw, I40E_QINT_TQCTL(0), qval);
116a57d4
SN
1582
1583 i40e_irq_dynamic_enable_icr0(vsi->back);
fd0a05ce
JB
1584 }
1585 }
1586
1587 return 0;
1588}
1589
1590/**
1591 * i40e_atr - Add a Flow Director ATR filter
1592 * @tx_ring: ring to add programming descriptor to
1593 * @skb: send buffer
1594 * @flags: send flags
1595 * @protocol: wire protocol
1596 **/
1597static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1598 u32 flags, __be16 protocol)
1599{
1600 struct i40e_filter_program_desc *fdir_desc;
1601 struct i40e_pf *pf = tx_ring->vsi->back;
1602 union {
1603 unsigned char *network;
1604 struct iphdr *ipv4;
1605 struct ipv6hdr *ipv6;
1606 } hdr;
1607 struct tcphdr *th;
1608 unsigned int hlen;
1609 u32 flex_ptype, dtype_cmd;
fc4ac67b 1610 u16 i;
fd0a05ce
JB
1611
1612 /* make sure ATR is enabled */
60ea5f83 1613 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
fd0a05ce
JB
1614 return;
1615
1616 /* if sampling is disabled do nothing */
1617 if (!tx_ring->atr_sample_rate)
1618 return;
1619
fd0a05ce
JB
1620 /* snag network header to get L4 type and address */
1621 hdr.network = skb_network_header(skb);
1622
1623 /* Currently only IPv4/IPv6 with TCP is supported */
1624 if (protocol == htons(ETH_P_IP)) {
1625 if (hdr.ipv4->protocol != IPPROTO_TCP)
1626 return;
1627
1628 /* access ihl as a u8 to avoid unaligned access on ia64 */
1629 hlen = (hdr.network[0] & 0x0F) << 2;
1630 } else if (protocol == htons(ETH_P_IPV6)) {
1631 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1632 return;
1633
1634 hlen = sizeof(struct ipv6hdr);
1635 } else {
1636 return;
1637 }
1638
1639 th = (struct tcphdr *)(hdr.network + hlen);
1640
55a5e60b
ASJ
1641 /* Due to lack of space, no more new filters can be programmed */
1642 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1643 return;
1644
1645 tx_ring->atr_count++;
1646
ce806783
ASJ
1647 /* sample on all syn/fin/rst packets or once every atr sample rate */
1648 if (!th->fin &&
1649 !th->syn &&
1650 !th->rst &&
1651 (tx_ring->atr_count < tx_ring->atr_sample_rate))
fd0a05ce
JB
1652 return;
1653
1654 tx_ring->atr_count = 0;
1655
1656 /* grab the next descriptor */
fc4ac67b
AD
1657 i = tx_ring->next_to_use;
1658 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1659
1660 i++;
1661 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
fd0a05ce
JB
1662
1663 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1664 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1665 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
1666 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1667 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1668 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1669 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1670
1671 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1672
1673 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
1674
ce806783 1675 dtype_cmd |= (th->fin || th->rst) ?
fd0a05ce
JB
1676 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1677 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1678 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1679 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1680
1681 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1682 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1683
1684 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1685 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1686
433c47de
ASJ
1687 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
1688 dtype_cmd |=
1689 ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1690 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
1691
fd0a05ce 1692 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
99753ea6 1693 fdir_desc->rsvd = cpu_to_le32(0);
fd0a05ce 1694 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
99753ea6 1695 fdir_desc->fd_id = cpu_to_le32(0);
fd0a05ce
JB
1696}
1697
fd0a05ce
JB
1698/**
1699 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1700 * @skb: send buffer
1701 * @tx_ring: ring to send buffer on
1702 * @flags: the tx flags to be set
1703 *
1704 * Checks the skb and set up correspondingly several generic transmit flags
1705 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1706 *
1707 * Returns error code indicate the frame should be dropped upon error and the
1708 * otherwise returns 0 to indicate the flags has been set properly.
1709 **/
1710static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1711 struct i40e_ring *tx_ring,
1712 u32 *flags)
1713{
1714 __be16 protocol = skb->protocol;
1715 u32 tx_flags = 0;
1716
1717 /* if we have a HW VLAN tag being added, default to the HW one */
1718 if (vlan_tx_tag_present(skb)) {
1719 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1720 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1721 /* else if it is a SW VLAN, check the next protocol and store the tag */
0e2fe46c 1722 } else if (protocol == htons(ETH_P_8021Q)) {
fd0a05ce
JB
1723 struct vlan_hdr *vhdr, _vhdr;
1724 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1725 if (!vhdr)
1726 return -EINVAL;
1727
1728 protocol = vhdr->h_vlan_encapsulated_proto;
1729 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
1730 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
1731 }
1732
1733 /* Insert 802.1p priority into VLAN header */
1734 if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
1735 ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
1736 (skb->priority != TC_PRIO_CONTROL))) {
1737 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
1738 tx_flags |= (skb->priority & 0x7) <<
1739 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
1740 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
1741 struct vlan_ethhdr *vhdr;
dd225bc6
FR
1742 int rc;
1743
1744 rc = skb_cow_head(skb, 0);
1745 if (rc < 0)
1746 return rc;
fd0a05ce
JB
1747 vhdr = (struct vlan_ethhdr *)skb->data;
1748 vhdr->h_vlan_TCI = htons(tx_flags >>
1749 I40E_TX_FLAGS_VLAN_SHIFT);
1750 } else {
1751 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1752 }
1753 }
1754 *flags = tx_flags;
1755 return 0;
1756}
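The function stores the 16-bit VLAN TCI in the upper half of tx_flags and, when DCB is enabled, rewrites the 3-bit 802.1p priority inside that stored tag. A minimal user-space sketch of that packing follows; the shift and flag values are illustrative placeholders for the I40E_TX_FLAGS_* macros, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define VLAN_SHIFT	16			/* assumed: TCI stored in bits 16..31 */
#define PRIO_SHIFT	(VLAN_SHIFT + 13)	/* PCP = top 3 bits of the TCI */
#define PRIO_MASK	(0x7u << PRIO_SHIFT)
#define FLAG_HW_VLAN	0x2u			/* illustrative flag bit */

static uint32_t pack_vlan_flags(uint16_t vlan_tci, uint8_t skb_priority)
{
	uint32_t flags = FLAG_HW_VLAN | ((uint32_t)vlan_tci << VLAN_SHIFT);

	/* DCB: replace the PCP bits of the stored tag with skb->priority */
	flags &= ~PRIO_MASK;
	flags |= ((uint32_t)(skb_priority & 0x7)) << PRIO_SHIFT;
	return flags;
}

int main(void)
{
	/* VLAN 100, priority 5 -> tag 0xa064 in the upper half of the word */
	printf("tx_flags = 0x%08x\n", pack_vlan_flags(100, 5));
	return 0;
}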
1757
fd0a05ce
JB
1758/**
1759 * i40e_tso - set up the tso context descriptor
1760 * @tx_ring: ptr to the ring to send
1761 * @skb: ptr to the skb we're sending
1762 * @tx_flags: the collected send information
1763 * @protocol: the send protocol
1764 * @hdr_len: ptr to the size of the packet header
1765 * @cd_tunneling: ptr to context descriptor bits
1766 *
 1767 * Returns 0 if no TSO can happen, 1 if TSO is set up, or a negative error code
1768 **/
1769static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1770 u32 tx_flags, __be16 protocol, u8 *hdr_len,
1771 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1772{
1773 u32 cd_cmd, cd_tso_len, cd_mss;
dd225bc6 1774 struct ipv6hdr *ipv6h;
fd0a05ce
JB
1775 struct tcphdr *tcph;
1776 struct iphdr *iph;
1777 u32 l4len;
1778 int err;
fd0a05ce
JB
1779
1780 if (!skb_is_gso(skb))
1781 return 0;
1782
dd225bc6
FR
1783 err = skb_cow_head(skb, 0);
1784 if (err < 0)
1785 return err;
fd0a05ce 1786
0e2fe46c 1787 if (protocol == htons(ETH_P_IP)) {
fd0a05ce
JB
1788 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1789 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1790 iph->tot_len = 0;
1791 iph->check = 0;
1792 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1793 0, IPPROTO_TCP, 0);
1794 } else if (skb_is_gso_v6(skb)) {
1795
1796 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1797 : ipv6_hdr(skb);
1798 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1799 ipv6h->payload_len = 0;
1800 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
1801 0, IPPROTO_TCP, 0);
1802 }
1803
1804 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1805 *hdr_len = (skb->encapsulation
1806 ? (skb_inner_transport_header(skb) - skb->data)
1807 : skb_transport_offset(skb)) + l4len;
1808
1809 /* find the field values */
1810 cd_cmd = I40E_TX_CTX_DESC_TSO;
1811 cd_tso_len = skb->len - *hdr_len;
1812 cd_mss = skb_shinfo(skb)->gso_size;
829af3ac
MW
1813 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
1814 ((u64)cd_tso_len <<
1815 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1816 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
fd0a05ce
JB
1817 return 1;
1818}
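The last few statements simply pack three fields into quad word 1 of the context descriptor. A sketch of that packing, with assumed shift positions standing in for the I40E_TXD_CTX_QW1_* macros:

#include <stdint.h>

#define CTX_QW1_CMD_SHIFT      4	/* assumed field position */
#define CTX_QW1_TSO_LEN_SHIFT  30	/* assumed field position */
#define CTX_QW1_MSS_SHIFT      50	/* assumed field position */

static uint64_t build_tso_qw1(uint32_t cmd, uint32_t payload_len,
			      uint32_t mss)
{
	/* command, TSO payload length and MSS share one 64-bit quad word */
	return ((uint64_t)cmd << CTX_QW1_CMD_SHIFT) |
	       ((uint64_t)payload_len << CTX_QW1_TSO_LEN_SHIFT) |
	       ((uint64_t)mss << CTX_QW1_MSS_SHIFT);
}

/* e.g. a 64 KiB GSO skb with 200 bytes of headers and a 1448-byte MSS:
 * build_tso_qw1(tso_cmd, 65536 - 200, 1448);
 */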
1819
beb0dff1
JK
1820/**
1821 * i40e_tsyn - set up the tsyn context descriptor
1822 * @tx_ring: ptr to the ring to send
1823 * @skb: ptr to the skb we're sending
1824 * @tx_flags: the collected send information
1825 *
1826 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
1827 **/
1828static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1829 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
1830{
1831 struct i40e_pf *pf;
1832
1833 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1834 return 0;
1835
1836 /* Tx timestamps cannot be sampled when doing TSO */
1837 if (tx_flags & I40E_TX_FLAGS_TSO)
1838 return 0;
1839
1840 /* only timestamp the outbound packet if the user has requested it and
1841 * we are not already transmitting a packet to be timestamped
1842 */
1843 pf = i40e_netdev_to_pf(tx_ring->netdev);
1844 if (pf->ptp_tx && !pf->ptp_tx_skb) {
1845 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1846 pf->ptp_tx_skb = skb_get(skb);
1847 } else {
1848 return 0;
1849 }
1850
1851 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
1852 I40E_TXD_CTX_QW1_CMD_SHIFT;
1853
beb0dff1
JK
1854 return 1;
1855}
1856
fd0a05ce
JB
1857/**
1858 * i40e_tx_enable_csum - Enable Tx checksum offloads
1859 * @skb: send buffer
1860 * @tx_flags: Tx flags currently set
1861 * @td_cmd: Tx descriptor command bits to set
1862 * @td_offset: Tx descriptor header offsets to set
1863 * @cd_tunneling: ptr to context desc bits
1864 **/
1865static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1866 u32 *td_cmd, u32 *td_offset,
1867 struct i40e_ring *tx_ring,
1868 u32 *cd_tunneling)
1869{
1870 struct ipv6hdr *this_ipv6_hdr;
1871 unsigned int this_tcp_hdrlen;
1872 struct iphdr *this_ip_hdr;
1873 u32 network_hdr_len;
1874 u8 l4_hdr = 0;
1875
1876 if (skb->encapsulation) {
1877 network_hdr_len = skb_inner_network_header_len(skb);
1878 this_ip_hdr = inner_ip_hdr(skb);
1879 this_ipv6_hdr = inner_ipv6_hdr(skb);
1880 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
1881
1882 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1883
1884 if (tx_flags & I40E_TX_FLAGS_TSO) {
1885 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
1886 ip_hdr(skb)->check = 0;
1887 } else {
1888 *cd_tunneling |=
1889 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1890 }
1891 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1892 if (tx_flags & I40E_TX_FLAGS_TSO) {
1893 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1894 ip_hdr(skb)->check = 0;
1895 } else {
1896 *cd_tunneling |=
1897 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1898 }
1899 }
1900
1901 /* Now set the ctx descriptor fields */
1902 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
1903 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
1904 I40E_TXD_CTX_UDP_TUNNELING |
1905 ((skb_inner_network_offset(skb) -
1906 skb_transport_offset(skb)) >> 1) <<
1907 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1908
1909 } else {
1910 network_hdr_len = skb_network_header_len(skb);
1911 this_ip_hdr = ip_hdr(skb);
1912 this_ipv6_hdr = ipv6_hdr(skb);
1913 this_tcp_hdrlen = tcp_hdrlen(skb);
1914 }
1915
1916 /* Enable IP checksum offloads */
1917 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1918 l4_hdr = this_ip_hdr->protocol;
 1919 /* the stack computes the IP header checksum already, the only
 1920 * time we need the hardware to recompute it is in the case of TSO.
1921 */
1922 if (tx_flags & I40E_TX_FLAGS_TSO) {
1923 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
1924 this_ip_hdr->check = 0;
1925 } else {
1926 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
1927 }
1928 /* Now set the td_offset for IP header length */
1929 *td_offset = (network_hdr_len >> 2) <<
1930 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1931 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1932 l4_hdr = this_ipv6_hdr->nexthdr;
1933 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
1934 /* Now set the td_offset for IP header length */
1935 *td_offset = (network_hdr_len >> 2) <<
1936 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1937 }
1938 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
1939 *td_offset |= (skb_network_offset(skb) >> 1) <<
1940 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1941
1942 /* Enable L4 checksum offloads */
1943 switch (l4_hdr) {
1944 case IPPROTO_TCP:
1945 /* enable checksum offloads */
1946 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1947 *td_offset |= (this_tcp_hdrlen >> 2) <<
1948 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1949 break;
1950 case IPPROTO_SCTP:
1951 /* enable SCTP checksum offload */
1952 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1953 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
1954 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1955 break;
1956 case IPPROTO_UDP:
1957 /* enable UDP checksum offload */
1958 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1959 *td_offset |= (sizeof(struct udphdr) >> 2) <<
1960 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1961 break;
1962 default:
1963 break;
1964 }
1965}
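For the common non-tunnelled TCP/IPv4 case the function boils down to encoding three header lengths into td_offset: the MAC header in 2-byte words and the IP and L4 headers in 4-byte dwords. A sketch with assumed field positions (the real ones are the I40E_TX_DESC_LENGTH_* shifts):

#include <stdint.h>

#define MACLEN_SHIFT  0		/* assumed field position */
#define IPLEN_SHIFT   7		/* assumed field position */
#define L4LEN_SHIFT   14	/* assumed field position */

static uint32_t build_td_offset(uint32_t mac_hdr_len, uint32_t ip_hdr_len,
				uint32_t l4_hdr_len)
{
	return ((mac_hdr_len >> 1) << MACLEN_SHIFT) |	/* 2-byte words  */
	       ((ip_hdr_len  >> 2) << IPLEN_SHIFT)  |	/* 4-byte dwords */
	       ((l4_hdr_len  >> 2) << L4LEN_SHIFT);	/* 4-byte dwords */
}

/* Ethernet (14) + IPv4 (20) + TCP (20): build_td_offset(14, 20, 20) */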
1966
1967/**
 1968 * i40e_create_tx_ctx - Build the Tx context descriptor
1969 * @tx_ring: ring to create the descriptor on
1970 * @cd_type_cmd_tso_mss: Quad Word 1
1971 * @cd_tunneling: Quad Word 0 - bits 0-31
1972 * @cd_l2tag2: Quad Word 0 - bits 32-63
1973 **/
1974static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1975 const u64 cd_type_cmd_tso_mss,
1976 const u32 cd_tunneling, const u32 cd_l2tag2)
1977{
1978 struct i40e_tx_context_desc *context_desc;
fc4ac67b 1979 int i = tx_ring->next_to_use;
fd0a05ce 1980
ff40dd5d
JB
1981 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1982 !cd_tunneling && !cd_l2tag2)
fd0a05ce
JB
1983 return;
1984
1985 /* grab the next descriptor */
fc4ac67b
AD
1986 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
1987
1988 i++;
1989 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
fd0a05ce
JB
1990
1991 /* cpu_to_le32 and assign to struct fields */
1992 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
1993 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3efbbb20 1994 context_desc->rsvd = cpu_to_le16(0);
fd0a05ce
JB
1995 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1996}
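Note that all four fields of the context descriptor, including the reserved word, are written on every use, so a descriptor recycled from an earlier packet never carries stale bits. A rough user-space model of that layout and of the unconditional initialization (the real descriptor uses little-endian __le fields; this struct is only an illustration):

#include <stdint.h>

struct ctx_desc_sketch {
	uint32_t tunneling_params;	/* QW0, bits 0-31  */
	uint16_t l2tag2;		/* QW0, bits 32-47 */
	uint16_t rsvd;			/* QW0, bits 48-63, always written as 0 */
	uint64_t type_cmd_tso_mss;	/* QW1 */
};

static void fill_ctx_desc(struct ctx_desc_sketch *d, uint64_t qw1,
			  uint32_t tunneling, uint16_t l2tag2)
{
	/* every field is written so the descriptor never holds stale data */
	d->tunneling_params = tunneling;
	d->l2tag2 = l2tag2;
	d->rsvd = 0;
	d->type_cmd_tso_mss = qw1;
}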
1997
1998/**
1999 * i40e_tx_map - Build the Tx descriptor
2000 * @tx_ring: ring to send buffer on
2001 * @skb: send buffer
2002 * @first: first buffer info buffer to use
2003 * @tx_flags: collected send information
2004 * @hdr_len: size of the packet header
2005 * @td_cmd: the command field in the descriptor
2006 * @td_offset: offset for checksum or crc
2007 **/
2008static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2009 struct i40e_tx_buffer *first, u32 tx_flags,
2010 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2011{
fd0a05ce
JB
2012 unsigned int data_len = skb->data_len;
2013 unsigned int size = skb_headlen(skb);
a5e9c572 2014 struct skb_frag_struct *frag;
fd0a05ce
JB
2015 struct i40e_tx_buffer *tx_bi;
2016 struct i40e_tx_desc *tx_desc;
a5e9c572 2017 u16 i = tx_ring->next_to_use;
fd0a05ce
JB
2018 u32 td_tag = 0;
2019 dma_addr_t dma;
2020 u16 gso_segs;
2021
fd0a05ce
JB
2022 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2023 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2024 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2025 I40E_TX_FLAGS_VLAN_SHIFT;
2026 }
2027
a5e9c572
AD
2028 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2029 gso_segs = skb_shinfo(skb)->gso_segs;
2030 else
2031 gso_segs = 1;
2032
2033 /* multiply data chunks by size of headers */
2034 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2035 first->gso_segs = gso_segs;
2036 first->skb = skb;
2037 first->tx_flags = tx_flags;
2038
2039 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2040
fd0a05ce 2041 tx_desc = I40E_TX_DESC(tx_ring, i);
a5e9c572
AD
2042 tx_bi = first;
2043
2044 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2045 if (dma_mapping_error(tx_ring->dev, dma))
2046 goto dma_error;
2047
2048 /* record length, and DMA address */
2049 dma_unmap_len_set(tx_bi, len, size);
2050 dma_unmap_addr_set(tx_bi, dma, dma);
2051
2052 tx_desc->buffer_addr = cpu_to_le64(dma);
2053
2054 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
fd0a05ce
JB
2055 tx_desc->cmd_type_offset_bsz =
2056 build_ctob(td_cmd, td_offset,
2057 I40E_MAX_DATA_PER_TXD, td_tag);
2058
fd0a05ce
JB
2059 tx_desc++;
2060 i++;
2061 if (i == tx_ring->count) {
2062 tx_desc = I40E_TX_DESC(tx_ring, 0);
2063 i = 0;
2064 }
fd0a05ce 2065
a5e9c572
AD
2066 dma += I40E_MAX_DATA_PER_TXD;
2067 size -= I40E_MAX_DATA_PER_TXD;
fd0a05ce 2068
a5e9c572
AD
2069 tx_desc->buffer_addr = cpu_to_le64(dma);
2070 }
fd0a05ce
JB
2071
2072 if (likely(!data_len))
2073 break;
2074
a5e9c572
AD
2075 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2076 size, td_tag);
fd0a05ce
JB
2077
2078 tx_desc++;
2079 i++;
2080 if (i == tx_ring->count) {
2081 tx_desc = I40E_TX_DESC(tx_ring, 0);
2082 i = 0;
2083 }
2084
a5e9c572
AD
2085 size = skb_frag_size(frag);
2086 data_len -= size;
fd0a05ce 2087
a5e9c572
AD
2088 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2089 DMA_TO_DEVICE);
fd0a05ce 2090
a5e9c572
AD
2091 tx_bi = &tx_ring->tx_bi[i];
2092 }
fd0a05ce 2093
1943d8ba
JB
2094 /* Place RS bit on last descriptor of any packet that spans across the
2095 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
2096 */
2097#define WB_STRIDE 0x3
2098 if (((i & WB_STRIDE) != WB_STRIDE) &&
2099 (first <= &tx_ring->tx_bi[i]) &&
2100 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
2101 tx_desc->cmd_type_offset_bsz =
2102 build_ctob(td_cmd, td_offset, size, td_tag) |
2103 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
2104 I40E_TXD_QW1_CMD_SHIFT);
2105 } else {
2106 tx_desc->cmd_type_offset_bsz =
2107 build_ctob(td_cmd, td_offset, size, td_tag) |
2108 cpu_to_le64((u64)I40E_TXD_CMD <<
2109 I40E_TXD_QW1_CMD_SHIFT);
2110 }
fd0a05ce 2111
7070ce0a
AD
2112 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2113 tx_ring->queue_index),
2114 first->bytecount);
2115
a5e9c572 2116 /* set the timestamp */
fd0a05ce 2117 first->time_stamp = jiffies;
fd0a05ce
JB
2118
2119 /* Force memory writes to complete before letting h/w
2120 * know there are new descriptors to fetch. (Only
2121 * applicable for weak-ordered memory model archs,
2122 * such as IA-64).
2123 */
2124 wmb();
2125
a5e9c572
AD
2126 /* set next_to_watch value indicating a packet is present */
2127 first->next_to_watch = tx_desc;
2128
2129 i++;
2130 if (i == tx_ring->count)
2131 i = 0;
2132
2133 tx_ring->next_to_use = i;
2134
2135 /* notify HW of packet */
fd0a05ce 2136 writel(i, tx_ring->tail);
a5e9c572 2137
fd0a05ce
JB
2138 return;
2139
2140dma_error:
a5e9c572 2141 dev_info(tx_ring->dev, "TX DMA map failed\n");
fd0a05ce
JB
2142
2143 /* clear dma mappings for failed tx_bi map */
2144 for (;;) {
2145 tx_bi = &tx_ring->tx_bi[i];
a5e9c572 2146 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
fd0a05ce
JB
2147 if (tx_bi == first)
2148 break;
2149 if (i == 0)
2150 i = tx_ring->count;
2151 i--;
2152 }
2153
fd0a05ce
JB
2154 tx_ring->next_to_use = i;
2155}
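The RS/EOP decision near the end of the mapping loop can be restated as a small predicate: request a descriptor write-back (RS) whenever the packet either ends on the write-back stride slot or spills out of the 4-descriptor window that contains its first buffer. The sketch below ignores ring wrap-around and is only meant to illustrate the condition:

#include <stdbool.h>
#include <stdint.h>

#define WB_STRIDE 0x3	/* 4-descriptor write-back window, as above */

/* returns true when the last descriptor of a packet should carry RS */
static bool needs_rs_bit(uint16_t first_idx, uint16_t last_idx)
{
	bool ends_on_stride = (last_idx & WB_STRIDE) == WB_STRIDE;
	bool same_window = (first_idx >= (last_idx & ~WB_STRIDE)) &&
			   (first_idx <= last_idx);

	/* EOP-only is allowed just for short packets inside one window */
	return ends_on_stride || !same_window;
}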
2156
2157/**
2158 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2159 * @tx_ring: the ring to be checked
2160 * @size: the size buffer we want to assure is available
2161 *
2162 * Returns -EBUSY if a stop is needed, else 0
2163 **/
2164static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2165{
2166 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8e9dca53 2167 /* Memory barrier before checking head and tail */
fd0a05ce
JB
2168 smp_mb();
2169
 2170 /* Check again in case another CPU has just made room available. */
2171 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2172 return -EBUSY;
2173
2174 /* A reprieve! - use start_queue because it doesn't call schedule */
2175 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2176 ++tx_ring->tx_stats.restart_queue;
2177 return 0;
2178}
2179
2180/**
2181 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2182 * @tx_ring: the ring to be checked
2183 * @size: the size buffer we want to assure is available
2184 *
2185 * Returns 0 if stop is not needed
2186 **/
2187static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2188{
2189 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2190 return 0;
2191 return __i40e_maybe_stop_tx(tx_ring, size);
2192}
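The split between i40e_maybe_stop_tx() and __i40e_maybe_stop_tx() is the usual lock-free stop/restart pattern: check cheaply on the fast path, and only on the slow path stop the queue first and then re-check, so a concurrent descriptor clean-up cannot be missed. A generic, self-contained sketch of the idea; the ring struct and fields here are hypothetical, not driver types, and the sequentially consistent atomics stand in for the driver's smp_mb():

#include <stdatomic.h>
#include <stdbool.h>

struct ring_sketch {
	_Atomic unsigned int free_descs;	/* updated by the clean-up path */
	atomic_bool stopped;			/* queue state */
};

static int maybe_stop_tx(struct ring_sketch *r, unsigned int needed)
{
	if (atomic_load(&r->free_descs) >= needed)
		return 0;			/* fast path: room available */

	atomic_store(&r->stopped, true);	/* 2nd level: stop first ... */
	/* ... then re-check, so a concurrent clean-up cannot be lost */
	if (atomic_load(&r->free_descs) < needed)
		return -1;			/* genuinely full: stay stopped */

	atomic_store(&r->stopped, false);	/* reprieve: restart the queue */
	return 0;
}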
2193
2194/**
2195 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2196 * @skb: send buffer
2197 * @tx_ring: ring to send buffer on
2198 *
 2199 * Returns the number of data descriptors needed for this skb. Returns 0
 2200 * to indicate there are not enough descriptors available in this ring
 2201 * since we need at least one descriptor.
2202 **/
2203static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2204 struct i40e_ring *tx_ring)
2205{
fd0a05ce 2206 unsigned int f;
fd0a05ce
JB
2207 int count = 0;
2208
2209 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2210 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
be560521 2211 * + 4 desc gap to avoid the cache line where head is,
fd0a05ce
JB
2212 * + 1 desc for context descriptor,
2213 * otherwise try next time
2214 */
fd0a05ce
JB
2215 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2216 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
980093eb 2217
fd0a05ce 2218 count += TXD_USE_COUNT(skb_headlen(skb));
be560521 2219 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
fd0a05ce
JB
2220 tx_ring->tx_stats.tx_busy++;
2221 return 0;
2222 }
2223 return count;
2224}
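The budget computed here is one descriptor per MAX_DATA-sized chunk of the linear area and of every page fragment, and the ring must additionally hold a 4 + 1 descriptor reserve for the head cache line gap and the context descriptor. A rough standalone version of that arithmetic, with an assumed chunk limit standing in for I40E_MAX_DATA_PER_TXD:

#include <stdio.h>

#define MAX_DATA_PER_TXD 16383u	/* assumed per-descriptor byte limit */
#define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

/* descriptors consumed by the packet data itself */
static unsigned int data_descriptors(unsigned int head_len,
				     const unsigned int *frag_len,
				     unsigned int nr_frags)
{
	unsigned int f, count = TXD_USE_COUNT(head_len);

	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_len[f]);
	return count;
}

int main(void)
{
	unsigned int frags[2] = { 4096, 65536 };
	unsigned int count = data_descriptors(256, frags, 2);

	/* the ring must have count + 4 + 1 free slots before queuing */
	printf("data descs = %u, free slots required = %u\n",
	       count, count + 4 + 1);
	return 0;
}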
2225
2226/**
2227 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2228 * @skb: send buffer
2229 * @tx_ring: ring to send buffer on
2230 *
2231 * Returns NETDEV_TX_OK if sent, else an error code
2232 **/
2233static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2234 struct i40e_ring *tx_ring)
2235{
2236 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2237 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2238 struct i40e_tx_buffer *first;
2239 u32 td_offset = 0;
2240 u32 tx_flags = 0;
2241 __be16 protocol;
2242 u32 td_cmd = 0;
2243 u8 hdr_len = 0;
beb0dff1 2244 int tsyn;
fd0a05ce
JB
2245 int tso;
2246 if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2247 return NETDEV_TX_BUSY;
2248
2249 /* prepare the xmit flags */
2250 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2251 goto out_drop;
2252
2253 /* obtain protocol of skb */
2254 protocol = skb->protocol;
2255
2256 /* record the location of the first descriptor for this packet */
2257 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2258
2259 /* setup IPv4/IPv6 offloads */
0e2fe46c 2260 if (protocol == htons(ETH_P_IP))
fd0a05ce 2261 tx_flags |= I40E_TX_FLAGS_IPV4;
0e2fe46c 2262 else if (protocol == htons(ETH_P_IPV6))
fd0a05ce
JB
2263 tx_flags |= I40E_TX_FLAGS_IPV6;
2264
2265 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
2266 &cd_type_cmd_tso_mss, &cd_tunneling);
2267
2268 if (tso < 0)
2269 goto out_drop;
2270 else if (tso)
2271 tx_flags |= I40E_TX_FLAGS_TSO;
2272
2273 skb_tx_timestamp(skb);
2274
beb0dff1
JK
2275 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2276
2277 if (tsyn)
2278 tx_flags |= I40E_TX_FLAGS_TSYN;
2279
b1941306
AD
2280 /* always enable CRC insertion offload */
2281 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2282
fd0a05ce 2283 /* Always offload the checksum, since it's in the data descriptor */
b1941306 2284 if (skb->ip_summed == CHECKSUM_PARTIAL) {
fd0a05ce
JB
2285 tx_flags |= I40E_TX_FLAGS_CSUM;
2286
fd0a05ce
JB
2287 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
2288 tx_ring, &cd_tunneling);
b1941306 2289 }
fd0a05ce
JB
2290
2291 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2292 cd_tunneling, cd_l2tag2);
2293
2294 /* Add Flow Director ATR if it's enabled.
2295 *
2296 * NOTE: this must always be directly before the data descriptor.
2297 */
2298 i40e_atr(tx_ring, skb, tx_flags, protocol);
2299
2300 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2301 td_cmd, td_offset);
2302
2303 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2304
2305 return NETDEV_TX_OK;
2306
2307out_drop:
2308 dev_kfree_skb_any(skb);
2309 return NETDEV_TX_OK;
2310}
2311
2312/**
2313 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2314 * @skb: send buffer
2315 * @netdev: network interface device structure
2316 *
2317 * Returns NETDEV_TX_OK if sent, else an error code
2318 **/
2319netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2320{
2321 struct i40e_netdev_priv *np = netdev_priv(netdev);
2322 struct i40e_vsi *vsi = np->vsi;
9f65e15b 2323 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
fd0a05ce
JB
2324
2325 /* hardware can't handle really short frames, hardware padding works
2326 * beyond this point
2327 */
2328 if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
2329 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
2330 return NETDEV_TX_OK;
2331 skb->len = I40E_MIN_TX_LEN;
2332 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
2333 }
2334
2335 return i40e_xmit_frame_ring(skb, tx_ring);
2336}