/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                            struct mlx4_en_rx_alloc *page_alloc,
                            const struct mlx4_en_frag_info *frag_info,
                            gfp_t _gfp)
{
        int order;
        struct page *page;
        dma_addr_t dma;

        for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
                gfp_t gfp = _gfp;

                if (order)
                        gfp |= __GFP_COMP | __GFP_NOWARN;
                page = alloc_pages(gfp, order);
                if (likely(page))
                        break;
                if (--order < 0 ||
                    ((PAGE_SIZE << order) < frag_info->frag_size))
                        return -ENOMEM;
        }
        dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
                           PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(priv->ddev, dma)) {
                put_page(page);
                return -ENOMEM;
        }
        page_alloc->size = PAGE_SIZE << order;
        page_alloc->page = page;
        page_alloc->dma = dma;
        page_alloc->offset = frag_info->frag_align;
        /* Not doing get_page() for each frag is a big win
         * on asymmetric workloads.
         */
        atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
        return 0;
}
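
/* Each allocated page of PAGE_SIZE << order bytes is sliced into
 * frag_stride-sized pieces.  Rather than taking a page reference per
 * piece as it is handed out, the count is primed once above to
 * size / frag_stride and every consumer later just drops its reference
 * with put_page(); e.g. a 16KB page with a 2KB stride starts at a count
 * of 8 (numbers illustrative).
 */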

static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
                               struct mlx4_en_rx_desc *rx_desc,
                               struct mlx4_en_rx_alloc *frags,
                               struct mlx4_en_rx_alloc *ring_alloc,
                               gfp_t gfp)
{
        struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
        const struct mlx4_en_frag_info *frag_info;
        struct page *page;
        dma_addr_t dma;
        int i;

        for (i = 0; i < priv->num_frags; i++) {
                frag_info = &priv->frag_info[i];
                page_alloc[i] = ring_alloc[i];
                page_alloc[i].offset += frag_info->frag_stride;
                if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
                        continue;
                if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
                        goto out;
        }

        for (i = 0; i < priv->num_frags; i++) {
                frags[i] = ring_alloc[i];
                dma = ring_alloc[i].dma + ring_alloc[i].offset;
                ring_alloc[i] = page_alloc[i];
                rx_desc->data[i].addr = cpu_to_be64(dma);
        }

        return 0;

out:
        while (i--) {
                frag_info = &priv->frag_info[i];
                if (page_alloc[i].page != ring_alloc[i].page) {
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
                                       page_alloc[i].size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
                        atomic_set(&page->_count, 1);
                        put_page(page);
                }
        }
        return -ENOMEM;
}
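
/* The refill above is all-or-nothing: a replacement slice is reserved
 * for every fragment first, and only then are the current slices handed
 * over to 'frags' and the HW descriptor rewritten.  If any allocation
 * fails, the pages reserved so far are unwound on the 'out' path and
 * the descriptor is left untouched.
 */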

static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
                              struct mlx4_en_rx_alloc *frags,
                              int i)
{
        const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

        if (frags[i].offset + frag_info->frag_stride > frags[i].size)
                dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
                               PCI_DMA_FROMDEVICE);

        if (frags[i].page)
                put_page(frags[i].page);
}

static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
                                  struct mlx4_en_rx_ring *ring)
{
        struct mlx4_en_rx_alloc *page_alloc;
        int i;

        for (i = 0; i < priv->num_frags; i++) {
                const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

                if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
                                     frag_info, GFP_KERNEL))
                        goto out;
        }
        return 0;

out:
        while (i--) {
                struct page *page;

                page_alloc = &ring->page_alloc[i];
                dma_unmap_page(priv->ddev, page_alloc->dma,
                               page_alloc->size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
                atomic_set(&page->_count, 1);
                put_page(page);
                page_alloc->page = NULL;
        }
        return -ENOMEM;
}

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
                                      struct mlx4_en_rx_ring *ring)
{
        struct mlx4_en_rx_alloc *page_alloc;
        int i;

        for (i = 0; i < priv->num_frags; i++) {
                const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

                page_alloc = &ring->page_alloc[i];
                en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
                       i, page_count(page_alloc->page));

                dma_unmap_page(priv->ddev, page_alloc->dma,
                               page_alloc->size, PCI_DMA_FROMDEVICE);
                while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
                        put_page(page_alloc->page);
                        page_alloc->offset += frag_info->frag_stride;
                }
                page_alloc->page = NULL;
        }
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring, int index)
{
        struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
        int possible_frags;
        int i;

        /* Set size and memtype fields */
        for (i = 0; i < priv->num_frags; i++) {
                rx_desc->data[i].byte_count =
                        cpu_to_be32(priv->frag_info[i].frag_size);
                rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
        }

        /* If the number of used fragments does not fill up the ring stride,
         * the remaining (unused) fragments must be padded with a null
         * address/size and a special memory key.
         */
        possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
        for (i = priv->num_frags; i < possible_frags; i++) {
                rx_desc->data[i].byte_count = 0;
                rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
                rx_desc->data[i].addr = 0;
        }
}
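
/* possible_frags is the number of DS_SIZE-byte scatter entries that fit
 * in one ring stride after the descriptor header; entries beyond
 * num_frags are padded above with a zero byte count and the PAD lkey so
 * the HW skips them.  For example, a 64-byte stride with 16-byte entries
 * and no header leaves room for 4 entries (sizes illustrative only).
 */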

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
                                   struct mlx4_en_rx_ring *ring, int index,
                                   gfp_t gfp)
{
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
        struct mlx4_en_rx_alloc *frags = ring->rx_info +
                                         (index << priv->log_rx_info);

        return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
        *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
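
/* Publish the Rx producer index through the doorbell record in wqres.db;
 * only the low 16 bits of the counter are meaningful to the device,
 * hence the mask above.
 */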

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring,
                                 int index)
{
        struct mlx4_en_rx_alloc *frags;
        int nr;

        frags = ring->rx_info + (index << priv->log_rx_info);
        for (nr = 0; nr < priv->num_frags; nr++) {
                en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
                mlx4_en_free_frag(priv, frags, nr);
        }
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
        struct mlx4_en_rx_ring *ring;
        int ring_ind;
        int buf_ind;
        int new_size;

        for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
                for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                        ring = &priv->rx_ring[ring_ind];

                        if (mlx4_en_prepare_rx_desc(priv, ring,
                                                    ring->actual_size,
                                                    GFP_KERNEL)) {
                                if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
                                        en_err(priv, "Failed to allocate enough rx buffers\n");
                                        return -ENOMEM;
                                }
                                new_size = rounddown_pow_of_two(ring->actual_size);
                                en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
                                        ring->actual_size, new_size);
                                goto reduce_rings;
                        }
                        ring->actual_size++;
                        ring->prod++;
                }
        }
        return 0;

reduce_rings:
        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = &priv->rx_ring[ring_ind];
                while (ring->actual_size > new_size) {
                        ring->actual_size--;
                        ring->prod--;
                        mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
                }
        }

        return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
{
        int index;

        en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
               ring->cons, ring->prod);

        /* Unmap and free Rx buffers */
        BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
        while (ring->cons != ring->prod) {
                index = ring->cons & ring->size_mask;
                en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
                mlx4_en_free_rx_desc(priv, ring, index);
                ++ring->cons;
        }
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = -ENOMEM;
        int tmp;

        ring->prod = 0;
        ring->cons = 0;
        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;
        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                        sizeof(struct mlx4_en_rx_alloc));
        ring->rx_info = vmalloc(tmp);
        if (!ring->rx_info)
                return -ENOMEM;

        en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
               ring->rx_info, tmp);

        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
                                 ring->buf_size, 2 * PAGE_SIZE);
        if (err)
                goto err_ring;

        err = mlx4_en_map_buffer(&ring->wqres.buf);
        if (err) {
                en_err(priv, "Failed to map RX buffer\n");
                goto err_hwq;
        }
        ring->buf = ring->wqres.buf.direct.buf;

        ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

        return 0;

err_hwq:
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
        vfree(ring->rx_info);
        ring->rx_info = NULL;
        return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
        struct mlx4_en_rx_ring *ring;
        int i;
        int ring_ind;
        int err;
        int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                        DS_SIZE * priv->num_frags);

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = &priv->rx_ring[ring_ind];

                ring->prod = 0;
                ring->cons = 0;
                ring->actual_size = 0;
                ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;

                ring->stride = stride;
                if (ring->stride <= TXBB_SIZE)
                        ring->buf += TXBB_SIZE;

                ring->log_stride = ffs(ring->stride) - 1;
                ring->buf_size = ring->size * ring->stride;

                memset(ring->buf, 0, ring->buf_size);
                mlx4_en_update_rx_prod_db(ring);

                /* Initialize all descriptors */
                for (i = 0; i < ring->size; i++)
                        mlx4_en_init_rx_desc(priv, ring, i);

                /* Initialize page allocators */
                err = mlx4_en_init_allocator(priv, ring);
                if (err) {
                        en_err(priv, "Failed initializing ring allocator\n");
                        if (ring->stride <= TXBB_SIZE)
                                ring->buf -= TXBB_SIZE;
                        ring_ind--;
                        goto err_allocator;
                }
        }
        err = mlx4_en_fill_rx_buffers(priv);
        if (err)
                goto err_buffers;

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = &priv->rx_ring[ring_ind];

                ring->size_mask = ring->actual_size - 1;
                mlx4_en_update_rx_prod_db(ring);
        }

        return 0;

err_buffers:
        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
                mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);

        ring_ind = priv->rx_ring_num - 1;
err_allocator:
        while (ring_ind >= 0) {
                if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
                        priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
                mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
                ring_ind--;
        }
        return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
        vfree(ring->rx_info);
        ring->rx_info = NULL;
#ifdef CONFIG_RFS_ACCEL
        mlx4_en_cleanup_filters(priv, ring);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
{
        mlx4_en_free_rx_buf(priv, ring);
        if (ring->stride <= TXBB_SIZE)
                ring->buf -= TXBB_SIZE;
        mlx4_en_destroy_allocator(priv, ring);
}

static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                                    struct mlx4_en_rx_desc *rx_desc,
                                    struct mlx4_en_rx_alloc *frags,
                                    struct sk_buff *skb,
                                    int length)
{
        struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
        struct mlx4_en_frag_info *frag_info;
        dma_addr_t dma;
        int nr;

        /* Collect used fragments while replacing them in the HW descriptors */
        for (nr = 0; nr < priv->num_frags; nr++) {
                frag_info = &priv->frag_info[nr];
                if (length <= frag_info->frag_prefix_size)
                        break;
                if (!frags[nr].page)
                        goto fail;

                dma = be64_to_cpu(rx_desc->data[nr].addr);
                dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
                                        DMA_FROM_DEVICE);

                /* Save page reference in skb */
                __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
                skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
                skb_frags_rx[nr].page_offset = frags[nr].offset;
                skb->truesize += frag_info->frag_stride;
                frags[nr].page = NULL;
        }
        /* Adjust size of last fragment to match actual length */
        skb_frag_size_set(&skb_frags_rx[nr - 1],
                          length - priv->frag_info[nr - 1].frag_prefix_size);
        return nr;

fail:
        while (nr > 0) {
                nr--;
                __skb_frag_unref(&skb_frags_rx[nr]);
        }
        return 0;
}

static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
                                      struct mlx4_en_rx_desc *rx_desc,
                                      struct mlx4_en_rx_alloc *frags,
                                      unsigned int length)
{
        struct sk_buff *skb;
        void *va;
        int used_frags;
        dma_addr_t dma;

        skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
        if (!skb) {
                en_dbg(RX_ERR, priv, "Failed allocating skb\n");
                return NULL;
        }
        skb_reserve(skb, NET_IP_ALIGN);
        skb->len = length;

        /* Get a pointer to the first fragment so we can copy the headers into
         * the (linear part of the) skb.
         */
        va = page_address(frags[0].page) + frags[0].offset;

        if (length <= SMALL_PACKET_SIZE) {
                /* We are copying all relevant data to the skb - temporarily
                 * sync buffers for the copy.
                 */
                dma = be64_to_cpu(rx_desc->data[0].addr);
                dma_sync_single_for_cpu(priv->ddev, dma, length,
                                        DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, va, length);
                skb->tail += length;
        } else {
                /* Move relevant fragments to skb */
                used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
                                                      skb, length);
                if (unlikely(!used_frags)) {
                        kfree_skb(skb);
                        return NULL;
                }
                skb_shinfo(skb)->nr_frags = used_frags;

                /* Copy headers into the skb linear buffer */
                memcpy(skb->data, va, HEADER_COPY_SIZE);
                skb->tail += HEADER_COPY_SIZE;

                /* Skip headers in first fragment */
                skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

                /* Adjust size of first fragment */
                skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
                skb->data_len = length - HEADER_COPY_SIZE;
        }
        return skb;
}
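
/* Two delivery strategies are used above: frames up to SMALL_PACKET_SIZE
 * are copied whole into the skb linear area (the page slice stays with
 * the ring), while larger frames keep their page fragments and only the
 * first HEADER_COPY_SIZE bytes are pulled into the linear area so the
 * stack can parse the headers cheaply.
 */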

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
        int i;
        int offset = ETH_HLEN;

        for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
                if (*(skb->data + offset) != (unsigned char) (i & 0xff))
                        goto out_loopback;
        }
        /* Loopback found */
        priv->loopback_ok = 1;

out_loopback:
        dev_kfree_skb_any(skb);
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
                                      struct mlx4_en_rx_ring *ring)
{
        int index = ring->prod & ring->size_mask;

        while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
                if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
                        break;
                ring->prod++;
                index = ring->prod & ring->size_mask;
        }
}
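
/* ring->prod - ring->cons is the number of posted but not yet completed
 * buffers; the refill loop keeps posting until that again covers
 * actual_size entries, or stops early if an atomic allocation fails and
 * retries on the next completion pass.
 */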

int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_cqe *cqe;
        struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
        struct mlx4_en_rx_alloc *frags;
        struct mlx4_en_rx_desc *rx_desc;
        struct sk_buff *skb;
        int index;
        int nr;
        unsigned int length;
        int polled = 0;
        int ip_summed;
        int factor = priv->cqe_factor;
        u64 timestamp;

        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so the Rx
         * descriptor offset can be deduced from the CQE index instead of
         * reading 'cqe->index'.
         */
        index = cq->mcq.cons_index & ring->size_mask;
        cqe = &cq->buf[(index << factor) + factor];
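
        /* 'factor' accounts for the CQE stride: with 64-byte CQEs
         * (cqe_factor == 1) the meaningful part sits in the second half
         * of each entry, hence the (index << factor) + factor addressing;
         * with 32-byte CQEs the factor is 0 and this is a plain index.
         */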

        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                    cq->mcq.cons_index & cq->size)) {

                frags = ring->rx_info + (index << priv->log_rx_info);
                rx_desc = ring->buf + (index << ring->log_stride);

                /*
                 * make sure we read the CQE after we read the ownership bit
                 */
                rmb();

                /* Drop packet on bad receive or bad checksum */
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                             MLX4_CQE_OPCODE_ERROR)) {
                        en_err(priv, "CQE completed in error - vendor "
                                     "syndrome:%d syndrome:%d\n",
                               ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
                               ((struct mlx4_err_cqe *)cqe)->syndrome);
                        goto next;
                }
                if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
                        en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
                        goto next;
                }

                /* Check if we need to drop the packet: this filtering is only
                 * required when SRIOV is not enabled, we are not running the
                 * selftest and HW loopback filtering is disabled.
                 */
                if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
                        struct ethhdr *ethh;
                        dma_addr_t dma;
                        /* Get a pointer to the first fragment, since we don't
                         * have an skb yet, and cast it to an ethhdr struct.
                         */
                        dma = be64_to_cpu(rx_desc->data[0].addr);
                        dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
                                                DMA_FROM_DEVICE);
                        ethh = (struct ethhdr *)(page_address(frags[0].page) +
                                                 frags[0].offset);

                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
                                struct hlist_head *bucket;
                                unsigned int mac_hash;

                                /* Drop the packet, since HW loopback-ed it */
                                mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
                                rcu_read_lock();
                                hlist_for_each_entry_rcu(entry, bucket, hlist) {
                                        if (ether_addr_equal_64bits(entry->mac,
                                                                    ethh->h_source)) {
                                                rcu_read_unlock();
                                                goto next;
                                        }
                                }
                                rcu_read_unlock();
                        }
                }

                /*
                 * Packet is OK - process it.
                 */
                length = be32_to_cpu(cqe->byte_cnt);
                length -= ring->fcs_del;
                ring->bytes += length;

                if (likely(dev->features & NETIF_F_RXCSUM)) {
                        if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
                            (cqe->checksum == cpu_to_be16(0xffff))) {
                                /* This packet is eligible for GRO if it is:
                                 * - DIX Ethernet (type interpretation)
                                 * - TCP/IP (v4)
                                 * - without IP options
                                 * - not an IP fragment
                                 * - no LLS polling in progress
                                 */
                                if (!mlx4_en_cq_ll_polling(cq) &&
                                    (dev->features & NETIF_F_GRO)) {
                                        struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
                                        if (!gro_skb)
                                                goto next;

                                        nr = mlx4_en_complete_rx_desc(priv,
                                                rx_desc, frags, gro_skb,
                                                length);
                                        if (!nr)
                                                goto next;

                                        skb_shinfo(gro_skb)->nr_frags = nr;
                                        gro_skb->len = length;
                                        gro_skb->data_len = length;
                                        gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

                                        if ((cqe->vlan_my_qpn &
                                             cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
                                            (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                                                u16 vid = be16_to_cpu(cqe->sl_vid);

                                                __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
                                        }

                                        if (dev->features & NETIF_F_RXHASH)
                                                gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

                                        skb_record_rx_queue(gro_skb, cq->ring);

                                        if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                                                timestamp = mlx4_en_get_cqe_ts(cqe);
                                                mlx4_en_fill_hwtstamps(mdev,
                                                                       skb_hwtstamps(gro_skb),
                                                                       timestamp);
                                        }

                                        napi_gro_frags(&cq->napi);
                                        goto next;
                                }

                                /* GRO not possible, complete processing here */
                                ip_summed = CHECKSUM_UNNECESSARY;
                        } else {
                                ip_summed = CHECKSUM_NONE;
                        }
                } else {
                        ip_summed = CHECKSUM_NONE;
                }

                skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
                if (!skb) {
                        priv->stats.rx_dropped++;
                        goto next;
                }

                if (unlikely(priv->validate_loopback)) {
                        validate_loopback(priv, skb);
                        goto next;
                }

                skb->ip_summed = ip_summed;
                skb->protocol = eth_type_trans(skb, dev);
                skb_record_rx_queue(skb, cq->ring);

                if (dev->features & NETIF_F_RXHASH)
                        skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

                if ((be32_to_cpu(cqe->vlan_my_qpn) &
                     MLX4_CQE_VLAN_PRESENT_MASK) &&
                    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));

                if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                        timestamp = mlx4_en_get_cqe_ts(cqe);
                        mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
                                               timestamp);
                }

                skb_mark_napi_id(skb, &cq->napi);

                /* Push it up the stack */
                netif_receive_skb(skb);

next:
                for (nr = 0; nr < priv->num_frags; nr++)
                        mlx4_en_free_frag(priv, frags, nr);

                ++cq->mcq.cons_index;
                index = (cq->mcq.cons_index) & ring->size_mask;
                cqe = &cq->buf[(index << factor) + factor];
                if (++polled == budget)
                        goto out;
        }

out:
        AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
        mlx4_cq_set_ci(&cq->mcq);
        wmb(); /* ensure HW sees CQ consumer before we post new buffers */
        ring->cons = cq->mcq.cons_index;
        mlx4_en_refill_rx_buffers(priv, ring);
        mlx4_en_update_rx_prod_db(ring);
        return polled;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);

        if (priv->port_up)
                napi_schedule(&cq->napi);
        else
                mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int done;

        if (!mlx4_en_cq_lock_napi(cq))
                return budget;

        done = mlx4_en_process_rx_cq(dev, cq, budget);

        mlx4_en_cq_unlock_napi(cq);

        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget) {
                INC_PERF_COUNTER(priv->pstats.napi_quota);
        } else {
                /* Done for now */
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
        return done;
}
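
/* Per the NAPI contract, returning the full 'budget' keeps this NAPI
 * instance scheduled for another pass, while consuming less than the
 * budget lets us complete NAPI and re-arm the CQ interrupt above.
 */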

static const int frag_sizes[] = {
        FRAG_SZ0,
        FRAG_SZ1,
        FRAG_SZ2,
        FRAG_SZ3
};

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
        int buf_size = 0;
        int i = 0;

        while (buf_size < eff_mtu) {
                priv->frag_info[i].frag_size =
                        (eff_mtu > buf_size + frag_sizes[i]) ?
                                frag_sizes[i] : eff_mtu - buf_size;
                priv->frag_info[i].frag_prefix_size = buf_size;
                if (!i) {
                        priv->frag_info[i].frag_align = NET_IP_ALIGN;
                        priv->frag_info[i].frag_stride =
                                ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
                } else {
                        priv->frag_info[i].frag_align = 0;
                        priv->frag_info[i].frag_stride =
                                ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
                }
                buf_size += priv->frag_info[i].frag_size;
                i++;
        }

        priv->num_frags = i;
        priv->rx_skb_size = eff_mtu;
        priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

        en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
               eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
                en_err(priv,
                       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
                       i,
                       priv->frag_info[i].frag_size,
                       priv->frag_info[i].frag_prefix_size,
                       priv->frag_info[i].frag_align,
                       priv->frag_info[i].frag_stride);
        }
}
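
/* Example: with a standard 1500-byte MTU, eff_mtu = 1500 + 14 (ETH_HLEN)
 * + 4 (VLAN_HLEN) + 8 (LLC/SNAP allowance) = 1526 bytes, which is
 * expected to fit inside the first frag_sizes[] entry, so each packet
 * uses a single fragment; jumbo MTUs spill into further fragments until
 * the accumulated prefix covers eff_mtu.
 */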

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
                                 struct mlx4_en_rx_ring *ring,
                                 enum mlx4_qp_state *state,
                                 struct mlx4_qp *qp)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_qp_context *context;
        int err = 0;

        context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        err = mlx4_qp_alloc(mdev->dev, qpn, qp);
        if (err) {
                en_err(priv, "Failed to allocate qp #%x\n", qpn);
                goto out;
        }
        qp->event = mlx4_en_sqp_event;

        memset(context, 0, sizeof(*context));
        mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
                                qpn, ring->cqn, -1, context);
        context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

        /* Cancel FCS removal if FW allows */
        if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
                context->param3 |= cpu_to_be32(1 << 29);
                ring->fcs_del = ETH_FCS_LEN;
        } else {
                ring->fcs_del = 0;
        }

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
        if (err) {
                mlx4_qp_remove(mdev->dev, qp);
                mlx4_qp_free(mdev->dev, qp);
        }
        mlx4_en_update_rx_prod_db(ring);
out:
        kfree(context);
        return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
        int err;
        u32 qpn;

        err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
        if (err) {
                en_err(priv, "Failed reserving drop qpn\n");
                return err;
        }
        err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
        if (err) {
                en_err(priv, "Failed allocating drop qp\n");
                mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
                return err;
        }

        return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
        u32 qpn;

        qpn = priv->drop_qp.qpn;
        mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
        mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
        mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate Rx QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        struct mlx4_qp_context context;
        struct mlx4_rss_context *rss_context;
        int rss_rings;
        void *ptr;
        u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
                       MLX4_RSS_TCP_IPV6);
        int i, qpn;
        int err = 0;
        int good_qps = 0;
        static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
                                        0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
                                        0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

        en_dbg(DRV, priv, "Configuring rss steering\n");
        err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
                                    priv->rx_ring_num,
                                    &rss_map->base_qpn);
        if (err) {
                en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
                return err;
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                qpn = rss_map->base_qpn + i;
                err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
                                            &rss_map->state[i],
                                            &rss_map->qps[i]);
                if (err)
                        goto rss_err;

                ++good_qps;
        }

        /* Configure RSS indirection qp */
        err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
        if (err) {
                en_err(priv, "Failed to allocate RSS indirection QP\n");
                goto rss_err;
        }
        rss_map->indir_qp.event = mlx4_en_sqp_event;
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
                                priv->rx_ring[0].cqn, -1, &context);

        if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
                rss_rings = priv->rx_ring_num;
        else
                rss_rings = priv->prof->rss_rings;

        ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
                                  + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
        rss_context = ptr;
        rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
                                            (rss_map->base_qpn));
        rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        if (priv->mdev->profile.udp_rss) {
                rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
                rss_context->base_qpn_udp = rss_context->default_qpn;
        }
        rss_context->flags = rss_mask;
        rss_context->hash_fn = MLX4_RSS_HASH_TOP;
        for (i = 0; i < 10; i++)
                rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

        err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
                               &rss_map->indir_qp, &rss_map->indir_state);
        if (err)
                goto indir_err;

        return 0;

indir_err:
        mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
        mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
        mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
        for (i = 0; i < good_qps; i++) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
                               MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
                mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
                mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
        }
        mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
        return err;
}
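
/* MLX4_RSS_HASH_TOP selects the Toeplitz hash function and rsskey above
 * is the fixed key fed to it; ilog2(rss_rings) packed into the top byte
 * of base_qpn tells the device how many consecutive QPs the indirection
 * spans, so rss_rings is expected to be a power of two.
 */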

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        int i;

        mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
        mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
        mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
                               MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
                mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
                mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
        }
        mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}