/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

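/* Example (illustrative only; the real EFX_MAX_FRAME_LEN() and
 * EFX_RX_USR_BUF_SIZE values are defined elsewhere in the driver headers):
 * if the maximum frame length were 9234 bytes and each buffer held 2048
 * usable bytes, one packet could span DIV_ROUND_UP(9234, 2048) = 5 buffers,
 * making EFX_RXD_HEAD_ROOM 6 descriptors.
 */
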
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

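/* Illustration (not exact figures): with 4KiB pages, rx_buffer_order == 0 and
 * a DMA length that lets two aligned buffer steps plus the
 * struct efx_rx_page_state header fit in one page, rx_bufs_per_page is 2,
 * rx_buffer_truesize is PAGE_SIZE / 2 = 2048 bytes, and the preferred batch
 * of 8 buffers needs rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4 pages.
 */
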
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Return a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + NET_IP_ALIGN;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

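/* Resulting layout of an order-0 receive page filled in above (illustrative,
 * assuming two buffers per page):
 *
 *   [struct efx_rx_page_state][NET_IP_ALIGN][buffer 0][NET_IP_ALIGN][buffer 1]
 *
 * Each buffer holds its own page reference (get_page() above), and only the
 * buffer flagged EFX_RX_BUF_LAST_IN_PAGE later unmaps or recycles the page.
 */
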
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

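/* Recycle ring discipline, as implemented above: page_add and page_remove are
 * free-running counters masked with page_ptr_mask, and a NULL entry marks a
 * free slot.  Pages are only stored into NULL slots; if the slot is already
 * occupied the event is counted in page_recycle_full and the page is unmapped
 * and released instead.
 */
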
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

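/* Refill granularity: the queue is topped up one page batch at a time
 * (rx_pages_per_batch pages, i.e. batch_size buffers) until less than a
 * full batch of space remains, so the final fill level may sit slightly
 * below max_fill.
 */
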
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1)
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->type->rx_buffer_hash_size;
	rx_buf->len -= efx->type->rx_buffer_hash_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
		}
		rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

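/* Note: efx_rx_packet() deliberately stops short of touching the payload;
 * it only records rx_pkt_n_frags/rx_pkt_index, and the deferred
 * __efx_rx_packet() call (via efx_rx_flush_packet(), typically on the next
 * receive or when the channel finishes its event processing) does the
 * delivery, by which time the prefetch above has had a chance to pull the
 * headers into cache.
 */
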
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

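/* The larger IOMMU-sized ring is used whenever an IOMMU is detected, and
 * unconditionally on PPC64, which is treated here as always having one.
 * Presumably this is because dma_map_page()/dma_unmap_page() are much more
 * expensive behind an IOMMU, so keeping already-mapped pages on the recycle
 * ring pays off there.
 */
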
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}

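/* Worked example (illustrative values): with rxq_entries = 512, the head
 * room of 6 used in the example near the top of this file,
 * rx_pages_per_batch = 4 and rx_bufs_per_page = 2, max_fill = 506 and
 * max_trigger = 506 - 8 = 498.  With the default rx_refill_threshold of 0,
 * fast_fill_trigger is simply max_trigger, so refilling starts as soon as
 * one batch of space opens up.
 */
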
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}

	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");