/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */
#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static int enable_mq = 1;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}
static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
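/* Each interrupt vector has its own IMR register, laid out at an 8-byte
 * stride from VMXNET3_REG_IMR; writing 0 unmasks the vector and writing 1
 * masks it, which is all the two helpers above do.
 */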
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}
static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}
static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
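/* The GET_LINK command reply packs the link state into bit 0 and the link
 * speed (in Mbps) into the upper 16 bits, which is how "ret" is decoded
 * above.
 */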
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is : double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly
 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into required format.
 * In order to avoid touching bits in shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}
#endif  /* __BIG_ENDIAN_BITFIELD */
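/* As an illustrative sketch only (the field position here is made up; the
 * real positions come from the VMXNET3_*_SHIFT/_SIZE constants), reading a
 * 1-bit gen field at bit 7 of the third dword of a descriptor would be:
 *
 *	u32 gen = get_bitfield32((const __le32 *)desc + 2, 7, 1);
 *
 * get_bitfield32() masks the dword with ((1 << size) - 1) << pos and then
 * shifts the extracted field down to bit 0.
 */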
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
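/* Completion descriptors follow the usual vmxnet3 "gen" protocol: an entry
 * belongs to the driver only while its gen bit equals comp_ring.gen, and
 * the ring-advance helper flips comp_ring.gen each time next2proc wraps,
 * so stale entries from the previous pass are never re-processed.
 */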
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->data_ring.size * tq->txdata_desc_size,
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);
			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but dont mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
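/* Note the "<=" in the loop above: up to num_to_alloc + 1 descriptors are
 * written, but the last one is left without its gen bit set, i.e. it is
 * never handed to the device.  Keeping one slot unpublished is what lets
 * next2fill == next2comp unambiguously mean "empty" rather than "full".
 */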
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					tq->txdata_desc_size);
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
			return -EFAULT;

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
				return -EFAULT;

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llx %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;

	return 0;
}
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
 *    parse relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion to be copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_tx_ctx *ctx,
		  struct vmxnet3_adapter *adapter)
{
	u8 protocol = 0;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				protocol = iph->protocol;
			} else if (ctx->ipv6) {
				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

				protocol = ipv6h->nexthdr;
			}

			switch (protocol) {
			case IPPROTO_TCP:
				ctx->l4_hdr_size = tcp_hdrlen(skb);
				break;
			case IPPROTO_UDP:
				ctx->l4_hdr_size = sizeof(struct udphdr);
				break;
			default:
				ctx->l4_hdr_size = 0;
				break;
			}

			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min_t(unsigned int,
					       tq->txdata_desc_size,
					       skb_headlen(skb));
		}

		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
			ctx->copy_size = skb->len;

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	return 1;
err:
	return -1;
}
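/* For example, for a TSO'd IPv4/TCP packet with a 14-byte Ethernet header,
 * a 20-byte IP header and a 32-byte TCP header (options included), the code
 * above would leave ctx->eth_ip_hdr_size == 34, ctx->l4_hdr_size == 32 and
 * ctx->copy_size == 66 (header sizes purely illustrative).
 */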
/*
 *    copy relevant protocol headers to the transmit ring:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 *
 *    Note that this requires that vmxnet3_parse_hdr be called first to set the
 *      appropriate bits in ctx first
 */
static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		 struct vmxnet3_tx_ctx *ctx,
		 struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
					    tq->tx_ring.next2fill *
					    tq->txdata_desc_size);

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else if (ctx->ipv6) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
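/* The "+ 1" above reserves the extra SOP descriptor that vmxnet3_map_pkt()
 * emits when headers are copied into the tx data ring; VMXNET3_TXD_NEEDED()
 * itself only counts the descriptors needed to cover a buffer in
 * VMXNET3_MAX_TX_BUF_SIZE chunks.
 */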
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				tq->stats.drop_oversized_hdr++;
				goto drop_pkt;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					tq->stats.drop_oversized_hdr++;
					goto drop_pkt;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);

	/* fill tx descs related to addr & len */
	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop_pkt;

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = skb_vlan_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
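/* Doorbell writes are batched: txNumDeferred counts descriptors queued since
 * the last write to VMXNET3_REG_TXPROD, and the device is only poked once
 * the count reaches the device-supplied txThreshold, trading a little
 * latency for far fewer (expensive) register writes.
 */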
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		if (gdesc->rcd.v4 &&
		    (le32_to_cpu(gdesc->dword[3]) &
		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(gdesc->rcd.frg);
		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
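/* The rx completion descriptor carries v4/v6, tcp/udp and frg bits plus a
 * checksum-ok summary in dword[3]; when the device has fully validated the
 * checksum the skb is marked CHECKSUM_UNNECESSARY, otherwise the raw value
 * reported by the device (if any) is stashed in skb->csum as done above.
 */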
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
		    union Vmxnet3_GenericDesc *gdesc)
{
	u32 hlen, maplen;
	union {
		void *ptr;
		struct ethhdr *eth;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		struct tcphdr *tcp;
	} hdr;
	BUG_ON(gdesc->rcd.tcp == 0);

	maplen = skb_headlen(skb);
	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
		return 0;

	hdr.eth = eth_hdr(skb);
	if (gdesc->rcd.v4) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
		hdr.ptr += sizeof(struct ethhdr);
		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
		hlen = hdr.ipv4->ihl << 2;
		hdr.ptr += hdr.ipv4->ihl << 2;
	} else if (gdesc->rcd.v6) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
		hdr.ptr += sizeof(struct ethhdr);
		/* Use an estimated value, since we also need to handle
		 * TSO case.
		 */
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
		hlen = sizeof(struct ipv6hdr);
		hdr.ptr += sizeof(struct ipv6hdr);
	} else {
		/* Non-IP pkt, dont estimate header length */
		return 0;
	}

	if (hlen + sizeof(struct tcphdr) > maplen)
		return 0;

	return (hlen + (hdr.tcp->doff << 2));
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_pkts = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
	u16 segCnt = 0, mss = 0;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		dma_addr_t new_dma_addr;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring	*ring = NULL;
		if (num_pkts >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
		       rcd->rqID != rq->dataRingQid);
		idx = rcd->rxdIdx;
		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			bool rxDataRingUsed;
			u16 len;

			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       (rcd->rqID != rq->qid &&
				rcd->rqID != rq->dataRingQid));

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;

			rxDataRingUsed =
				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
			len = rxDataRingUsed ? rcd->len : rbi->len;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rxDataRingUsed) {
				size_t sz;

				BUG_ON(rcd->len > rq->data_ring.desc_size);

				ctx->skb = new_skb;
				sz = rcd->rxdIdx * rq->data_ring.desc_size;
				memcpy(new_skb->data,
				       &rq->data_ring.base[sz], rcd->len);
			} else {
				ctx->skb = rbi->skb;

				new_dma_addr =
					dma_map_single(&adapter->pdev->dev,
						       new_skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      new_dma_addr)) {
					dev_kfree_skb(new_skb);
					/* Skb allocation failed, do not
					 * handover this skb to stack. Reuse
					 * it. Drop the existing pkt.
					 */
					rq->stats.rx_buf_alloc_failure++;
					ctx->skb = NULL;
					rq->stats.drop_total++;
					skip_page_frags = true;
					goto rcd_done;
				}

				dma_unmap_single(&adapter->pdev->dev,
						 rbi->dma_addr,
						 rbi->len,
						 PCI_DMA_FROMDEVICE);

				/* Immediate refill */
				rbi->skb = new_skb;
				rbi->dma_addr = new_dma_addr;
				rxd->addr = cpu_to_le64(rbi->dma_addr);
				rxd->len = rbi->len;
			}

#ifdef VMXNET3_RSS
			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
			    (adapter->netdev->features & NETIF_F_RXHASH))
				skb_set_hash(ctx->skb,
					     le32_to_cpu(rcd->rssHash),
					     PKT_HASH_TYPE_L3);
#endif
			skb_put(ctx->skb, rcd->len);

			if (VMXNET3_VERSION_GE_2(adapter) &&
			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
				struct Vmxnet3_RxCompDescExt *rcdlro;
				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;

				segCnt = rcdlro->segCnt;
				WARN_ON_ONCE(segCnt == 0);
				mss = rcdlro->mss;
				if (unlikely(segCnt <= 1))
					segCnt = 0;
			} else {
				segCnt = 0;
			}
		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			if (rcd->len) {
				new_page = alloc_page(GFP_ATOMIC);
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				if (unlikely(!new_page)) {
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					ctx->skb = NULL;
					skip_page_frags = true;
					goto rcd_done;
				}
				new_dma_addr = dma_map_page(&adapter->pdev->dev,
							    new_page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      new_dma_addr)) {
					put_page(new_page);
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					ctx->skb = NULL;
					skip_page_frags = true;
					goto rcd_done;
				}

				dma_unmap_page(&adapter->pdev->dev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);

				/* Immediate refill */
				rbi->page = new_page;
				rbi->dma_addr = new_dma_addr;
				rxd->addr = cpu_to_le64(rbi->dma_addr);
				rxd->len = rbi->len;
			}
		}


		skb = ctx->skb;
		if (rcd->eop) {
			u32 mtu = adapter->netdev->mtu;
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);
			if (!rcd->tcp || !adapter->lro)
				goto not_lro;

			if (segCnt != 0 && mss != 0) {
				skb_shinfo(skb)->gso_type = rcd->v4 ?
					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				skb_shinfo(skb)->gso_size = mss;
				skb_shinfo(skb)->gso_segs = segCnt;
			} else if (segCnt != 0 || skb->len > mtu) {
				u32 hlen;

				hlen = vmxnet3_get_hdr_len(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
				if (hlen == 0)
					goto not_lro;

				skb_shinfo(skb)->gso_type =
					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				if (segCnt != 0) {
					skb_shinfo(skb)->gso_segs = segCnt;
					skb_shinfo(skb)->gso_size =
						DIV_ROUND_UP(skb->len -
							hlen, segCnt);
				} else {
					skb_shinfo(skb)->gso_size = mtu - hlen;
				}
			}
not_lro:
			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
			num_pkts++;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_pkts;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			       struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			dma_free_coherent(&adapter->pdev->dev,
					  rq->rx_ring[i].size
					  * sizeof(struct Vmxnet3_RxDesc),
					  rq->rx_ring[i].base,
					  rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
	}

	if (rq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  rq->rx_ring[0].size * rq->data_ring.desc_size,
				  rq->data_ring.base, rq->data_ring.basePA);
		rq->data_ring.base = NULL;
	}

	if (rq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
				  * sizeof(struct Vmxnet3_RxCompDesc),
				  rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}

	if (rq->buf_info[0]) {
		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
			(rq->rx_ring[0].size + rq->rx_ring[1].size);

		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
				  rq->buf_info_pa);
		rq->buf_info[0] = rq->buf_info[1] = NULL;
	}
}
void
vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

		if (rq->data_ring.base) {
			dma_free_coherent(&adapter->pdev->dev,
					  (rq->rx_ring[0].size *
					  rq->data_ring.desc_size),
					  rq->data_ring.base,
					  rq->data_ring.basePA);
			rq->data_ring.base = NULL;
			rq->data_ring.desc_size = 0;
		}
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = dma_alloc_coherent(
						&adapter->pdev->dev, sz,
						&rq->rx_ring[i].basePA,
						GFP_KERNEL);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
		rq->data_ring.base =
			dma_alloc_coherent(&adapter->pdev->dev, sz,
					   &rq->data_ring.basePA,
					   GFP_KERNEL);
		if (!rq->data_ring.base) {
			netdev_err(adapter->netdev,
				   "rx data ring will be disabled\n");
			adapter->rxdataring_enabled = false;
		}
	} else {
		rq->data_ring.base = NULL;
		rq->data_ring.desc_size = 0;
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
						&rq->comp_ring.basePA,
						GFP_KERNEL);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
				 GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}

	if (!adapter->rxdataring_enabled)
		vmxnet3_rq_destroy_all_rxdataring(adapter);

	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}
/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocate for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX: {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++)
			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
	default:
		vmxnet3_intr(0, adapter->netdev);
		break;
	}

}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				netdev_err(adapter->netdev,
					   "Failed to request irq for MSIX, "
					   "%s, error %d\n",
					   adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to request irq (intr type:%d), error %d\n",
			   intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		netdev_info(adapter->netdev,
			    "intr type %u, mode %u, %u vectors allocated\n",
			    intr->type, intr->mask_mode, intr->num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}

static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	dma_addr_t new_table_pa = 0;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;

				rxConf->mfTableLen = cpu_to_le16(sz);
				new_table_pa = dma_map_single(
							&adapter->pdev->dev,
							new_table,
							sz,
							PCI_DMA_TODEVICE);
			}

			if (!dma_mapping_error(&adapter->pdev->dev,
					       new_table_pa)) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTablePA = cpu_to_le64(new_table_pa);
			} else {
				netdev_info(netdev,
					    "failed to copy mcast list, setting ALL_MULTI\n");
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (new_table_pa)
		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
	kfree(new_table);
}
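
/*
 * The rxMode write and the VLAN/MAC filter commands are issued in one
 * cmd_lock critical section so the device observes a consistent filter
 * configuration.  The multicast bounce buffer can be unmapped and freed
 * right after the section, presumably because the device consumes the
 * table synchronously while executing VMXNET3_CMD_UPDATE_MAC_FILTERS.
 */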

static void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}

/*
 * Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->netdev->features & NETIF_F_RXCSUM)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->netdev->features & NETIF_F_LRO) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen          = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx        = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen           = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx         = rq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(adapter)) {
			rqc->rxDataRingBasePA =
				cpu_to_le64(rq->data_ring.basePA);
			rqc->rxDataRingDescSize =
				cpu_to_le16(rq->data_ring.desc_size);
		}
	}

#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;

		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));

		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
		devRead->rssConfDesc.confPA =
			cpu_to_le64(adapter->rss_conf_pa);
	}

#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
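
/*
 * Every field in the shared area is little-endian from the device's point
 * of view, hence the cpu_to_le16/32/64() conversion on each store above.
 * The driverInfo.gos bit-field is filled in host order first and then
 * byte-swapped as one u32, since individual bit-fields cannot be converted
 * piecemeal.
 */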

static void
vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
	unsigned long flags;

	if (!VMXNET3_VERSION_GE_3(adapter))
		return;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen =
		cpu_to_le32(sizeof(*adapter->coal_conf));
	cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);

	if (adapter->default_coal_mode) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_COALESCE);
	} else {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_SET_COALESCE);
	}

	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
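
/*
 * Coalescing is a version-3 feature.  With default_coal_mode set, the
 * driver only queries the device's current scheme (GET_COALESCE);
 * otherwise it pushes the scheme previously stored in adapter->coal_conf
 * (SET_COALESCE).  varConf points the device at that DMA buffer either
 * way.
 */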

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to init rx queue error %d\n", err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to setup irq for error %d\n", err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		netdev_err(adapter->netdev,
			   "Failed to activate dev: error %u\n", ret);
		err = -EINVAL;
		goto activate_err;
	}

	vmxnet3_init_coalesce(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}
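
/*
 * Activation order matters here: rings are initialized and irqs requested
 * before the shared-area address is handed to the device, ACTIVATE_DEV
 * must return 0 before the rx producer registers are written, and
 * interrupts are enabled only once the rx filters and link state are in
 * place.
 */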

int
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	return 0;
}

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
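
/*
 * The MAC address is split across two registers: MACL takes the first four
 * bytes as one little-endian u32 (the *(u32 *)mac cast relies on the
 * device's little-endian register layout), and MACH takes bytes 4 and 5 in
 * its low 16 bits.
 */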

static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}

/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to request region for adapter: error %d\n", err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		dev_err(&pdev->dev, "Failed to map bar0\n");
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		dev_err(&pdev->dev, "Failed to map bar1\n");
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}

static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	ring1_size = (ring1_size + sz - 1) / sz * sz;
	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
			   sz * sz);
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}
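
/*
 * Worked example, assuming VMXNET3_MAX_SKB_BUF_SIZE = 3072,
 * VMXNET3_MAX_ETH_HDR_SIZE = 22 and PAGE_SIZE = 4096 (the usual values
 * from vmxnet3_int.h on x86): with an MTU of 9000, skb_buf_size is capped
 * at 3072, the remaining 9000 - 3072 + 22 = 5950 bytes need two full
 * pages, so rx_buf_per_pkt = 1 + DIV_ROUND_UP(5950, 4096) = 3, and ring0
 * is rounded to a multiple of 3 * VMXNET3_RING_SIZE_ALIGN.
 */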

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size,
		      u16 txdata_desc_size, u16 rxdata_desc_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->txdata_desc_size = txdata_desc_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot do away with
		 * lesser number of queues than what we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);

	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		rq->data_ring.desc_size = rxdata_desc_size;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				netdev_err(adapter->netdev,
					   "Could not allocate any rx queues. "
					   "Aborting.\n");
				goto queue_err;
			} else {
				netdev_info(adapter->netdev,
					    "Number of rx queues changed "
					    "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}

	if (!adapter->rxdataring_enabled)
		vmxnet3_rq_destroy_all_rxdataring(adapter);

	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}
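
/*
 * Note the asymmetry: a tx queue creation failure aborts the whole setup
 * (the tx queue count has already been published to the stack and cannot
 * shrink), while an rx queue failure past the first queue merely lowers
 * num_rx_queues to however many were actually created.
 */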

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	if (VMXNET3_VERSION_GE_3(adapter)) {
		unsigned long flags;
		u16 txdata_desc_size;

		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
							 VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
			adapter->txdata_desc_size =
				sizeof(struct Vmxnet3_TxDataDesc);
		} else {
			adapter->txdata_desc_size = txdata_desc_size;
		}
	} else {
		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
	}

	err = vmxnet3_create_queues(adapter,
				    adapter->tx_ring_size,
				    adapter->rx_ring_size,
				    adapter->rx_ring2_size,
				    adapter->txdata_desc_size,
				    adapter->rxdata_desc_size);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}

static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}

static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-create rx queues, "
				   "error %d. Closing it.\n", err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-activate, error %d. "
				   "Closing it\n", err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_CTAG_TX |
				  NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}

static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
 *	 vectors were enabled.
 *	the number of vectors which were enabled otherwise (this number is
 *	 greater than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
{
	int ret = pci_enable_msix_range(adapter->pdev,
					adapter->intr.msix_entries, nvec, nvec);

	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
		dev_err(&adapter->netdev->dev,
			"Failed to enable %d MSI-X, trying %d\n",
			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);

		ret = pci_enable_msix_range(adapter->pdev,
					    adapter->intr.msix_entries,
					    VMXNET3_LINUX_MIN_MSIX_VECT,
					    VMXNET3_LINUX_MIN_MSIX_VECT);
	}

	if (ret < 0) {
		dev_err(&adapter->netdev->dev,
			"Failed to enable MSI-X, error: %d\n", ret);
	}

	return ret;
}


#endif /* CONFIG_PCI_MSI */
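
/*
 * pci_enable_msix_range() with minvec == maxvec is all-or-nothing, so the
 * helper above either gets exactly nvec vectors, retries once at the
 * driver's floor of VMXNET3_LINUX_MIN_MSIX_VECT on -ENOSPC, or returns an
 * error for the caller to fall back to MSI.
 */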

static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i, nvec;

		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
			1 : adapter->num_tx_queues;
		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
			0 : adapter->num_rx_queues;
		nvec += 1;	/* for link event */
		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;

		for (i = 0; i < nvec; i++)
			adapter->intr.msix_entries[i].entry = i;

		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
		if (nvec < 0)
			goto msix_err;

		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				netdev_err(adapter->netdev,
					   "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
			}
		}

		adapter->intr.num_intrs = nvec;
		return;

msix_err:
		/* If we cannot allocate MSIx vectors use only one rx queue */
		dev_info(&adapter->pdev->dev,
			 "Failed to enable MSI-X, error %d. "
			 "Limiting #rx queues to 1, try MSI.\n", nvec);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	dev_info(&adapter->netdev->dev,
		 "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
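
/*
 * Vector budget sketch, following the nvec arithmetic above for 4 tx and
 * 4 rx queues: DONTSHARE needs 4 + 4 + 1 = 9 vectors, BUDDYSHARE (tx i
 * paired with rx i) needs 4 + 0 + 1 = 5, and TXSHARE (all tx on one
 * vector) needs 1 + 4 + 1 = 6.
 */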

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}

static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	netdev_err(adapter->netdev, "tx hang\n");
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}

static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		netdev_notice(adapter->netdev, "resetting\n");
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		netdev_info(adapter->netdev, "already closed\n");
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

static int
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	dev_info(&pdev->dev,
		 "# of Tx queues : %d, # of Rx queues : %d\n",
		 num_tx_queues, num_rx_queues);

	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;

	spin_lock_init(&adapter->cmd_lock);
	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
					     sizeof(struct vmxnet3_adapter),
					     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
		dev_err(&pdev->dev, "Failed to map dma\n");
		err = -EFAULT;
		goto err_dma_map;
	}
	adapter->shared = dma_alloc_coherent(
				&adapter->pdev->dev,
				sizeof(struct Vmxnet3_DriverShared),
				&adapter->shared_pa, GFP_KERNEL);
	if (!adapter->shared) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;
	adapter->rx_buf_per_pkt = 1;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
						&adapter->queue_desc_pa,
						GFP_KERNEL);

	if (!adapter->tqd_start) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
					      sizeof(struct Vmxnet3_PMConf),
					      &adapter->pm_conf_pa,
					      GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
					       sizeof(struct UPT1_RSSConf),
					       &adapter->rss_conf_pa,
					       GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(adapter,
				       VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		adapter->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(adapter,
				       VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		adapter->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(adapter,
				       VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		adapter->version = VMXNET3_REV_1 + 1;
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}
	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	if (VMXNET3_VERSION_GE_3(adapter)) {
		adapter->coal_conf =
			dma_alloc_coherent(&adapter->pdev->dev,
					   sizeof(struct Vmxnet3_CoalesceScheme),
					   &adapter->coal_conf_pa,
					   GFP_KERNEL);
		if (!adapter->coal_conf) {
			err = -ENOMEM;
			goto err_ver;
		}
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
		adapter->default_coal_mode = true;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;

	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		netdev->hw_features |= NETIF_F_RXHASH;
		netdev->features |= NETIF_F_RXHASH;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	netif_carrier_off(netdev);
	err = register_netdev(netdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;

err_register:
	if (VMXNET3_VERSION_GE_3(adapter)) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct Vmxnet3_CoalesceScheme),
				  adapter->coal_conf, adapter->coal_conf_pa);
	}
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
err_alloc_queue_desc:
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
err_alloc_shared:
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
err_dma_map:
	free_netdev(netdev);
	return err;
}

static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	if (VMXNET3_VERSION_GE_3(adapter)) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct Vmxnet3_CoalesceScheme),
				  adapter->coal_conf, adapter->coal_conf_pa);
	}
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	free_netdev(netdev);
}

static void vmxnet3_shutdown_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	/* Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
			     &adapter->state)) {
		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
		return;
	}
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters, starting from filter slot 0. */
	i = 0;
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
			2 * sizeof(u32);	/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
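
/*
 * The wake-up masks are bitmaps over pattern bytes (one bit per byte,
 * eight pattern bytes per mask byte).  For the ARP filter above: mask[1] =
 * 0x30 selects bytes 12-13 (the EtherType), mask[2] = 0x30 selects bytes
 * 20-21 (the ARP opcode), and mask[4]/mask[5] = 0xC0/0x03 select bytes
 * 38-41 (the target IP), matching the offsets written into the pattern.
 */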

static int
vmxnet3_resume(struct device *device)
{
	int err;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	vmxnet3_alloc_intr_resources(adapter);

	/* During hibernate and suspend, device has to be reinitialized as the
	 * device state need not be preserved.
	 */

	/* Need not check adapter state as other reset tasks cannot run during
	 * device resume.
	 */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);

	vmxnet3_reset_dev(adapter);
	err = vmxnet3_activate_dev(adapter);
	if (err != 0) {
		netdev_err(netdev,
			   "failed to re-activate on resume, error: %d", err);
		vmxnet3_force_close(adapter);
		return err;
	}

	netif_device_attach(netdev);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
	.freeze = vmxnet3_suspend,
	.restore = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
	.shutdown	= vmxnet3_shutdown_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);