/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
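
/*
 * Note (summary of the two helpers above): the per-vector IMR register is
 * a mask register, so writing 0 unmasks (enables) the interrupt and
 * writing 1 masks (disables) it.
 */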


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}

/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock(&adapter->cmd_lock);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock(&adapter->cmd_lock);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are
 * used to read from and write to the ABI correctly.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is
 * used to translate the double words into the required format.
 * To avoid touching bits in a shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the following
 * functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}
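
/*
 * Illustration (not part of the driver logic): reading the gen bit of a tx
 * descriptor through this helper, as the macros below do, amounts to
 *
 *	get_bitfield32(((const __le32 *)txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT,
 *		       VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE);
 *
 * i.e. one le32_to_cpu() of the containing double word followed by a mask
 * and a shift, so no individual bitfield is ever accessed in device layout.
 */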


#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


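/*
 * Ring ownership convention (a summary of existing behaviour, not new
 * logic): each completion descriptor carries a generation bit.  The driver
 * keeps the value it expects in comp_ring.gen, and
 * vmxnet3_comp_ring_adv_next2proc() flips it whenever next2proc wraps
 * around the ring.  A descriptor is ready for the driver exactly while
 *
 *	VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen
 *
 * which is the loop condition used directly below.
 */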
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}

static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}

/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}

static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
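
/*
 * For reference, dword[2] of an rx descriptor written above packs, from
 * high to low bits, the ring's generation bit, the buffer type (HEAD for
 * skb buffers, BODY for page buffers) and the buffer length:
 *
 *	gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
 *				   | (btype << VMXNET3_RXD_BTYPE_SHIFT)
 *				   | rbi->len);
 */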


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
				       skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}


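/*
 * Descriptor layout produced by vmxnet3_map_pkt() below (summary):
 *
 *   SOP desc  - points at the headers copied into the data ring, when
 *               ctx->copy_size != 0
 *   desc(s)   - the rest of the linear part, split into chunks of at most
 *               VMXNET3_MAX_TX_BUF_SIZE bytes each
 *   one desc  - for each page fragment of the skb
 *
 * The SOP descriptor is deliberately written with the previous generation
 * bit, so the device ignores the whole chain until vmxnet3_tq_xmit() flips
 * that bit as its final step.
 */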
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				else if (iph->protocol == IPPROTO_UDP)
					/*
					 * Use tcp header size so that bytes to
					 * be copied are more than required by
					 * the device.
					 */
					ctx->l4_hdr_size =
							sizeof(struct tcphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
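
/*
 * Note on the seeding above: the TCP checksum is pre-loaded with a
 * pseudo-header sum computed over a zero length (the 0 passed to
 * csum_tcpudp_magic()/csum_ipv6_magic()); the assumption is that the
 * device folds in the real length of each segment it emits when it
 * finishes the checksum.
 */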


/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
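
/*
 * Summary of the cases above: when checksum offload is on and the device
 * validated both the IP and TCP/UDP checksums (VMXNET3_RCD_CSUM_OK), the
 * skb is marked CHECKSUM_UNNECESSARY; failing that, a raw device checksum,
 * if present, is passed up with the skb; in every remaining case the skb
 * is left unchecksummed.
 */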


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
			&rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			&rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}

	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}


static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}


#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}


/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */


/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}
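
/*
 * MSI-X vector layout established above: one vector per tx queue (unless
 * VMXNET3_INTR_TXSHARE collapses them into one, or VMXNET3_INTR_BUDDYSHARE
 * reuses the rx vectors), then one vector per rx queue, and a final vector
 * reserved for device events.
 */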


static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}

d1a890fa
SB
1860static void
1861vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1862{
1863 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1864 struct Vmxnet3_DriverShared *shared = adapter->shared;
1865 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
83d0feff 1866 unsigned long flags;
d1a890fa
SB
1867
1868 if (grp) {
1869 /* add vlan rx stripping. */
1870 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1871 int i;
d1a890fa
SB
1872 adapter->vlan_grp = grp;
1873
d1a890fa
SB
1874 /*
1875 * Clear entire vfTable; then enable untagged pkts.
1876 * Note: setting one entry in vfTable to non-zero turns
1877 * on VLAN rx filtering.
1878 */
1879 for (i = 0; i < VMXNET3_VFT_SIZE; i++)
1880 vfTable[i] = 0;
1881
1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
83d0feff 1883 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa
SB
1884 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1885 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
83d0feff 1886 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
1887 } else {
1888 printk(KERN_ERR "%s: vlan_rx_register when device has "
1889 "no NETIF_F_HW_VLAN_RX\n", netdev->name);
1890 }
1891 } else {
1892 /* remove vlan rx stripping. */
1893 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1894 adapter->vlan_grp = NULL;
1895
3843e515 1896 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
d1a890fa
SB
1897 int i;
1898
1899 for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
1900 /* clear entire vfTable; this also disables
1901 * VLAN rx filtering
1902 */
1903 vfTable[i] = 0;
1904 }
83d0feff 1905 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa
SB
1906 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1907 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
83d0feff 1908 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
1909 }
1910 }
1911}
1912
1913
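/*
 * Re-populate the VLAN filter table from the cached vlan_grp, keeping
 * untagged traffic enabled if any VLAN is active. Used when driver_shared
 * is rebuilt, since the table in shared memory starts out zeroed.
 */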
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}


static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}


static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}


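/*
 * Flatten the netdev multicast list into a contiguous array of MAC
 * addresses for the device. Returns NULL if the list would overflow the
 * u16 mfTableLen field or the atomic allocation fails; the caller then
 * falls back to ALL_MULTI.
 */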
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}


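/*
 * Push the current rx filter state (promiscuous/broadcast/multicast flags
 * and the multicast table) to the device. UPDATE_RX_MODE is issued only
 * when the mode actually changed, but the MAC filters are always refreshed.
 */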
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	kfree(new_table);
}

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}


/*
 * Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->rxcsum)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->lro) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
		tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
		tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA = cpu_to_le64(virt_to_phys(
							rq->buf_info));
		rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx = rq->comp_ring.intr_idx;
	}

#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = i % adapter->num_rx_queues;

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(*rssConf);
		devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
	}

#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}


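/*
 * Bring the device to the active state: init tx/rx queues, request IRQs,
 * fill in driver_shared, hand its PA to the device via the DSAL/DSAH
 * registers, then issue ACTIVATE_DEV and prime the rx producer registers.
 */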
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}


void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}


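/*
 * The inverse of vmxnet3_activate_dev(): tell the device to quiesce, mask
 * all interrupts, stop NAPI and the tx queues, and release rings and IRQs.
 * The QUIESCED state bit makes repeated calls a no-op.
 */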
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}


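/*
 * The device keeps the MAC address split across two registers: MACL holds
 * the first four bytes, MACH the remaining two in its low 16 bits.
 */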
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}


/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}


static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}


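/*
 * Derive skb_buf_size and rx_buf_per_pkt from the current MTU, then round
 * the ring0 size to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
 * and propagate the resulting sizes to every rx queue.
 */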
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}


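/*
 * Allocate all tx and rx queue rings. A tx queue allocation failure is
 * fatal, while an rx failure after queue 0 simply shrinks num_rx_queues
 * to the number of queues created so far.
 */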
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		tq->tx_ring.size = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot make do with
		 * fewer queues than what we asked for.
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				printk(KERN_ERR "Could not allocate any rx "
				       "queues. Aborting.\n");
				goto queue_err;
			} else {
				printk(KERN_INFO "Number of rx queues changed "
				       "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}


static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}


void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}


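/*
 * MTU changes require re-sizing the rx rings: when the interface is
 * running, quiesce and reset the device, rebuild the rx queues for the new
 * buffer layout, then re-activate. On failure the device is force-closed.
 */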
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	if (new_mtu > 1500 && !adapter->jumbo_frame)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queues,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}


static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->features = NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER |
		NETIF_F_TSO |
		NETIF_F_TSO6 |
		NETIF_F_LRO;

	printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");

	adapter->rxcsum = true;
	adapter->jumbo_frame = true;
	adapter->lro = true;

	if (dma64) {
		netdev->features |= NETIF_F_HIGHDMA;
		printk(" highDMA");
	}

	netdev->vlan_features = netdev->features;
	printk("\n");
}


static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

#ifdef CONFIG_PCI_MSI

/*
 * Enable MSI-X vectors.
 * Returns:
 *	0 on successful enabling of required vectors,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
 *	required could be enabled,
 *	number of vectors which can be enabled otherwise (this number is
 *	smaller than VMXNET3_LINUX_MIN_MSIX_VECT).
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			printk(KERN_ERR "Failed to enable MSI-X for %s, error"
			       " %d\n", adapter->netdev->name, err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If it fails to enable the required number of MSI-X
			 * vectors, try enabling the minimum number required.
			 */
			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
			       " %d instead\n", vectors, adapter->netdev->name,
			       vector_threshold);
			vectors = vector_threshold;
		}
	}

	printk(KERN_INFO "Number of MSI-X interrupts which can be allocated "
	       "is lower than min threshold required.\n");
	return err;
}


#endif /* CONFIG_PCI_MSI */

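/*
 * Ask the device for its preferred interrupt type, then fall back along
 * MSI-X -> MSI -> INTx as allocations fail. If fewer MSI-X vectors than
 * one per queue are available, rx is limited to a single queue.
 */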
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;

	/* intr settings */
	spin_lock(&adapter->cmd_lock);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock(&adapter->cmd_lock);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSI-X vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				printk(KERN_ERR "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSI-X vectors use only one rx queue */
		printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
		       "#rx queues : 1, try MSI\n", adapter->netdev->name, err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}


static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}


static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}


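/*
 * Probe: size the tx/rx queue sets from the number of online CPUs (capped
 * by the device maximums), allocate the shared area and queue descriptors,
 * check hardware and UPT version compatibility, then set up interrupts,
 * NAPI and the netdev before registering it.
 */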
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_get_stats = vmxnet3_get_stats,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
	       num_tx_queues, num_rx_queues);

	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
		       "%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
			     &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							adapter->num_tx_queues);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);

	adapter->share_intr = irq_share_mode;
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
	    adapter->num_tx_queues != adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		printk(KERN_INFO "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}


static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}


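/*
 * Power management. Suspend builds wake-up filters (unicast MAC, ARP
 * requests for the interface's IPv4 address, magic packet, as selected by
 * adapter->wol) and pushes them with UPDATE_PMCFG; resume clears the
 * filters and brings interrupts and NAPI back up.
 */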
#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
			2 * sizeof(u32);	/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}


static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);