/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

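/*
 * Illustrative sketch (hypothetical helper, guarded out of the build):
 * event handling above is "ack first, then act".  The ECR bits are
 * latched notifications from the device; writing the same mask back to
 * VMXNET3_REG_ECR clears them before the handlers run, so an event that
 * arrives while we are still handling re-latches instead of being lost.
 */
#if 0
static void example_ack_then_handle(struct vmxnet3_adapter *adapter)
{
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;
	vmxnet3_ack_events(adapter, events);	/* clear before dispatch */
	/* ...then dispatch on the VMXNET3_ECR_* bits as above... */
}
#endif
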
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is : double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly
 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into required format.
 * In order to avoid touching bits in shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}



#endif /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */

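/*
 * Illustrative sketch (hypothetical helper, guarded out of the build):
 * with the accessors above, hot-path code can test descriptor bits
 * without caring about endianness.  On little-endian builds
 * VMXNET3_TXDESC_GET_GEN() is a plain bitfield read; on big-endian
 * builds it byte-swaps the containing dword via get_bitfield32() first.
 */
#if 0
static bool example_txd_gen_matches(const struct Vmxnet3_TxDesc *txd,
				    u32 ring_gen)
{
	return VMXNET3_TXDESC_GET_GEN(txd) == ring_gen;
}
#endif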

static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}

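/*
 * Illustrative sketch (hypothetical helper, guarded out of the build):
 * the BUG_ON above enforces the classic "keep one slot empty" ring
 * invariant.  If next2fill were ever allowed to catch up with next2comp
 * on a non-empty ring, a full ring and an empty ring would look
 * identical to the device.  Assuming the usual modular ring arithmetic,
 * a minimal equivalent of the driver's availability computation is:
 */
#if 0
static u32 example_ring_desc_avail(u32 next2fill, u32 next2comp, u32 size)
{
	/* one descriptor is always left unused, hence the trailing -1 */
	return (next2comp > next2fill ? 0 : size) + next2comp -
	       next2fill - 1;
}
#endif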

static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 *   For a tso pkt, relevant headers are L2/3/4 including options
 *   For a pkt requesting csum offloading, they are L2/3 and may include L4
 *   if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size))) {
						goto err;
					}
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

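/*
 * Illustrative sketch (hypothetical helper, guarded out of the build):
 * vmxnet3_prepare_tso() seeds tcph->check with only the pseudo-header
 * checksum (length 0, proto TCP); the device fills in the per-segment
 * payload checksum when it splits the packet, and it also rewrites the
 * IP header checksum, which is why iph->check is zeroed.  An IPv4-only
 * helper doing the same seeding would look like:
 */
#if 0
static void example_seed_tso_csum(struct iphdr *iph, struct tcphdr *tcph)
{
	iph->check = 0;		/* recomputed per segment by the device */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					 IPPROTO_TCP, 0);
}
#endif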

/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

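/*
 * Illustrative sketch (hypothetical helper, guarded out of the build):
 * the "conservatively estimate" count at the top of vmxnet3_tq_xmit()
 * reflects how descriptors are consumed: the linear part needs one
 * descriptor per VMXNET3_MAX_TX_BUF_SIZE chunk, each page fragment
 * needs one, and one more covers the headers copied into the data
 * ring.  Assuming VMXNET3_TXD_NEEDED() is the usual ceiling division,
 * the estimate amounts to:
 */
#if 0
static u32 example_txd_needed(u32 headlen, u32 nr_frags)
{
	return (headlen + VMXNET3_MAX_TX_BUF_SIZE - 1) /
	       VMXNET3_MAX_TX_BUF_SIZE + nr_frags + 1;
}
#endif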

static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
			&rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			&rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
						&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}


static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

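/*
 * Illustrative sketch (hypothetical skeleton, guarded out of the
 * build): both poll routines here follow the standard NAPI contract.
 * When the whole budget is consumed, more work may be pending, so the
 * interrupt stays masked and the core polls again; only on a partial
 * quota does the handler call napi_complete() and re-arm the device
 * interrupt.
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = 0;	/* would be: rx descriptors actually processed */

	if (done < budget) {
		napi_complete(napi);
		/* re-enable the corresponding device interrupt here */
	}
	return done;
}
#endif
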
1535/*
1536 * NAPI polling function for MSI-X mode with multiple Rx queues
1537 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1538 */
1539
1540static int
1541vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1542{
1543 struct vmxnet3_rx_queue *rq = container_of(napi,
1544 struct vmxnet3_rx_queue, napi);
1545 struct vmxnet3_adapter *adapter = rq->adapter;
d1a890fa
SB
1546 int rxd_done;
1547
09c5088e
SB
1548 /* When sharing interrupt with corresponding tx queue, process
1549 * tx completions in that queue as well
1550 */
1551 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1552 struct vmxnet3_tx_queue *tq =
1553 &adapter->tx_queue[rq - adapter->rx_queue];
1554 vmxnet3_tq_tx_complete(tq, adapter);
1555 }
1556
1557 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
d1a890fa
SB
1558
1559 if (rxd_done < budget) {
1560 napi_complete(napi);
09c5088e 1561 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
d1a890fa
SB
1562 }
1563 return rxd_done;
1564}
1565
1566
09c5088e
SB
1567#ifdef CONFIG_PCI_MSI
1568
1569/*
1570 * Handle completion interrupts on tx queues
1571 * Returns whether or not the intr is handled
1572 */
1573
1574static irqreturn_t
1575vmxnet3_msix_tx(int irq, void *data)
1576{
1577 struct vmxnet3_tx_queue *tq = data;
1578 struct vmxnet3_adapter *adapter = tq->adapter;
1579
1580 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1581 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1582
1583 /* Handle the case where only one irq is allocate for all tx queues */
1584 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1585 int i;
1586 for (i = 0; i < adapter->num_tx_queues; i++) {
1587 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1588 vmxnet3_tq_tx_complete(txq, adapter);
1589 }
1590 } else {
1591 vmxnet3_tq_tx_complete(tq, adapter);
1592 }
1593 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1594
1595 return IRQ_HANDLED;
1596}
1597
1598
1599/*
1600 * Handle completion interrupts on rx queues. Returns whether or not the
1601 * intr is handled
1602 */
1603
1604static irqreturn_t
1605vmxnet3_msix_rx(int irq, void *data)
1606{
1607 struct vmxnet3_rx_queue *rq = data;
1608 struct vmxnet3_adapter *adapter = rq->adapter;
1609
1610 /* disable intr if needed */
1611 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1612 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1613 napi_schedule(&rq->napi);
1614
1615 return IRQ_HANDLED;
1616}
1617
1618/*
1619 *----------------------------------------------------------------------------
1620 *
1621 * vmxnet3_msix_event --
1622 *
1623 * vmxnet3 msix event intr handler
1624 *
1625 * Result:
1626 * whether or not the intr is handled
1627 *
1628 *----------------------------------------------------------------------------
1629 */
1630
1631static irqreturn_t
1632vmxnet3_msix_event(int irq, void *data)
1633{
1634 struct net_device *dev = data;
1635 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1636
1637 /* disable intr if needed */
1638 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1639 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1640
1641 if (adapter->shared->ecr)
1642 vmxnet3_process_events(adapter);
1643
1644 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1645
1646 return IRQ_HANDLED;
1647}
1648
1649#endif /* CONFIG_PCI_MSI */
1650
1651
d1a890fa
SB
1652/* Interrupt handler for vmxnet3 */
1653static irqreturn_t
1654vmxnet3_intr(int irq, void *dev_id)
1655{
1656 struct net_device *dev = dev_id;
1657 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1658
09c5088e 1659 if (adapter->intr.type == VMXNET3_IT_INTX) {
d1a890fa
SB
1660 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1661 if (unlikely(icr == 0))
1662 /* not ours */
1663 return IRQ_NONE;
1664 }
1665
1666
1667 /* disable intr if needed */
1668 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
09c5088e 1669 vmxnet3_disable_all_intrs(adapter);
d1a890fa 1670
09c5088e 1671 napi_schedule(&adapter->rx_queue[0].napi);
d1a890fa
SB
1672
1673 return IRQ_HANDLED;
1674}
1675
1676#ifdef CONFIG_NET_POLL_CONTROLLER
1677
d1a890fa
SB
1678/* netpoll callback. */
1679static void
1680vmxnet3_netpoll(struct net_device *netdev)
1681{
1682 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa 1683
09c5088e
SB
1684 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1685 vmxnet3_disable_all_intrs(adapter);
1686
1687 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1688 vmxnet3_enable_all_intrs(adapter);
d1a890fa 1689
d1a890fa 1690}
09c5088e 1691#endif /* CONFIG_NET_POLL_CONTROLLER */
d1a890fa
SB
1692
1693static int
1694vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1695{
09c5088e
SB
1696 struct vmxnet3_intr *intr = &adapter->intr;
1697 int err = 0, i;
1698 int vector = 0;
d1a890fa 1699
8f7e524c 1700#ifdef CONFIG_PCI_MSI
d1a890fa 1701 if (adapter->intr.type == VMXNET3_IT_MSIX) {
09c5088e
SB
1702 for (i = 0; i < adapter->num_tx_queues; i++) {
1703 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1704 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1705 adapter->netdev->name, vector);
1706 err = request_irq(
1707 intr->msix_entries[vector].vector,
1708 vmxnet3_msix_tx, 0,
1709 adapter->tx_queue[i].name,
1710 &adapter->tx_queue[i]);
1711 } else {
1712 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1713 adapter->netdev->name, vector);
1714 }
1715 if (err) {
1716 dev_err(&adapter->netdev->dev,
1717 "Failed to request irq for MSIX, %s, "
1718 "error %d\n",
1719 adapter->tx_queue[i].name, err);
1720 return err;
1721 }
1722
1723 /* Handle the case where only 1 MSIx was allocated for
1724 * all tx queues */
1725 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1726 for (; i < adapter->num_tx_queues; i++)
1727 adapter->tx_queue[i].comp_ring.intr_idx
1728 = vector;
1729 vector++;
1730 break;
1731 } else {
1732 adapter->tx_queue[i].comp_ring.intr_idx
1733 = vector++;
1734 }
1735 }
1736 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1737 vector = 0;
1738
1739 for (i = 0; i < adapter->num_rx_queues; i++) {
1740 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1741 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1742 adapter->netdev->name, vector);
1743 else
1744 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1745 adapter->netdev->name, vector);
1746 err = request_irq(intr->msix_entries[vector].vector,
1747 vmxnet3_msix_rx, 0,
1748 adapter->rx_queue[i].name,
1749 &(adapter->rx_queue[i]));
1750 if (err) {
1751 printk(KERN_ERR "Failed to request irq for MSIX"
1752 ", %s, error %d\n",
1753 adapter->rx_queue[i].name, err);
1754 return err;
1755 }
1756
1757 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1758 }
1759
1760 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1761 adapter->netdev->name, vector);
1762 err = request_irq(intr->msix_entries[vector].vector,
1763 vmxnet3_msix_event, 0,
1764 intr->event_msi_vector_name, adapter->netdev);
1765 intr->event_intr_idx = vector;
1766
1767 } else if (intr->type == VMXNET3_IT_MSI) {
1768 adapter->num_rx_queues = 1;
d1a890fa
SB
1769 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1770 adapter->netdev->name, adapter->netdev);
09c5088e 1771 } else {
115924b6 1772#endif
09c5088e 1773 adapter->num_rx_queues = 1;
d1a890fa
SB
1774 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1775 IRQF_SHARED, adapter->netdev->name,
1776 adapter->netdev);
09c5088e 1777#ifdef CONFIG_PCI_MSI
d1a890fa 1778 }
09c5088e
SB
1779#endif
1780 intr->num_intrs = vector + 1;
1781 if (err) {
d1a890fa 1782 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
09c5088e
SB
1783 ":%d\n", adapter->netdev->name, intr->type, err);
1784 } else {
1785 /* Number of rx queues will not change after this */
1786 for (i = 0; i < adapter->num_rx_queues; i++) {
1787 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1788 rq->qid = i;
1789 rq->qid2 = i + adapter->num_rx_queues;
1790 }
d1a890fa
SB
1791
1792
d1a890fa 1793
09c5088e
SB
1794 /* init our intr settings */
1795 for (i = 0; i < intr->num_intrs; i++)
1796 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1797 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1798 adapter->intr.event_intr_idx = 0;
1799 for (i = 0; i < adapter->num_tx_queues; i++)
1800 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1801 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1802 }
d1a890fa
SB
1803
1804 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
09c5088e
SB
1805 "allocated\n", adapter->netdev->name, intr->type,
1806 intr->mask_mode, intr->num_intrs);
d1a890fa
SB
1807 }
1808
1809 return err;
1810}
1811
1812
1813static void
1814vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1815{
09c5088e
SB
1816 struct vmxnet3_intr *intr = &adapter->intr;
1817 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
d1a890fa 1818
09c5088e 1819 switch (intr->type) {
8f7e524c 1820#ifdef CONFIG_PCI_MSI
d1a890fa
SB
1821 case VMXNET3_IT_MSIX:
1822 {
09c5088e 1823 int i, vector = 0;
d1a890fa 1824
09c5088e
SB
1825 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1826 for (i = 0; i < adapter->num_tx_queues; i++) {
1827 free_irq(intr->msix_entries[vector++].vector,
1828 &(adapter->tx_queue[i]));
1829 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1830 break;
1831 }
1832 }
1833
1834 for (i = 0; i < adapter->num_rx_queues; i++) {
1835 free_irq(intr->msix_entries[vector++].vector,
1836 &(adapter->rx_queue[i]));
1837 }
1838
1839 free_irq(intr->msix_entries[vector].vector,
1840 adapter->netdev);
1841 BUG_ON(vector >= intr->num_intrs);
d1a890fa
SB
1842 break;
1843 }
8f7e524c 1844#endif
d1a890fa
SB
1845 case VMXNET3_IT_MSI:
1846 free_irq(adapter->pdev->irq, adapter->netdev);
1847 break;
1848 case VMXNET3_IT_INTX:
1849 free_irq(adapter->pdev->irq, adapter->netdev);
1850 break;
1851 default:
1852 BUG_ON(true);
1853 }
1854}
1855
d1a890fa
SB
1856static void
1857vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1858{
1859 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1860 struct Vmxnet3_DriverShared *shared = adapter->shared;
1861 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1862
1863 if (grp) {
1864 /* add vlan rx stripping. */
1865 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1866 int i;
1867 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1868 adapter->vlan_grp = grp;
1869
1870 /* update FEATURES to device */
3843e515 1871 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
d1a890fa
SB
1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1873 VMXNET3_CMD_UPDATE_FEATURE);
1874 /*
1875 * Clear entire vfTable; then enable untagged pkts.
1876 * Note: setting one entry in vfTable to non-zero turns
1877 * on VLAN rx filtering.
1878 */
1879 for (i = 0; i < VMXNET3_VFT_SIZE; i++)
1880 vfTable[i] = 0;
1881
1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1883 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1884 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1885 } else {
1886 printk(KERN_ERR "%s: vlan_rx_register when device has "
1887 "no NETIF_F_HW_VLAN_RX\n", netdev->name);
1888 }
1889 } else {
1890 /* remove vlan rx stripping. */
1891 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1892 adapter->vlan_grp = NULL;
1893
3843e515 1894 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
1895 int i;
1896
1897 for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
1898 /* clear entire vfTable; this also disables
1899 * VLAN rx filtering
1900 */
1901 vfTable[i] = 0;
1902 }
1903 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1904 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1905
1906 /* update FEATURES to device */
3843e515 1907 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1908 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1909 VMXNET3_CMD_UPDATE_FEATURE);
1910 }
1911 }
1912}
1913
1914
1915static void
1916vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1917{
1918 if (adapter->vlan_grp) {
1919 u16 vid;
1920 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1921 bool activeVlan = false;
1922
b738127d 1923 for (vid = 0; vid < VLAN_N_VID; vid++) {
1924 if (vlan_group_get_device(adapter->vlan_grp, vid)) {
1925 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1926 activeVlan = true;
1927 }
1928 }
1929 if (activeVlan) {
1930 /* continue to allow untagged pkts */
1931 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1932 }
1933 }
1934}
1935
1936
1937static void
1938vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1939{
1940 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1941 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1942
1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1944 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1945 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1946}
1947
1948
1949static void
1950vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1951{
1952 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1953 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1954
1955 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1956 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1957 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1958}
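/*
 * Editor's note - illustrative sketch, not driver code. The VLAN filter
 * table used by the two handlers above behaves as a 4096-bit bitmap
 * stored in u32 words, one bit per possible VLAN ID; the SET/CLEAR
 * macros are assumed to reduce to word/bit arithmetic along these lines
 * (see vmxnet3_defs.h for the authoritative definitions):
 */
#if 0
static inline void example_vft_set(u32 *vfTable, u16 vid)
{
	vfTable[vid >> 5] |= 1U << (vid & 31);	/* word vid/32, bit vid%32 */
}

static inline void example_vft_clear(u32 *vfTable, u16 vid)
{
	vfTable[vid >> 5] &= ~(1U << (vid & 31));
}
#endif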
1959
1960
1961static u8 *
1962vmxnet3_copy_mc(struct net_device *netdev)
1963{
1964 u8 *buf = NULL;
4cd24eaf 1965 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
1966
1967 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1968 if (sz <= 0xffff) {
1969 /* We may be called with BH disabled */
1970 buf = kmalloc(sz, GFP_ATOMIC);
1971 if (buf) {
22bedad3 1972 struct netdev_hw_addr *ha;
567ec874 1973 int i = 0;
 1974
1975 netdev_for_each_mc_addr(ha, netdev)
1976 memcpy(buf + i++ * ETH_ALEN, ha->addr,
d1a890fa 1977 ETH_ALEN);
1978 }
1979 }
1980 return buf;
1981}
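/*
 * Editor's note - a minimal usage sketch, not driver code: the buffer
 * built by vmxnet3_copy_mc() is a flat array with one ETH_ALEN-byte
 * entry per multicast address and no separators, so entry k starts at
 * byte k * ETH_ALEN. The helper name below is hypothetical.
 */
#if 0
static inline const u8 *example_mc_entry(const u8 *mc_table, int k)
{
	return mc_table + k * ETH_ALEN;	/* address record k */
}
#endif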
1982
1983
1984static void
1985vmxnet3_set_mc(struct net_device *netdev)
1986{
1987 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1988 struct Vmxnet3_RxFilterConf *rxConf =
1989 &adapter->shared->devRead.rxFilterConf;
1990 u8 *new_table = NULL;
1991 u32 new_mode = VMXNET3_RXM_UCAST;
1992
1993 if (netdev->flags & IFF_PROMISC)
1994 new_mode |= VMXNET3_RXM_PROMISC;
1995
1996 if (netdev->flags & IFF_BROADCAST)
1997 new_mode |= VMXNET3_RXM_BCAST;
1998
1999 if (netdev->flags & IFF_ALLMULTI)
2000 new_mode |= VMXNET3_RXM_ALL_MULTI;
2001 else
4cd24eaf 2002 if (!netdev_mc_empty(netdev)) {
2003 new_table = vmxnet3_copy_mc(netdev);
2004 if (new_table) {
2005 new_mode |= VMXNET3_RXM_MCAST;
115924b6 2006 rxConf->mfTableLen = cpu_to_le16(
4cd24eaf 2007 netdev_mc_count(netdev) * ETH_ALEN);
2008 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
2009 new_table));
2010 } else {
2011 printk(KERN_INFO "%s: failed to copy mcast list"
2012 ", setting ALL_MULTI\n", netdev->name);
2013 new_mode |= VMXNET3_RXM_ALL_MULTI;
2014 }
2015 }
2016
2017
2018 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2019 rxConf->mfTableLen = 0;
2020 rxConf->mfTablePA = 0;
2021 }
2022
2023 if (new_mode != rxConf->rxMode) {
115924b6 2024 rxConf->rxMode = cpu_to_le32(new_mode);
2025 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2026 VMXNET3_CMD_UPDATE_RX_MODE);
2027 }
2028
2029 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2030 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2031
2032 kfree(new_table);
2033}
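/*
 * Editor's note - worked example of the mode composition above: an
 * interface with IFF_BROADCAST set and a non-empty multicast list (and
 * neither IFF_PROMISC nor IFF_ALLMULTI) ends up programming
 *	new_mode == VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST | VMXNET3_RXM_MCAST
 * with mfTableLen/mfTablePA describing the table copied above.
 */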
2034
2035void
2036vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2037{
2038 int i;
2039
2040 for (i = 0; i < adapter->num_rx_queues; i++)
2041 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2042}
2043
2044
2045/*
2046 * Set up driver_shared based on settings in adapter.
2047 */
2048
2049static void
2050vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2051{
2052 struct Vmxnet3_DriverShared *shared = adapter->shared;
2053 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2054 struct Vmxnet3_TxQueueConf *tqc;
2055 struct Vmxnet3_RxQueueConf *rqc;
2056 int i;
2057
2058 memset(shared, 0, sizeof(*shared));
2059
2060 /* driver settings */
2061 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2062 devRead->misc.driverInfo.version = cpu_to_le32(
2063 VMXNET3_DRIVER_VERSION_NUM);
2064 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2065 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2066 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2067 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2068 *((u32 *)&devRead->misc.driverInfo.gos));
2069 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2070 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 2071
2072 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
2073 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2074
2075 /* set up feature flags */
2076 if (adapter->rxcsum)
3843e515 2077 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2078
2079 if (adapter->lro) {
3843e515 2080 devRead->misc.uptFeatures |= UPT1_F_LRO;
115924b6 2081 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
d1a890fa 2082 }
2083 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
2084 adapter->vlan_grp) {
3843e515 2085 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2086 }
2087
2088 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2089 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2090 devRead->misc.queueDescLen = cpu_to_le32(
2091 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2092 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2093
2094 /* tx queue settings */
2095 devRead->misc.numTxQueues = adapter->num_tx_queues;
2096 for (i = 0; i < adapter->num_tx_queues; i++) {
2097 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2098 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2099 tqc = &adapter->tqd_start[i].conf;
2100 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2101 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2102 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2103 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
2104 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2105 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2106 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2107 tqc->ddLen = cpu_to_le32(
2108 sizeof(struct vmxnet3_tx_buf_info) *
2109 tqc->txRingSize);
2110 tqc->intrIdx = tq->comp_ring.intr_idx;
2111 }
2112
2113 /* rx queue settings */
2114 devRead->misc.numRxQueues = adapter->num_rx_queues;
2115 for (i = 0; i < adapter->num_rx_queues; i++) {
2116 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2117 rqc = &adapter->rqd_start[i].conf;
2118 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2119 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2120 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2121 rqc->ddPA = cpu_to_le64(virt_to_phys(
2122 rq->buf_info));
2123 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2124 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2125 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2126 rqc->ddLen = cpu_to_le32(
2127 sizeof(struct vmxnet3_rx_buf_info) *
2128 (rqc->rxRingSize[0] +
2129 rqc->rxRingSize[1]));
2130 rqc->intrIdx = rq->comp_ring.intr_idx;
2131 }
2132
2133#ifdef VMXNET3_RSS
2134 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2135
2136 if (adapter->rss) {
2137 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2138 devRead->misc.uptFeatures |= UPT1_F_RSS;
2139 devRead->misc.numRxQueues = adapter->num_rx_queues;
2140 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2141 UPT1_RSS_HASH_TYPE_IPV4 |
2142 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2143 UPT1_RSS_HASH_TYPE_IPV6;
2144 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2145 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2146 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2147 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2148 for (i = 0; i < rssConf->indTableSize; i++)
2149 rssConf->indTable[i] = i % adapter->num_rx_queues;
2150
2151 devRead->rssConfDesc.confVer = 1;
2152 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2153 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2154 }
2155
2156#endif /* VMXNET3_RSS */
2157
2158 /* intr settings */
2159 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2160 VMXNET3_IMM_AUTO;
2161 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2162 for (i = 0; i < adapter->intr.num_intrs; i++)
2163 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2164
2165 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
6929fe8a 2166 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2167
2168 /* rx filter settings */
2169 devRead->rxFilterConf.rxMode = 0;
2170 vmxnet3_restore_vlan(adapter);
2171 /* the rest are already zeroed */
2172}
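/*
 * Editor's note - an illustrative sketch of how the device side is
 * expected to consume the RSS state configured above (assumed behavior,
 * not driver code): the Toeplitz hash of a flow is reduced modulo the
 * indirection table size, and the chosen entry names the rx queue.
 */
#if 0
static inline u32 example_rss_pick_queue(const struct UPT1_RSSConf *rssConf,
					 u32 toeplitz_hash)
{
	return rssConf->indTable[toeplitz_hash % rssConf->indTableSize];
}
#endif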
2173
2174
2175int
2176vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2177{
09c5088e 2178 int err, i;
2179 u32 ret;
2180
2181 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2182 " ring sizes %u %u %u\n", adapter->netdev->name,
2183 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2184 adapter->tx_queue[0].tx_ring.size,
2185 adapter->rx_queue[0].rx_ring[0].size,
2186 adapter->rx_queue[0].rx_ring[1].size);
2187
2188 vmxnet3_tq_init_all(adapter);
2189 err = vmxnet3_rq_init_all(adapter);
2190 if (err) {
2191 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
2192 adapter->netdev->name, err);
2193 goto rq_err;
2194 }
2195
2196 err = vmxnet3_request_irqs(adapter);
2197 if (err) {
2198 printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
2199 adapter->netdev->name, err);
2200 goto irq_err;
2201 }
2202
2203 vmxnet3_setup_driver_shared(adapter);
2204
2205 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2206 adapter->shared_pa));
2207 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2208 adapter->shared_pa));
2209 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2210 VMXNET3_CMD_ACTIVATE_DEV);
2211 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2212
2213 if (ret != 0) {
2214 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
2215 adapter->netdev->name, ret);
2216 err = -EINVAL;
2217 goto activate_err;
2218 }
2219
2220 for (i = 0; i < adapter->num_rx_queues; i++) {
2221 VMXNET3_WRITE_BAR0_REG(adapter,
2222 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2223 adapter->rx_queue[i].rx_ring[0].next2fill);
2224 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2225 (i * VMXNET3_REG_ALIGN)),
2226 adapter->rx_queue[i].rx_ring[1].next2fill);
2227 }
2228
 2229 /* Apply the rx filter settings last. */
2230 vmxnet3_set_mc(adapter->netdev);
2231
2232 /*
2233 * Check link state when first activating device. It will start the
2234 * tx queue if the link is up.
2235 */
4a1745fc 2236 vmxnet3_check_link(adapter, true);
2237 for (i = 0; i < adapter->num_rx_queues; i++)
2238 napi_enable(&adapter->rx_queue[i].napi);
2239 vmxnet3_enable_all_intrs(adapter);
2240 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2241 return 0;
2242
2243activate_err:
2244 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2245 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2246 vmxnet3_free_irqs(adapter);
2247irq_err:
2248rq_err:
2249 /* free up buffers we allocated */
09c5088e 2250 vmxnet3_rq_cleanup_all(adapter);
2251 return err;
2252}
2253
2254
2255void
2256vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2257{
2258 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2259}
2260
2261
2262int
2263vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2264{
09c5088e 2265 int i;
2266 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2267 return 0;
2268
2269
2270 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2271 VMXNET3_CMD_QUIESCE_DEV);
2272 vmxnet3_disable_all_intrs(adapter);
2273
2274 for (i = 0; i < adapter->num_rx_queues; i++)
2275 napi_disable(&adapter->rx_queue[i].napi);
2276 netif_tx_disable(adapter->netdev);
2277 adapter->link_speed = 0;
2278 netif_carrier_off(adapter->netdev);
2279
2280 vmxnet3_tq_cleanup_all(adapter);
2281 vmxnet3_rq_cleanup_all(adapter);
2282 vmxnet3_free_irqs(adapter);
2283 return 0;
2284}
2285
2286
2287static void
2288vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2289{
2290 u32 tmp;
2291
2292 tmp = *(u32 *)mac;
2293 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2294
2295 tmp = (mac[5] << 8) | mac[4];
2296 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2297}
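/*
 * Editor's note - worked example for the two register writes above: for
 * MAC address 00:0c:29:aa:bb:cc on a little-endian guest,
 *	MACL = 0xaa290c00	(bytes 0..3 of the address)
 *	MACH = 0x0000ccbb	(bytes 4..5 in the low 16 bits)
 */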
2298
2299
2300static int
2301vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2302{
2303 struct sockaddr *addr = p;
2304 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2305
2306 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2307 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2308
2309 return 0;
2310}
2311
2312
2313/* ==================== initialization and cleanup routines ============ */
2314
2315static int
2316vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2317{
2318 int err;
2319 unsigned long mmio_start, mmio_len;
2320 struct pci_dev *pdev = adapter->pdev;
2321
2322 err = pci_enable_device(pdev);
2323 if (err) {
2324 printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
2325 pci_name(pdev), err);
2326 return err;
2327 }
2328
2329 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2330 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2331 printk(KERN_ERR "pci_set_consistent_dma_mask failed "
2332 "for adapter %s\n", pci_name(pdev));
2333 err = -EIO;
2334 goto err_set_mask;
2335 }
2336 *dma64 = true;
2337 } else {
2338 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2339 printk(KERN_ERR "pci_set_dma_mask failed for adapter "
2340 "%s\n", pci_name(pdev));
2341 err = -EIO;
2342 goto err_set_mask;
2343 }
2344 *dma64 = false;
2345 }
2346
2347 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2348 vmxnet3_driver_name);
2349 if (err) {
2350 printk(KERN_ERR "Failed to request region for adapter %s: "
2351 "error %d\n", pci_name(pdev), err);
2352 goto err_set_mask;
2353 }
2354
2355 pci_set_master(pdev);
2356
2357 mmio_start = pci_resource_start(pdev, 0);
2358 mmio_len = pci_resource_len(pdev, 0);
2359 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2360 if (!adapter->hw_addr0) {
2361 printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
2362 pci_name(pdev));
2363 err = -EIO;
2364 goto err_ioremap;
2365 }
2366
2367 mmio_start = pci_resource_start(pdev, 1);
2368 mmio_len = pci_resource_len(pdev, 1);
2369 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2370 if (!adapter->hw_addr1) {
2371 printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
2372 pci_name(pdev));
2373 err = -EIO;
2374 goto err_bar1;
2375 }
2376 return 0;
2377
2378err_bar1:
2379 iounmap(adapter->hw_addr0);
2380err_ioremap:
2381 pci_release_selected_regions(pdev, (1 << 2) - 1);
2382err_set_mask:
2383 pci_disable_device(pdev);
2384 return err;
2385}
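/*
 * Editor's note: the (1 << 2) - 1 mask passed to
 * pci_request_selected_regions() above is 0x3, i.e. bits 0 and 1, which
 * claims exactly the two PCI resources (BAR0 and BAR1) that are
 * ioremap()ed into hw_addr0 and hw_addr1.
 */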
2386
2387
2388static void
2389vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2390{
2391 BUG_ON(!adapter->pdev);
2392
2393 iounmap(adapter->hw_addr0);
2394 iounmap(adapter->hw_addr1);
2395 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2396 pci_disable_device(adapter->pdev);
2397}
2398
2399
2400static void
2401vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2402{
2403 size_t sz, i, ring0_size, ring1_size, comp_size;
2404 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2405
2406
2407 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2408 VMXNET3_MAX_ETH_HDR_SIZE) {
2409 adapter->skb_buf_size = adapter->netdev->mtu +
2410 VMXNET3_MAX_ETH_HDR_SIZE;
2411 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2412 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2413
2414 adapter->rx_buf_per_pkt = 1;
2415 } else {
2416 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2417 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2418 VMXNET3_MAX_ETH_HDR_SIZE;
2419 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2420 }
2421
2422 /*
2423 * for simplicity, force the ring0 size to be a multiple of
2424 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2425 */
2426 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2427 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2428 ring0_size = (ring0_size + sz - 1) / sz * sz;
2429 ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
2430 sz * sz);
2431 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2432 comp_size = ring0_size + ring1_size;
2433
2434 for (i = 0; i < adapter->num_rx_queues; i++) {
2435 rq = &adapter->rx_queue[i];
2436 rq->rx_ring[0].size = ring0_size;
2437 rq->rx_ring[1].size = ring1_size;
2438 rq->comp_ring.size = comp_size;
2439 }
2440}
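/*
 * Editor's note - worked example with assumed values: for
 * rx_buf_per_pkt == 3 and VMXNET3_RING_SIZE_ALIGN == 32, sz == 96, so a
 * requested ring0 size of 256 rounds up to (256 + 95) / 96 * 96 == 288
 * and is then capped at VMXNET3_RX_RING_MAX_SIZE / 96 * 96.
 */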
2441
2442
2443int
2444vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2445 u32 rx_ring_size, u32 rx_ring2_size)
2446{
2447 int err = 0, i;
2448
2449 for (i = 0; i < adapter->num_tx_queues; i++) {
2450 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2451 tq->tx_ring.size = tx_ring_size;
2452 tq->data_ring.size = tx_ring_size;
2453 tq->comp_ring.size = tx_ring_size;
2454 tq->shared = &adapter->tqd_start[i].ctrl;
2455 tq->stopped = true;
2456 tq->adapter = adapter;
2457 tq->qid = i;
2458 err = vmxnet3_tq_create(tq, adapter);
2459 /*
 2460 * Too late to change num_tx_queues. We cannot make do with
 2461 * fewer queues than we asked for.
2462 */
2463 if (err)
2464 goto queue_err;
2465 }
 2466
2467 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2468 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
d1a890fa 2469 vmxnet3_adjust_rx_ring_size(adapter);
2470 for (i = 0; i < adapter->num_rx_queues; i++) {
2471 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
 2472 /* qid and qid2 for rx queues will be assigned later, when the
 2473 * number of rx queues is finalized after allocating intrs */
2474 rq->shared = &adapter->rqd_start[i].ctrl;
2475 rq->adapter = adapter;
2476 err = vmxnet3_rq_create(rq, adapter);
2477 if (err) {
2478 if (i == 0) {
 2479 printk(KERN_ERR "Could not allocate any rx "
 2480 "queues. Aborting.\n");
 2481 goto queue_err;
 2482 } else {
 2483 printk(KERN_INFO "Number of rx queues changed "
 2484 "to: %d.\n", i);
2485 adapter->num_rx_queues = i;
2486 err = 0;
2487 break;
2488 }
2489 }
2490 }
2491 return err;
2492queue_err:
2493 vmxnet3_tq_destroy_all(adapter);
2494 return err;
2495}
2496
2497static int
2498vmxnet3_open(struct net_device *netdev)
2499{
2500 struct vmxnet3_adapter *adapter;
09c5088e 2501 int err, i;
2502
2503 adapter = netdev_priv(netdev);
2504
2505 for (i = 0; i < adapter->num_tx_queues; i++)
2506 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2507
2508 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2509 VMXNET3_DEF_RX_RING_SIZE,
2510 VMXNET3_DEF_RX_RING_SIZE);
2511 if (err)
2512 goto queue_err;
2513
2514 err = vmxnet3_activate_dev(adapter);
2515 if (err)
2516 goto activate_err;
2517
2518 return 0;
2519
2520activate_err:
2521 vmxnet3_rq_destroy_all(adapter);
2522 vmxnet3_tq_destroy_all(adapter);
2523queue_err:
2524 return err;
2525}
2526
2527
2528static int
2529vmxnet3_close(struct net_device *netdev)
2530{
2531 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2532
2533 /*
 2534 * Reset_work may be in the middle of resetting the device; wait for it
 2535 * to complete.
2536 */
2537 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2538 msleep(1);
2539
2540 vmxnet3_quiesce_dev(adapter);
2541
2542 vmxnet3_rq_destroy_all(adapter);
2543 vmxnet3_tq_destroy_all(adapter);
2544
2545 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2546
2547
2548 return 0;
2549}
2550
2551
2552void
2553vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2554{
2555 int i;
2556
2557 /*
2558 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2559 * vmxnet3_close() will deadlock.
2560 */
2561 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2562
2563 /* we need to enable NAPI, otherwise dev_close will deadlock */
2564 for (i = 0; i < adapter->num_rx_queues; i++)
2565 napi_enable(&adapter->rx_queue[i].napi);
2566 dev_close(adapter->netdev);
2567}
2568
2569
2570static int
2571vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2572{
2573 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2574 int err = 0;
2575
2576 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2577 return -EINVAL;
2578
2579 if (new_mtu > 1500 && !adapter->jumbo_frame)
2580 return -EINVAL;
2581
2582 netdev->mtu = new_mtu;
2583
2584 /*
 2585 * Reset_work may be in the middle of resetting the device; wait for it
 2586 * to complete.
2587 */
2588 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2589 msleep(1);
2590
2591 if (netif_running(netdev)) {
2592 vmxnet3_quiesce_dev(adapter);
2593 vmxnet3_reset_dev(adapter);
2594
2595 /* we need to re-create the rx queue based on the new mtu */
09c5088e 2596 vmxnet3_rq_destroy_all(adapter);
d1a890fa 2597 vmxnet3_adjust_rx_ring_size(adapter);
09c5088e 2598 err = vmxnet3_rq_create_all(adapter);
d1a890fa 2599 if (err) {
09c5088e 2600 printk(KERN_ERR "%s: failed to re-create rx queues,"
2601 " error %d. Closing it.\n", netdev->name, err);
2602 goto out;
2603 }
2604
2605 err = vmxnet3_activate_dev(adapter);
2606 if (err) {
2607 printk(KERN_ERR "%s: failed to re-activate, error %d. "
 2608 "Closing it.\n", netdev->name, err);
2609 goto out;
2610 }
2611 }
2612
2613out:
2614 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2615 if (err)
2616 vmxnet3_force_close(adapter);
2617
2618 return err;
2619}
2620
2621
2622static void
2623vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2624{
2625 struct net_device *netdev = adapter->netdev;
2626
2627 netdev->features = NETIF_F_SG |
2628 NETIF_F_HW_CSUM |
2629 NETIF_F_HW_VLAN_TX |
2630 NETIF_F_HW_VLAN_RX |
2631 NETIF_F_HW_VLAN_FILTER |
2632 NETIF_F_TSO |
2633 NETIF_F_TSO6 |
2634 NETIF_F_LRO;
2635
2636 printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
2637
2638 adapter->rxcsum = true;
2639 adapter->jumbo_frame = true;
2640 adapter->lro = true;
2641
2642 if (dma64) {
2643 netdev->features |= NETIF_F_HIGHDMA;
2644 printk(" highDMA");
2645 }
2646
2647 netdev->vlan_features = netdev->features;
2648 printk("\n");
2649}
2650
2651
2652static void
2653vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2654{
2655 u32 tmp;
2656
2657 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2658 *(u32 *)mac = tmp;
2659
2660 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2661 mac[4] = tmp & 0xff;
2662 mac[5] = (tmp >> 8) & 0xff;
2663}
2664
2665#ifdef CONFIG_PCI_MSI
2666
2667/*
 2668 * Enable MSI-X vectors.
 2669 * Returns:
 2670 * 0 when the required number of vectors was enabled,
 2671 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
 2672 * vectors could be enabled, or
 2673 * the number of vectors which can be enabled otherwise (this number is
 2674 * smaller than VMXNET3_LINUX_MIN_MSIX_VECT).
2675 */
2676
2677static int
2678vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2679 int vectors)
2680{
2681 int err = 0, vector_threshold;
2682 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2683
2684 while (vectors >= vector_threshold) {
2685 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2686 vectors);
2687 if (!err) {
2688 adapter->intr.num_intrs = vectors;
2689 return 0;
2690 } else if (err < 0) {
2691 printk(KERN_ERR "Failed to enable MSI-X for %s, error"
2692 " %d\n", adapter->netdev->name, err);
2693 vectors = 0;
2694 } else if (err < vector_threshold) {
2695 break;
2696 } else {
 2697 /* If we fail to enable the required number of MSI-X vectors,
 2698 * try enabling 3 of them: one each for rx, tx and events.
 2699 */
2700 vectors = vector_threshold;
2701 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
2702 " %d instead\n", vectors, adapter->netdev->name,
2703 vector_threshold);
2704 }
2705 }
2706
 2707 printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
 2708 " is lower than the min threshold required.\n");
2709 return err;
2710}
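/*
 * Editor's note on the loop above: on kernels of this vintage
 * pci_enable_msix() returns 0 on success, a negative errno on hard
 * failure, or - when asked for more vectors than are available - the
 * positive number of vectors that could have been allocated. A negative
 * or too-small positive return ends the loop; otherwise the code
 * retries once more, asking only for the 3-vector minimum.
 */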
2711
2712
2713#endif /* CONFIG_PCI_MSI */
2714
2715static void
2716vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2717{
2718 u32 cfg;
2719
2720 /* intr settings */
2721 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2722 VMXNET3_CMD_GET_CONF_INTR);
2723 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2724 adapter->intr.type = cfg & 0x3;
2725 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2726
2727 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2728 adapter->intr.type = VMXNET3_IT_MSIX;
2729 }
d1a890fa 2730
8f7e524c 2731#ifdef CONFIG_PCI_MSI
0bdc0d70 2732 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2733 int vector, err = 0;
2734
2735 adapter->intr.num_intrs = (adapter->share_intr ==
2736 VMXNET3_INTR_TXSHARE) ? 1 :
2737 adapter->num_tx_queues;
2738 adapter->intr.num_intrs += (adapter->share_intr ==
2739 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2740 adapter->num_rx_queues;
2741 adapter->intr.num_intrs += 1; /* for link event */
2742
2743 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2744 VMXNET3_LINUX_MIN_MSIX_VECT
2745 ? adapter->intr.num_intrs :
2746 VMXNET3_LINUX_MIN_MSIX_VECT);
2747
2748 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2749 adapter->intr.msix_entries[vector].entry = vector;
2750
2751 err = vmxnet3_acquire_msix_vectors(adapter,
2752 adapter->intr.num_intrs);
2753 /* If we cannot allocate one MSIx vector per queue
2754 * then limit the number of rx queues to 1
2755 */
2756 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2757 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2758 || adapter->num_rx_queues != 2) {
2759 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2760 printk(KERN_ERR "Number of rx queues : 1\n");
2761 adapter->num_rx_queues = 1;
2762 adapter->intr.num_intrs =
2763 VMXNET3_LINUX_MIN_MSIX_VECT;
2764 }
2765 return;
2766 }
2767 if (!err)
2768 return;
2769
2770 /* If we cannot allocate MSIx vectors use only one rx queue */
 2771 printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
 2772 "#rx queues: 1, trying MSI\n", adapter->netdev->name, err);
2773
2774 adapter->intr.type = VMXNET3_IT_MSI;
2775 }
d1a890fa 2776
2777 if (adapter->intr.type == VMXNET3_IT_MSI) {
2778 int err;
2779 err = pci_enable_msi(adapter->pdev);
2780 if (!err) {
09c5088e 2781 adapter->num_rx_queues = 1;
d1a890fa 2782 adapter->intr.num_intrs = 1;
2783 return;
2784 }
2785 }
0bdc0d70 2786#endif /* CONFIG_PCI_MSI */
d1a890fa 2787
2788 adapter->num_rx_queues = 1;
2789 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
2790 adapter->intr.type = VMXNET3_IT_INTX;
2791
2792 /* INT-X related setting */
2793 adapter->intr.num_intrs = 1;
2794}
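/*
 * Editor's note - the interrupt fallback ladder implemented above, in
 * brief:
 *	1. MSI-X: one vector per tx queue (or one shared tx vector), one
 *	   per rx queue unless buddy-shared with tx, plus one for events;
 *	2. on failure, MSI with a single vector and num_rx_queues = 1;
 *	3. failing that, legacy INTx, again with a single rx queue.
 */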
2795
2796
2797static void
2798vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2799{
2800 if (adapter->intr.type == VMXNET3_IT_MSIX)
2801 pci_disable_msix(adapter->pdev);
2802 else if (adapter->intr.type == VMXNET3_IT_MSI)
2803 pci_disable_msi(adapter->pdev);
2804 else
2805 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2806}
2807
2808
2809static void
2810vmxnet3_tx_timeout(struct net_device *netdev)
2811{
2812 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2813 adapter->tx_timeout_count++;
2814
2815 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2816 schedule_work(&adapter->work);
09c5088e 2817 netif_wake_queue(adapter->netdev);
2818}
2819
2820
2821static void
2822vmxnet3_reset_work(struct work_struct *data)
2823{
2824 struct vmxnet3_adapter *adapter;
2825
2826 adapter = container_of(data, struct vmxnet3_adapter, work);
2827
2828 /* if another thread is resetting the device, no need to proceed */
2829 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2830 return;
2831
2832 /* if the device is closed, we must leave it alone */
d9a5f210 2833 rtnl_lock();
2834 if (netif_running(adapter->netdev)) {
2835 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2836 vmxnet3_quiesce_dev(adapter);
2837 vmxnet3_reset_dev(adapter);
2838 vmxnet3_activate_dev(adapter);
2839 } else {
2840 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2841 }
d9a5f210 2842 rtnl_unlock();
2843
2844 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2845}
2846
2847
2848static int __devinit
2849vmxnet3_probe_device(struct pci_dev *pdev,
2850 const struct pci_device_id *id)
2851{
2852 static const struct net_device_ops vmxnet3_netdev_ops = {
2853 .ndo_open = vmxnet3_open,
2854 .ndo_stop = vmxnet3_close,
2855 .ndo_start_xmit = vmxnet3_xmit_frame,
2856 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2857 .ndo_change_mtu = vmxnet3_change_mtu,
2858 .ndo_get_stats = vmxnet3_get_stats,
2859 .ndo_tx_timeout = vmxnet3_tx_timeout,
2860 .ndo_set_multicast_list = vmxnet3_set_mc,
2861 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
2862 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2863 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2864#ifdef CONFIG_NET_POLL_CONTROLLER
2865 .ndo_poll_controller = vmxnet3_netpoll,
2866#endif
2867 };
2868 int err;
2869 bool dma64 = false; /* stupid gcc */
2870 u32 ver;
2871 struct net_device *netdev;
2872 struct vmxnet3_adapter *adapter;
2873 u8 mac[ETH_ALEN];
2874 int size;
2875 int num_tx_queues;
2876 int num_rx_queues;
2877
2878#ifdef VMXNET3_RSS
2879 if (enable_mq)
2880 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2881 (int)num_online_cpus());
2882 else
2883#endif
2884 num_rx_queues = 1;
2885
2886 if (enable_mq)
2887 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2888 (int)num_online_cpus());
2889 else
2890 num_tx_queues = 1;
2891
2892 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2893 max(num_tx_queues, num_rx_queues));
2894 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2895 num_tx_queues, num_rx_queues);
 2896
2897 if (!netdev) {
2898 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2899 "%s\n", pci_name(pdev));
2900 return -ENOMEM;
2901 }
2902
2903 pci_set_drvdata(pdev, netdev);
2904 adapter = netdev_priv(netdev);
2905 adapter->netdev = netdev;
2906 adapter->pdev = pdev;
2907
2908 adapter->shared = pci_alloc_consistent(adapter->pdev,
2909 sizeof(struct Vmxnet3_DriverShared),
2910 &adapter->shared_pa);
2911 if (!adapter->shared) {
2912 printk(KERN_ERR "Failed to allocate memory for %s\n",
2913 pci_name(pdev));
2914 err = -ENOMEM;
2915 goto err_alloc_shared;
2916 }
2917
2918 adapter->num_rx_queues = num_rx_queues;
2919 adapter->num_tx_queues = num_tx_queues;
2920
2921 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2922 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2923 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2924 &adapter->queue_desc_pa);
2925
2926 if (!adapter->tqd_start) {
2927 printk(KERN_ERR "Failed to allocate memory for %s\n",
2928 pci_name(pdev));
2929 err = -ENOMEM;
2930 goto err_alloc_queue_desc;
2931 }
2932 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2933 adapter->num_tx_queues);
2934
2935 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2936 if (adapter->pm_conf == NULL) {
2937 printk(KERN_ERR "Failed to allocate memory for %s\n",
2938 pci_name(pdev));
2939 err = -ENOMEM;
2940 goto err_alloc_pm;
2941 }
2942
2943#ifdef VMXNET3_RSS
2944
2945 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2946 if (adapter->rss_conf == NULL) {
2947 printk(KERN_ERR "Failed to allocate memory for %s\n",
2948 pci_name(pdev));
2949 err = -ENOMEM;
2950 goto err_alloc_rss;
2951 }
2952#endif /* VMXNET3_RSS */
2953
2954 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2955 if (err < 0)
2956 goto err_alloc_pci;
2957
2958 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2959 if (ver & 1) {
2960 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2961 } else {
2962 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2963 " %s\n", ver, pci_name(pdev));
2964 err = -EBUSY;
2965 goto err_ver;
2966 }
2967
2968 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2969 if (ver & 1) {
2970 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2971 } else {
2972 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2973 "adapter %s\n", ver, pci_name(pdev));
2974 err = -EBUSY;
2975 goto err_ver;
2976 }
2977
2978 vmxnet3_declare_features(adapter, dma64);
2979
2980 adapter->dev_number = atomic_read(&devices_found);
2981
2982 adapter->share_intr = irq_share_mode;
2983 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
2984 adapter->num_tx_queues != adapter->num_rx_queues)
2985 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
2986
2987 vmxnet3_alloc_intr_resources(adapter);
2988
2989#ifdef VMXNET3_RSS
2990 if (adapter->num_rx_queues > 1 &&
2991 adapter->intr.type == VMXNET3_IT_MSIX) {
2992 adapter->rss = true;
2993 printk(KERN_INFO "RSS is enabled.\n");
2994 } else {
2995 adapter->rss = false;
2996 }
2997#endif
2998
2999 vmxnet3_read_mac_addr(adapter, mac);
3000 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3001
3002 netdev->netdev_ops = &vmxnet3_netdev_ops;
d1a890fa 3003 vmxnet3_set_ethtool_ops(netdev);
09c5088e 3004 netdev->watchdog_timeo = 5 * HZ;
3005
3006 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3007
3008 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3009 int i;
3010 for (i = 0; i < adapter->num_rx_queues; i++) {
3011 netif_napi_add(adapter->netdev,
3012 &adapter->rx_queue[i].napi,
3013 vmxnet3_poll_rx_only, 64);
3014 }
3015 } else {
3016 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3017 vmxnet3_poll, 64);
3018 }
3019
3020 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3021 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3022
3023 SET_NETDEV_DEV(netdev, &pdev->dev);
3024 err = register_netdev(netdev);
3025
3026 if (err) {
3027 printk(KERN_ERR "Failed to register adapter %s\n",
3028 pci_name(pdev));
3029 goto err_register;
3030 }
3031
3032 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4a1745fc 3033 vmxnet3_check_link(adapter, false);
3034 atomic_inc(&devices_found);
3035 return 0;
3036
3037err_register:
3038 vmxnet3_free_intr_resources(adapter);
3039err_ver:
3040 vmxnet3_free_pci_resources(adapter);
3041err_alloc_pci:
3042#ifdef VMXNET3_RSS
3043 kfree(adapter->rss_conf);
3044err_alloc_rss:
3045#endif
3046 kfree(adapter->pm_conf);
3047err_alloc_pm:
3048 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3049 adapter->queue_desc_pa);
3050err_alloc_queue_desc:
3051 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3052 adapter->shared, adapter->shared_pa);
3053err_alloc_shared:
3054 pci_set_drvdata(pdev, NULL);
3055 free_netdev(netdev);
3056 return err;
3057}
3058
3059
3060static void __devexit
3061vmxnet3_remove_device(struct pci_dev *pdev)
3062{
3063 struct net_device *netdev = pci_get_drvdata(pdev);
3064 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3065 int size = 0;
3066 int num_rx_queues;
3067
3068#ifdef VMXNET3_RSS
3069 if (enable_mq)
3070 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3071 (int)num_online_cpus());
3072 else
3073#endif
3074 num_rx_queues = 1;
d1a890fa 3075
23f333a2 3076 cancel_work_sync(&adapter->work);
3077
3078 unregister_netdev(netdev);
3079
3080 vmxnet3_free_intr_resources(adapter);
3081 vmxnet3_free_pci_resources(adapter);
3082#ifdef VMXNET3_RSS
3083 kfree(adapter->rss_conf);
3084#endif
d1a890fa 3085 kfree(adapter->pm_conf);
3086
3087 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3088 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3089 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3090 adapter->queue_desc_pa);
3091 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3092 adapter->shared, adapter->shared_pa);
3093 free_netdev(netdev);
3094}
3095
3096
3097#ifdef CONFIG_PM
3098
3099static int
3100vmxnet3_suspend(struct device *device)
3101{
3102 struct pci_dev *pdev = to_pci_dev(device);
3103 struct net_device *netdev = pci_get_drvdata(pdev);
3104 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3105 struct Vmxnet3_PMConf *pmConf;
3106 struct ethhdr *ehdr;
3107 struct arphdr *ahdr;
3108 u8 *arpreq;
3109 struct in_device *in_dev;
3110 struct in_ifaddr *ifa;
3111 int i = 0;
3112
3113 if (!netif_running(netdev))
3114 return 0;
3115
3116 vmxnet3_disable_all_intrs(adapter);
3117 vmxnet3_free_irqs(adapter);
3118 vmxnet3_free_intr_resources(adapter);
3119
3120 netif_device_detach(netdev);
09c5088e 3121 netif_tx_stop_all_queues(netdev);
3122
3123 /* Create wake-up filters. */
3124 pmConf = adapter->pm_conf;
3125 memset(pmConf, 0, sizeof(*pmConf));
3126
3127 if (adapter->wol & WAKE_UCAST) {
3128 pmConf->filters[i].patternSize = ETH_ALEN;
3129 pmConf->filters[i].maskSize = 1;
3130 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3131 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3132
3843e515 3133 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3134 i++;
3135 }
3136
3137 if (adapter->wol & WAKE_ARP) {
3138 in_dev = in_dev_get(netdev);
3139 if (!in_dev)
3140 goto skip_arp;
3141
3142 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3143 if (!ifa)
3144 goto skip_arp;
3145
3146 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3147 sizeof(struct arphdr) + /* ARP header */
3148 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3149 2 * sizeof(u32); /*2 IPv4 addresses */
3150 pmConf->filters[i].maskSize =
3151 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3152
3153 /* ETH_P_ARP in Ethernet header. */
3154 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3155 ehdr->h_proto = htons(ETH_P_ARP);
3156
3157 /* ARPOP_REQUEST in ARP header. */
3158 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3159 ahdr->ar_op = htons(ARPOP_REQUEST);
3160 arpreq = (u8 *)(ahdr + 1);
3161
3162 /* The Unicast IPv4 address in 'tip' field. */
3163 arpreq += 2 * ETH_ALEN + sizeof(u32);
3164 *(u32 *)arpreq = ifa->ifa_address;
3165
3166 /* The mask for the relevant bits. */
3167 pmConf->filters[i].mask[0] = 0x00;
3168 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3169 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3170 pmConf->filters[i].mask[3] = 0x00;
3171 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3172 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3173 in_dev_put(in_dev);
3174
3843e515 3175 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3176 i++;
3177 }
3178
3179skip_arp:
3180 if (adapter->wol & WAKE_MAGIC)
3843e515 3181 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3182
3183 pmConf->numFilters = i;
3184
3185 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3186 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3187 *pmConf));
3188 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3189 pmConf));
3190
3191 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3192 VMXNET3_CMD_UPDATE_PMCFG);
3193
3194 pci_save_state(pdev);
3195 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3196 adapter->wol);
3197 pci_disable_device(pdev);
3198 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3199
3200 return 0;
3201}
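/*
 * Editor's note - how the mask bytes above select pattern bytes
 * (assumed device semantics: bit k of mask[n] enables comparison of
 * pattern byte n * 8 + k). For the ARP filter built in this function:
 *	mask[1] = 0x30 -> bytes 12-13, the EtherType (ETH_P_ARP)
 *	mask[2] = 0x30 -> bytes 20-21, the ARP opcode (ARPOP_REQUEST)
 *	mask[4] = 0xC0, mask[5] = 0x03 -> bytes 38-41, the target IP
 * All other pattern bytes are ignored when matching.
 */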
3202
3203
3204static int
3205vmxnet3_resume(struct device *device)
3206{
3207 int err;
3208 struct pci_dev *pdev = to_pci_dev(device);
3209 struct net_device *netdev = pci_get_drvdata(pdev);
3210 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3211 struct Vmxnet3_PMConf *pmConf;
3212
3213 if (!netif_running(netdev))
3214 return 0;
3215
3216 /* Destroy wake-up filters. */
3217 pmConf = adapter->pm_conf;
3218 memset(pmConf, 0, sizeof(*pmConf));
3219
3220 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3221 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3222 *pmConf));
0561cf3d 3223 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
115924b6 3224 pmConf));
3225
3226 netif_device_attach(netdev);
3227 pci_set_power_state(pdev, PCI_D0);
3228 pci_restore_state(pdev);
3229 err = pci_enable_device_mem(pdev);
3230 if (err != 0)
3231 return err;
3232
3233 pci_enable_wake(pdev, PCI_D0, 0);
3234
3235 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3236 VMXNET3_CMD_UPDATE_PMCFG);
3237 vmxnet3_alloc_intr_resources(adapter);
3238 vmxnet3_request_irqs(adapter);
3239 vmxnet3_enable_all_intrs(adapter);
3240
3241 return 0;
3242}
3243
47145210 3244static const struct dev_pm_ops vmxnet3_pm_ops = {
3245 .suspend = vmxnet3_suspend,
3246 .resume = vmxnet3_resume,
3247};
3248#endif
3249
3250static struct pci_driver vmxnet3_driver = {
3251 .name = vmxnet3_driver_name,
3252 .id_table = vmxnet3_pciid_table,
3253 .probe = vmxnet3_probe_device,
3254 .remove = __devexit_p(vmxnet3_remove_device),
3255#ifdef CONFIG_PM
3256 .driver.pm = &vmxnet3_pm_ops,
3257#endif
3258};
3259
3260
3261static int __init
3262vmxnet3_init_module(void)
3263{
3264 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
3265 VMXNET3_DRIVER_VERSION_REPORT);
3266 return pci_register_driver(&vmxnet3_driver);
3267}
3268
3269module_init(vmxnet3_init_module);
3270
3271
3272static void
3273vmxnet3_exit_module(void)
3274{
3275 pci_unregister_driver(&vmxnet3_driver);
3276}
3277
3278module_exit(vmxnet3_exit_module);
3279
3280MODULE_AUTHOR("VMware, Inc.");
3281MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3282MODULE_LICENSE("GPL v2");
3283MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);