1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
40 #include <linux/delay.h>
41 #include <linux/mm.h>
42 #include <linux/vmalloc.h>
43 #include <linux/prefetch.h>
44 #include <net/ip6_checksum.h>
45
46 #include "qlge.h"
47
48 char qlge_driver_name[] = DRV_NAME;
49 const char qlge_driver_version[] = DRV_VERSION;
50
51 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
52 MODULE_DESCRIPTION(DRV_STRING " ");
53 MODULE_LICENSE("GPL");
54 MODULE_VERSION(DRV_VERSION);
55
56 static const u32 default_msg =
57 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
58 /* NETIF_MSG_TIMER | */
59 NETIF_MSG_IFDOWN |
60 NETIF_MSG_IFUP |
61 NETIF_MSG_RX_ERR |
62 NETIF_MSG_TX_ERR |
63 /* NETIF_MSG_TX_QUEUED | */
64 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
65 /* NETIF_MSG_PKTDATA | */
66 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67
68 static int debug = -1; /* defaults above */
69 module_param(debug, int, 0664);
70 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
71
72 #define MSIX_IRQ 0
73 #define MSI_IRQ 1
74 #define LEG_IRQ 2
75 static int qlge_irq_type = MSIX_IRQ;
76 module_param(qlge_irq_type, int, 0664);
77 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
78
79 static int qlge_mpi_coredump;
80 module_param(qlge_mpi_coredump, int, 0);
81 MODULE_PARM_DESC(qlge_mpi_coredump,
82 "Option to enable MPI firmware dump. "
83                 "Default is OFF - Do not allocate memory.");
84
85 static int qlge_force_coredump;
86 module_param(qlge_force_coredump, int, 0);
87 MODULE_PARM_DESC(qlge_force_coredump,
88                 "Option to allow forcing a firmware core dump. "
89 "Default is OFF - Do not allow.");
90
91 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
93 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
94 /* required last entry */
95 {0,}
96 };
97
98 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99
100 static int ql_wol(struct ql_adapter *qdev);
101 static void qlge_set_multicast_list(struct net_device *ndev);
102
103 /* This hardware semaphore provides exclusive access to
104  * resources shared between the NIC driver, MPI firmware,
105  * FCoE firmware and the FC driver.
106 */
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108 {
109 u32 sem_bits = 0;
110
111 switch (sem_mask) {
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 break;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 break;
118 case SEM_ICB_MASK:
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 break;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 break;
124 case SEM_FLASH_MASK:
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 break;
127 case SEM_PROBE_MASK:
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 break;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 break;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 break;
136 default:
137                 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
138 return -EINVAL;
139 }
140
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
143 }
144
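/* Acquire one of the hardware semaphores defined above, polling for up
 * to 30 tries 100us apart before giving up.  Returns 0 on success or
 * -ETIMEDOUT otherwise.  Callers pair it with ql_sem_unlock(), e.g.:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */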
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 {
147 unsigned int wait_count = 30;
148 do {
149 if (!ql_sem_trylock(qdev, sem_mask))
150 return 0;
151 udelay(100);
152 } while (--wait_count);
153 return -ETIMEDOUT;
154 }
155
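/* Release a hardware semaphore taken with ql_sem_spinlock() or
 * ql_sem_trylock(), then flush the write.
 */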
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 {
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
160 }
161
162 /* This function waits for a specific bit to come ready
163  * in a given register. It is used mostly during initialization,
164  * but is also used by netdev callbacks such as
165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166 */
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168 {
169 u32 temp;
170 int count = UDELAY_COUNT;
171
172 while (count) {
173 temp = ql_read32(qdev, reg);
174
175 /* check for errors */
176 if (temp & err_bit) {
177 netif_alert(qdev, probe, qdev->ndev,
178                                     "register 0x%.08x access error, value = 0x%.08x!\n",
179 reg, temp);
180 return -EIO;
181 } else if (temp & bit)
182 return 0;
183 udelay(UDELAY_DELAY);
184 count--;
185 }
186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
188 return -ETIMEDOUT;
189 }
190
191 /* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
193 */
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 {
196 int count = UDELAY_COUNT;
197 u32 temp;
198
199 while (count) {
200 temp = ql_read32(qdev, CFG);
201 if (temp & CFG_LE)
202 return -EIO;
203 if (!(temp & bit))
204 return 0;
205 udelay(UDELAY_DELAY);
206 count--;
207 }
208 return -ETIMEDOUT;
209 }
210
211
212 /* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
214 */
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
216 u16 q_id)
217 {
218 u64 map;
219 int status = 0;
220 int direction;
221 u32 mask;
222 u32 value;
223
224 direction =
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
226 PCI_DMA_FROMDEVICE;
227
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 return -ENOMEM;
232 }
233
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 if (status)
236 return status;
237
238 status = ql_wait_cfg(qdev, bit);
239 if (status) {
240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
242 goto exit;
243 }
244
245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
251
252 /*
253 * Wait for the bit to clear after signaling hw.
254 */
255 status = ql_wait_cfg(qdev, bit);
256 exit:
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
258 pci_unmap_single(qdev->pdev, map, size, direction);
259 return status;
260 }
261
262 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 u32 *value)
265 {
266 u32 offset = 0;
267 int status;
268
269 switch (type) {
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
272 {
273 status =
274 ql_wait_reg_rdy(qdev,
275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
276 if (status)
277 goto exit;
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 status =
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
284 if (status)
285 goto exit;
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 status =
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
290 if (status)
291 goto exit;
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 status =
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
298 if (status)
299 goto exit;
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 status =
303 ql_wait_reg_rdy(qdev,
304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
305 if (status)
306 goto exit;
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 status =
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 MAC_ADDR_MR, 0);
313 if (status)
314 goto exit;
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 }
317 break;
318 }
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
321 default:
322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
324 status = -EPERM;
325 }
326 exit:
327 return status;
328 }
329
330 /* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
332 */
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 u16 index)
335 {
336 u32 offset = 0;
337 int status = 0;
338
339 switch (type) {
340 case MAC_ADDR_TYPE_MULTI_MAC:
341 {
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
345
346 status =
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
349 if (status)
350 goto exit;
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
353 type | MAC_ADDR_E);
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 status =
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
358 if (status)
359 goto exit;
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
362 type | MAC_ADDR_E);
363
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 status =
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 goto exit;
370 break;
371 }
372 case MAC_ADDR_TYPE_CAM_MAC:
373 {
374 u32 cam_output;
375 u32 upper = (addr[0] << 8) | addr[1];
376 u32 lower =
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
378 (addr[5]);
379
380 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
381 "Adding %s address %pM at index %d in the CAM.\n",
382 type == MAC_ADDR_TYPE_MULTI_MAC ?
383 "MULTICAST" : "UNICAST",
384 addr, index);
385
386 status =
387 ql_wait_reg_rdy(qdev,
388 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
389 if (status)
390 goto exit;
391 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
392 (index << MAC_ADDR_IDX_SHIFT) | /* index */
393 type); /* type */
394 ql_write32(qdev, MAC_ADDR_DATA, lower);
395 status =
396 ql_wait_reg_rdy(qdev,
397 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
398 if (status)
399 goto exit;
400 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
401 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 type); /* type */
403 ql_write32(qdev, MAC_ADDR_DATA, upper);
404 status =
405 ql_wait_reg_rdy(qdev,
406 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
407 if (status)
408 goto exit;
409 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
410 (index << MAC_ADDR_IDX_SHIFT) | /* index */
411 type); /* type */
412                 /* This field should also include the queue id
413                  * and possibly the function id.  Right now we hardcode
414                  * the route field to NIC core.
415                  */
416 cam_output = (CAM_OUT_ROUTE_NIC |
417 (qdev->
418 func << CAM_OUT_FUNC_SHIFT) |
419 (0 << CAM_OUT_CQ_ID_SHIFT));
420 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
421 cam_output |= CAM_OUT_RV;
422 /* route to NIC core */
423 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
424 break;
425 }
426 case MAC_ADDR_TYPE_VLAN:
427 {
428 u32 enable_bit = *((u32 *) &addr[0]);
429 /* For VLAN, the addr actually holds a bit that
430 * either enables or disables the vlan id we are
431 * addressing. It's either MAC_ADDR_E on or off.
432 * That's bit-27 we're talking about.
433 */
434 netif_info(qdev, ifup, qdev->ndev,
435 "%s VLAN ID %d %s the CAM.\n",
436 enable_bit ? "Adding" : "Removing",
437 index,
438 enable_bit ? "to" : "from");
439
440 status =
441 ql_wait_reg_rdy(qdev,
442 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
443 if (status)
444 goto exit;
445 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
446 (index << MAC_ADDR_IDX_SHIFT) | /* index */
447 type | /* type */
448 enable_bit); /* enable/disable */
449 break;
450 }
451 case MAC_ADDR_TYPE_MULTI_FLTR:
452 default:
453 netif_crit(qdev, ifup, qdev->ndev,
454 "Address type %d not yet supported.\n", type);
455 status = -EPERM;
456 }
457 exit:
458 return status;
459 }
460
461 /* Set or clear MAC address in hardware. We sometimes
462 * have to clear it to prevent wrong frame routing
463 * especially in a bonding environment.
464 */
465 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
466 {
467 int status;
468 char zero_mac_addr[ETH_ALEN];
469 char *addr;
470
471 if (set) {
472 addr = &qdev->current_mac_addr[0];
473 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
474 "Set Mac addr %pM\n", addr);
475 } else {
476 memset(zero_mac_addr, 0, ETH_ALEN);
477 addr = &zero_mac_addr[0];
478 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
479 "Clearing MAC address\n");
480 }
481 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
482 if (status)
483 return status;
484 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
485 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
486 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
487 if (status)
488 netif_err(qdev, ifup, qdev->ndev,
489 "Failed to init mac address.\n");
490 return status;
491 }
492
493 void ql_link_on(struct ql_adapter *qdev)
494 {
495 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
496 netif_carrier_on(qdev->ndev);
497 ql_set_mac_addr(qdev, 1);
498 }
499
500 void ql_link_off(struct ql_adapter *qdev)
501 {
502 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
503 netif_carrier_off(qdev->ndev);
504 ql_set_mac_addr(qdev, 0);
505 }
506
507 /* Get a specific frame routing value from the CAM.
508 * Used for debug and reg dump.
509 */
510 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
511 {
512 int status = 0;
513
514 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
515 if (status)
516 goto exit;
517
518 ql_write32(qdev, RT_IDX,
519 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
520 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
521 if (status)
522 goto exit;
523 *value = ql_read32(qdev, RT_DATA);
524 exit:
525 return status;
526 }
527
528 /* The NIC function for this chip has 16 routing indexes. Each one can be used
529 * to route different frame types to various inbound queues. We send broadcast/
530 * multicast/error frames to the default queue for slow handling,
531 * and CAM hit/RSS frames to the fast handling queues.
532 */
533 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
534 int enable)
535 {
536 int status = -EINVAL; /* Return error if no mask match. */
537 u32 value = 0;
538
539 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
540 "%s %s mask %s the routing reg.\n",
541 enable ? "Adding" : "Removing",
542 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
543 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
544 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
545 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
546 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
547 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
548 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
549 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
550 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
551 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
552 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
553 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
554 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
555 index == RT_IDX_UNUSED013 ? "UNUSED13" :
556 index == RT_IDX_UNUSED014 ? "UNUSED14" :
557 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
558 "(Bad index != RT_IDX)",
559 enable ? "to" : "from");
560
561 switch (mask) {
562 case RT_IDX_CAM_HIT:
563 {
564 value = RT_IDX_DST_CAM_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
567 break;
568 }
569 case RT_IDX_VALID: /* Promiscuous Mode frames. */
570 {
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
574 break;
575 }
576 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
577 {
578 value = RT_IDX_DST_DFLT_Q | /* dest */
579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
581 break;
582 }
583 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
584 {
585 value = RT_IDX_DST_DFLT_Q | /* dest */
586 RT_IDX_TYPE_NICQ | /* type */
587 (RT_IDX_IP_CSUM_ERR_SLOT <<
588 RT_IDX_IDX_SHIFT); /* index */
589 break;
590 }
591 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
592 {
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
596 RT_IDX_IDX_SHIFT); /* index */
597 break;
598 }
599 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
600 {
601 value = RT_IDX_DST_DFLT_Q | /* dest */
602 RT_IDX_TYPE_NICQ | /* type */
603 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
604 break;
605 }
606 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
607 {
608 value = RT_IDX_DST_DFLT_Q | /* dest */
609 RT_IDX_TYPE_NICQ | /* type */
610 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
611 break;
612 }
613 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
614 {
615 value = RT_IDX_DST_DFLT_Q | /* dest */
616 RT_IDX_TYPE_NICQ | /* type */
617 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
618 break;
619 }
620 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
621 {
622 value = RT_IDX_DST_RSS | /* dest */
623 RT_IDX_TYPE_NICQ | /* type */
624 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
625 break;
626 }
627 case 0: /* Clear the E-bit on an entry. */
628 {
629 value = RT_IDX_DST_DFLT_Q | /* dest */
630 RT_IDX_TYPE_NICQ | /* type */
631 (index << RT_IDX_IDX_SHIFT);/* index */
632 break;
633 }
634 default:
635 netif_err(qdev, ifup, qdev->ndev,
636 "Mask type %d not yet supported.\n", mask);
637 status = -EPERM;
638 goto exit;
639 }
640
641 if (value) {
642 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
643 if (status)
644 goto exit;
645 value |= (enable ? RT_IDX_E : 0);
646 ql_write32(qdev, RT_IDX, value);
647 ql_write32(qdev, RT_DATA, enable ? mask : 0);
648 }
649 exit:
650 return status;
651 }
652
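/* Globally gate the chip's interrupt output (INTR_EN_EI).  Individual
 * completion-queue interrupts are enabled/disabled separately below.
 */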
653 static void ql_enable_interrupts(struct ql_adapter *qdev)
654 {
655 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
656 }
657
658 static void ql_disable_interrupts(struct ql_adapter *qdev)
659 {
660 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
661 }
662
663 /* If we're running with multiple MSI-X vectors then we enable on the fly.
664 * Otherwise, we may have multiple outstanding workers and don't want to
665 * enable until the last one finishes. In this case, the irq_cnt gets
666 * incremented every time we queue a worker and decremented every time
667 * a worker finishes. Once it hits zero we enable the interrupt.
668 */
669 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
670 {
671 u32 var = 0;
672 unsigned long hw_flags = 0;
673 struct intr_context *ctx = qdev->intr_context + intr;
674
675 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
676                 /* Always enable if we're running multiple MSI-X interrupts
677                  * and it's not the default (zeroth) interrupt.
678 */
679 ql_write32(qdev, INTR_EN,
680 ctx->intr_en_mask);
681 var = ql_read32(qdev, STS);
682 return var;
683 }
684
685 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
686 if (atomic_dec_and_test(&ctx->irq_cnt)) {
687 ql_write32(qdev, INTR_EN,
688 ctx->intr_en_mask);
689 var = ql_read32(qdev, STS);
690 }
691 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
692 return var;
693 }
694
695 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
696 {
697 u32 var = 0;
698 struct intr_context *ctx;
699
700         /* HW disables for us if we're running multiple MSI-X interrupts
701          * and it's not the default (zeroth) interrupt.
702 */
703 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
704 return 0;
705
706 ctx = qdev->intr_context + intr;
707 spin_lock(&qdev->hw_lock);
708 if (!atomic_read(&ctx->irq_cnt)) {
709 ql_write32(qdev, INTR_EN,
710 ctx->intr_dis_mask);
711 var = ql_read32(qdev, STS);
712 }
713 atomic_inc(&ctx->irq_cnt);
714 spin_unlock(&qdev->hw_lock);
715 return var;
716 }
717
718 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
719 {
720 int i;
721 for (i = 0; i < qdev->intr_count; i++) {
722                 /* The enable call does an atomic_dec_and_test
723 * and enables only if the result is zero.
724 * So we precharge it here.
725 */
726 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
727 i == 0))
728 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
729 ql_enable_completion_interrupt(qdev, i);
730 }
731
732 }
733
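/* Validate the flash image just read into qdev->flash: the first four
 * bytes must match the expected signature string and the 16-bit word
 * checksum over the whole image must be zero.  Returns 0 if valid.
 */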
734 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
735 {
736 int status, i;
737 u16 csum = 0;
738 __le16 *flash = (__le16 *)&qdev->flash;
739
740 status = strncmp((char *)&qdev->flash, str, 4);
741 if (status) {
742 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
743 return status;
744 }
745
746 for (i = 0; i < size; i++)
747 csum += le16_to_cpu(*flash++);
748
749 if (csum)
750 netif_err(qdev, ifup, qdev->ndev,
751 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
752
753 return csum;
754 }
755
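/* Read one 32-bit word of serial flash through the FLASH_ADDR/FLASH_DATA
 * register pair.  The word is returned in flash (little endian) order.
 */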
756 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
757 {
758 int status = 0;
759 /* wait for reg to come ready */
760 status = ql_wait_reg_rdy(qdev,
761 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
762 if (status)
763 goto exit;
764 /* set up for reg read */
765 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
766 /* wait for reg to come ready */
767 status = ql_wait_reg_rdy(qdev,
768 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
769 if (status)
770 goto exit;
771 /* This data is stored on flash as an array of
772 * __le32. Since ql_read32() returns cpu endian
773 * we need to swap it back.
774 */
775 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
776 exit:
777 return status;
778 }
779
780 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
781 {
782 u32 i, size;
783 int status;
784 __le32 *p = (__le32 *)&qdev->flash;
785 u32 offset;
786 u8 mac_addr[6];
787
788 /* Get flash offset for function and adjust
789 * for dword access.
790 */
791 if (!qdev->port)
792 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
793 else
794 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
795
796 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
797 return -ETIMEDOUT;
798
799 size = sizeof(struct flash_params_8000) / sizeof(u32);
800 for (i = 0; i < size; i++, p++) {
801 status = ql_read_flash_word(qdev, i+offset, p);
802 if (status) {
803 netif_err(qdev, ifup, qdev->ndev,
804 "Error reading flash.\n");
805 goto exit;
806 }
807 }
808
809 status = ql_validate_flash(qdev,
810 sizeof(struct flash_params_8000) / sizeof(u16),
811 "8000");
812 if (status) {
813 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
814 status = -EINVAL;
815 goto exit;
816 }
817
818 /* Extract either manufacturer or BOFM modified
819 * MAC address.
820 */
821 if (qdev->flash.flash_params_8000.data_type1 == 2)
822 memcpy(mac_addr,
823 qdev->flash.flash_params_8000.mac_addr1,
824 qdev->ndev->addr_len);
825 else
826 memcpy(mac_addr,
827 qdev->flash.flash_params_8000.mac_addr,
828 qdev->ndev->addr_len);
829
830 if (!is_valid_ether_addr(mac_addr)) {
831 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
832 status = -EINVAL;
833 goto exit;
834 }
835
836 memcpy(qdev->ndev->dev_addr,
837 mac_addr,
838 qdev->ndev->addr_len);
839
840 exit:
841 ql_sem_unlock(qdev, SEM_FLASH_MASK);
842 return status;
843 }
844
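/* Read this function's flash parameter block on 8012-series adapters,
 * validate it, and copy the MAC address into the netdev.
 */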
845 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
846 {
847 int i;
848 int status;
849 __le32 *p = (__le32 *)&qdev->flash;
850 u32 offset = 0;
851 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
852
853 /* Second function's parameters follow the first
854 * function's.
855 */
856 if (qdev->port)
857 offset = size;
858
859 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
860 return -ETIMEDOUT;
861
862 for (i = 0; i < size; i++, p++) {
863 status = ql_read_flash_word(qdev, i+offset, p);
864 if (status) {
865 netif_err(qdev, ifup, qdev->ndev,
866 "Error reading flash.\n");
867 goto exit;
868 }
869
870 }
871
872 status = ql_validate_flash(qdev,
873 sizeof(struct flash_params_8012) / sizeof(u16),
874 "8012");
875 if (status) {
876 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
877 status = -EINVAL;
878 goto exit;
879 }
880
881 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
882 status = -EINVAL;
883 goto exit;
884 }
885
886 memcpy(qdev->ndev->dev_addr,
887 qdev->flash.flash_params_8012.mac_addr,
888 qdev->ndev->addr_len);
889
890 exit:
891 ql_sem_unlock(qdev, SEM_FLASH_MASK);
892 return status;
893 }
894
895 /* The xgmac registers are located behind the xgmac_addr and xgmac_data
896 * register pair. Each read/write requires us to wait for the ready
897 * bit before reading/writing the data.
898 */
899 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
900 {
901 int status;
902 /* wait for reg to come ready */
903 status = ql_wait_reg_rdy(qdev,
904 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
905 if (status)
906 return status;
907 /* write the data to the data reg */
908 ql_write32(qdev, XGMAC_DATA, data);
909 /* trigger the write */
910 ql_write32(qdev, XGMAC_ADDR, reg);
911 return status;
912 }
913
914 /* The xgmac registers are located behind the xgmac_addr and xgmac_data
915 * register pair. Each read/write requires us to wait for the ready
916 * bit before reading/writing the data.
917 */
918 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
919 {
920 int status = 0;
921 /* wait for reg to come ready */
922 status = ql_wait_reg_rdy(qdev,
923 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
924 if (status)
925 goto exit;
926 /* set up for reg read */
927 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
928 /* wait for reg to come ready */
929 status = ql_wait_reg_rdy(qdev,
930 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
931 if (status)
932 goto exit;
933 /* get the data */
934 *data = ql_read32(qdev, XGMAC_DATA);
935 exit:
936 return status;
937 }
938
939 /* This is used for reading the 64-bit statistics regs. */
940 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
941 {
942 int status = 0;
943 u32 hi = 0;
944 u32 lo = 0;
945
946 status = ql_read_xgmac_reg(qdev, reg, &lo);
947 if (status)
948 goto exit;
949
950 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
951 if (status)
952 goto exit;
953
954 *data = (u64) lo | ((u64) hi << 32);
955
956 exit:
957 return status;
958 }
959
960 static int ql_8000_port_initialize(struct ql_adapter *qdev)
961 {
962 int status;
963 /*
964 * Get MPI firmware version for driver banner
965          * and ethtool info.
966 */
967 status = ql_mb_about_fw(qdev);
968 if (status)
969 goto exit;
970 status = ql_mb_get_fw_state(qdev);
971 if (status)
972 goto exit;
973 /* Wake up a worker to get/set the TX/RX frame sizes. */
974 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
975 exit:
976 return status;
977 }
978
979 /* Take the MAC Core out of reset.
980 * Enable statistics counting.
981 * Take the transmitter/receiver out of reset.
982 * This functionality may be done in the MPI firmware at a
983 * later date.
984 */
985 static int ql_8012_port_initialize(struct ql_adapter *qdev)
986 {
987 int status = 0;
988 u32 data;
989
990 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
991 /* Another function has the semaphore, so
992 * wait for the port init bit to come ready.
993 */
994 netif_info(qdev, link, qdev->ndev,
995 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
996 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
997 if (status) {
998 netif_crit(qdev, link, qdev->ndev,
999                                    "Port initialization timed out.\n");
1000 }
1001 return status;
1002 }
1003
1004         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1005 /* Set the core reset. */
1006 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1007 if (status)
1008 goto end;
1009 data |= GLOBAL_CFG_RESET;
1010 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1011 if (status)
1012 goto end;
1013
1014 /* Clear the core reset and turn on jumbo for receiver. */
1015 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1016 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1017 data |= GLOBAL_CFG_TX_STAT_EN;
1018 data |= GLOBAL_CFG_RX_STAT_EN;
1019 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1020 if (status)
1021 goto end;
1022
1023         /* Enable transmitter, and clear its reset.  */
1024 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1025 if (status)
1026 goto end;
1027 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1028 data |= TX_CFG_EN; /* Enable the transmitter. */
1029 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1030 if (status)
1031 goto end;
1032
1033         /* Enable receiver and clear its reset. */
1034 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1035 if (status)
1036 goto end;
1037 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1038 data |= RX_CFG_EN; /* Enable the receiver. */
1039 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1040 if (status)
1041 goto end;
1042
1043 /* Turn on jumbo. */
1044 status =
1045 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1046 if (status)
1047 goto end;
1048 status =
1049 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1050 if (status)
1051 goto end;
1052
1053 /* Signal to the world that the port is enabled. */
1054 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1055 end:
1056 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1057 return status;
1058 }
1059
1060 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1061 {
1062 return PAGE_SIZE << qdev->lbq_buf_order;
1063 }
1064
1065 /* Get the next large buffer. */
1066 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1067 {
1068 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1069 rx_ring->lbq_curr_idx++;
1070 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1071 rx_ring->lbq_curr_idx = 0;
1072 rx_ring->lbq_free_cnt++;
1073 return lbq_desc;
1074 }
1075
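/* Return the current large-buffer descriptor, syncing its page chunk
 * for CPU access.  The master page is unmapped once its last chunk has
 * been consumed.
 */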
1076 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1077 struct rx_ring *rx_ring)
1078 {
1079 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1080
1081 pci_dma_sync_single_for_cpu(qdev->pdev,
1082 dma_unmap_addr(lbq_desc, mapaddr),
1083 rx_ring->lbq_buf_size,
1084 PCI_DMA_FROMDEVICE);
1085
1086 /* If it's the last chunk of our master page then
1087 * we unmap it.
1088 */
1089 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1090 == ql_lbq_block_size(qdev))
1091 pci_unmap_page(qdev->pdev,
1092 lbq_desc->p.pg_chunk.map,
1093 ql_lbq_block_size(qdev),
1094 PCI_DMA_FROMDEVICE);
1095 return lbq_desc;
1096 }
1097
1098 /* Get the next small buffer. */
1099 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1100 {
1101 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1102 rx_ring->sbq_curr_idx++;
1103 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1104 rx_ring->sbq_curr_idx = 0;
1105 rx_ring->sbq_free_cnt++;
1106 return sbq_desc;
1107 }
1108
1109 /* Update an rx ring index. */
1110 static void ql_update_cq(struct rx_ring *rx_ring)
1111 {
1112 rx_ring->cnsmr_idx++;
1113 rx_ring->curr_entry++;
1114 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1115 rx_ring->cnsmr_idx = 0;
1116 rx_ring->curr_entry = rx_ring->cq_base;
1117 }
1118 }
1119
1120 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1121 {
1122 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1123 }
1124
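/* Carve the next lbq_buf_size chunk out of the rx ring's master page,
 * allocating and DMA-mapping a fresh page of order lbq_buf_order when
 * the previous one has been used up.
 */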
1125 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1126 struct bq_desc *lbq_desc)
1127 {
1128 if (!rx_ring->pg_chunk.page) {
1129 u64 map;
1130 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1131 GFP_ATOMIC,
1132 qdev->lbq_buf_order);
1133 if (unlikely(!rx_ring->pg_chunk.page)) {
1134 netif_err(qdev, drv, qdev->ndev,
1135 "page allocation failed.\n");
1136 return -ENOMEM;
1137 }
1138 rx_ring->pg_chunk.offset = 0;
1139 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1140 0, ql_lbq_block_size(qdev),
1141 PCI_DMA_FROMDEVICE);
1142 if (pci_dma_mapping_error(qdev->pdev, map)) {
1143 __free_pages(rx_ring->pg_chunk.page,
1144 qdev->lbq_buf_order);
1145 netif_err(qdev, drv, qdev->ndev,
1146 "PCI mapping failed.\n");
1147 return -ENOMEM;
1148 }
1149 rx_ring->pg_chunk.map = map;
1150 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1151 }
1152
1153 /* Copy the current master pg_chunk info
1154 * to the current descriptor.
1155 */
1156 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1157
1158 /* Adjust the master page chunk for next
1159 * buffer get.
1160 */
1161 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1162 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1163 rx_ring->pg_chunk.page = NULL;
1164 lbq_desc->p.pg_chunk.last_flag = 1;
1165 } else {
1166 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1167 get_page(rx_ring->pg_chunk.page);
1168 lbq_desc->p.pg_chunk.last_flag = 0;
1169 }
1170 return 0;
1171 }
1172 /* Process (refill) a large buffer queue. */
1173 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1174 {
1175 u32 clean_idx = rx_ring->lbq_clean_idx;
1176 u32 start_idx = clean_idx;
1177 struct bq_desc *lbq_desc;
1178 u64 map;
1179 int i;
1180
1181 while (rx_ring->lbq_free_cnt > 32) {
1182 for (i = 0; i < 16; i++) {
1183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184 "lbq: try cleaning clean_idx = %d.\n",
1185 clean_idx);
1186 lbq_desc = &rx_ring->lbq[clean_idx];
1187 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1188 netif_err(qdev, ifup, qdev->ndev,
1189 "Could not get a page chunk.\n");
1190 return;
1191 }
1192
1193 map = lbq_desc->p.pg_chunk.map +
1194 lbq_desc->p.pg_chunk.offset;
1195 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1196 dma_unmap_len_set(lbq_desc, maplen,
1197 rx_ring->lbq_buf_size);
1198 *lbq_desc->addr = cpu_to_le64(map);
1199
1200 pci_dma_sync_single_for_device(qdev->pdev, map,
1201 rx_ring->lbq_buf_size,
1202 PCI_DMA_FROMDEVICE);
1203 clean_idx++;
1204 if (clean_idx == rx_ring->lbq_len)
1205 clean_idx = 0;
1206 }
1207
1208 rx_ring->lbq_clean_idx = clean_idx;
1209 rx_ring->lbq_prod_idx += 16;
1210 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1211 rx_ring->lbq_prod_idx = 0;
1212 rx_ring->lbq_free_cnt -= 16;
1213 }
1214
1215 if (start_idx != clean_idx) {
1216 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1217 "lbq: updating prod idx = %d.\n",
1218 rx_ring->lbq_prod_idx);
1219 ql_write_db_reg(rx_ring->lbq_prod_idx,
1220 rx_ring->lbq_prod_idx_db_reg);
1221 }
1222 }
1223
1224 /* Process (refill) a small buffer queue. */
1225 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1226 {
1227 u32 clean_idx = rx_ring->sbq_clean_idx;
1228 u32 start_idx = clean_idx;
1229 struct bq_desc *sbq_desc;
1230 u64 map;
1231 int i;
1232
1233 while (rx_ring->sbq_free_cnt > 16) {
1234 for (i = 0; i < 16; i++) {
1235 sbq_desc = &rx_ring->sbq[clean_idx];
1236 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1237 "sbq: try cleaning clean_idx = %d.\n",
1238 clean_idx);
1239 if (sbq_desc->p.skb == NULL) {
1240 netif_printk(qdev, rx_status, KERN_DEBUG,
1241 qdev->ndev,
1242 "sbq: getting new skb for index %d.\n",
1243 sbq_desc->index);
1244 sbq_desc->p.skb =
1245 netdev_alloc_skb(qdev->ndev,
1246 SMALL_BUFFER_SIZE);
1247 if (sbq_desc->p.skb == NULL) {
1248 netif_err(qdev, probe, qdev->ndev,
1249 "Couldn't get an skb.\n");
1250 rx_ring->sbq_clean_idx = clean_idx;
1251 return;
1252 }
1253 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1254 map = pci_map_single(qdev->pdev,
1255 sbq_desc->p.skb->data,
1256 rx_ring->sbq_buf_size,
1257 PCI_DMA_FROMDEVICE);
1258 if (pci_dma_mapping_error(qdev->pdev, map)) {
1259 netif_err(qdev, ifup, qdev->ndev,
1260 "PCI mapping failed.\n");
1261 rx_ring->sbq_clean_idx = clean_idx;
1262 dev_kfree_skb_any(sbq_desc->p.skb);
1263 sbq_desc->p.skb = NULL;
1264 return;
1265 }
1266 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1267 dma_unmap_len_set(sbq_desc, maplen,
1268 rx_ring->sbq_buf_size);
1269 *sbq_desc->addr = cpu_to_le64(map);
1270 }
1271
1272 clean_idx++;
1273 if (clean_idx == rx_ring->sbq_len)
1274 clean_idx = 0;
1275 }
1276 rx_ring->sbq_clean_idx = clean_idx;
1277 rx_ring->sbq_prod_idx += 16;
1278 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1279 rx_ring->sbq_prod_idx = 0;
1280 rx_ring->sbq_free_cnt -= 16;
1281 }
1282
1283 if (start_idx != clean_idx) {
1284 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1285 "sbq: updating prod idx = %d.\n",
1286 rx_ring->sbq_prod_idx);
1287 ql_write_db_reg(rx_ring->sbq_prod_idx,
1288 rx_ring->sbq_prod_idx_db_reg);
1289 }
1290 }
1291
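/* Refill both the small and large buffer queues for this rx ring. */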
1292 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1293 struct rx_ring *rx_ring)
1294 {
1295 ql_update_sbq(qdev, rx_ring);
1296 ql_update_lbq(qdev, rx_ring);
1297 }
1298
1299 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1300 * fails at some stage, or from the interrupt when a tx completes.
1301 */
1302 static void ql_unmap_send(struct ql_adapter *qdev,
1303 struct tx_ring_desc *tx_ring_desc, int mapped)
1304 {
1305 int i;
1306 for (i = 0; i < mapped; i++) {
1307 if (i == 0 || (i == 7 && mapped > 7)) {
1308 /*
1309 * Unmap the skb->data area, or the
1310 * external sglist (AKA the Outbound
1311 * Address List (OAL)).
1312                          * If it's the zeroth element, then it's
1313                          * the skb->data area. If it's the 7th
1314                          * element and there are more than 6 frags,
1315                          * then it's an OAL.
1316 */
1317 if (i == 7) {
1318 netif_printk(qdev, tx_done, KERN_DEBUG,
1319 qdev->ndev,
1320 "unmapping OAL area.\n");
1321 }
1322 pci_unmap_single(qdev->pdev,
1323 dma_unmap_addr(&tx_ring_desc->map[i],
1324 mapaddr),
1325 dma_unmap_len(&tx_ring_desc->map[i],
1326 maplen),
1327 PCI_DMA_TODEVICE);
1328 } else {
1329 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1330 "unmapping frag %d.\n", i);
1331 pci_unmap_page(qdev->pdev,
1332 dma_unmap_addr(&tx_ring_desc->map[i],
1333 mapaddr),
1334 dma_unmap_len(&tx_ring_desc->map[i],
1335 maplen), PCI_DMA_TODEVICE);
1336 }
1337 }
1338
1339 }
1340
1341 /* Map the buffers for this transmit. This will return
1342 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 */
1344 static int ql_map_send(struct ql_adapter *qdev,
1345 struct ob_mac_iocb_req *mac_iocb_ptr,
1346 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1347 {
1348 int len = skb_headlen(skb);
1349 dma_addr_t map;
1350 int frag_idx, err, map_idx = 0;
1351 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1352 int frag_cnt = skb_shinfo(skb)->nr_frags;
1353
1354 if (frag_cnt) {
1355 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1356 "frag_cnt = %d.\n", frag_cnt);
1357 }
1358 /*
1359 * Map the skb buffer first.
1360 */
1361 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1362
1363 err = pci_dma_mapping_error(qdev->pdev, map);
1364 if (err) {
1365 netif_err(qdev, tx_queued, qdev->ndev,
1366 "PCI mapping failed with error: %d\n", err);
1367
1368 return NETDEV_TX_BUSY;
1369 }
1370
1371 tbd->len = cpu_to_le32(len);
1372 tbd->addr = cpu_to_le64(map);
1373 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1374 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1375 map_idx++;
1376
1377 /*
1378 * This loop fills the remainder of the 8 address descriptors
1379 * in the IOCB. If there are more than 7 fragments, then the
1380 * eighth address desc will point to an external list (OAL).
1381 * When this happens, the remainder of the frags will be stored
1382 * in this list.
1383 */
1384 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1385 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1386 tbd++;
1387 if (frag_idx == 6 && frag_cnt > 7) {
1388 /* Let's tack on an sglist.
1389 * Our control block will now
1390 * look like this:
1391 * iocb->seg[0] = skb->data
1392 * iocb->seg[1] = frag[0]
1393 * iocb->seg[2] = frag[1]
1394 * iocb->seg[3] = frag[2]
1395 * iocb->seg[4] = frag[3]
1396 * iocb->seg[5] = frag[4]
1397 * iocb->seg[6] = frag[5]
1398 * iocb->seg[7] = ptr to OAL (external sglist)
1399 * oal->seg[0] = frag[6]
1400 * oal->seg[1] = frag[7]
1401 * oal->seg[2] = frag[8]
1402 * oal->seg[3] = frag[9]
1403 * oal->seg[4] = frag[10]
1404 * etc...
1405 */
1406 /* Tack on the OAL in the eighth segment of IOCB. */
1407 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1408 sizeof(struct oal),
1409 PCI_DMA_TODEVICE);
1410 err = pci_dma_mapping_error(qdev->pdev, map);
1411 if (err) {
1412 netif_err(qdev, tx_queued, qdev->ndev,
1413                                           "PCI mapping of outbound address list failed with error: %d\n",
1414 err);
1415 goto map_error;
1416 }
1417
1418 tbd->addr = cpu_to_le64(map);
1419 /*
1420 * The length is the number of fragments
1421 * that remain to be mapped times the length
1422 * of our sglist (OAL).
1423 */
1424 tbd->len =
1425 cpu_to_le32((sizeof(struct tx_buf_desc) *
1426 (frag_cnt - frag_idx)) | TX_DESC_C);
1427 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1428 map);
1429 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1430 sizeof(struct oal));
1431 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1432 map_idx++;
1433 }
1434
1435 map =
1436 pci_map_page(qdev->pdev, frag->page,
1437 frag->page_offset, frag->size,
1438 PCI_DMA_TODEVICE);
1439
1440 err = pci_dma_mapping_error(qdev->pdev, map);
1441 if (err) {
1442 netif_err(qdev, tx_queued, qdev->ndev,
1443 "PCI mapping frags failed with error: %d.\n",
1444 err);
1445 goto map_error;
1446 }
1447
1448 tbd->addr = cpu_to_le64(map);
1449 tbd->len = cpu_to_le32(frag->size);
1450 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1451 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1452 frag->size);
1453
1454 }
1455 /* Save the number of segments we've mapped. */
1456 tx_ring_desc->map_cnt = map_idx;
1457 /* Terminate the last segment. */
1458 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1459 return NETDEV_TX_OK;
1460
1461 map_error:
1462 /*
1463          * If the first frag mapping failed, map_idx will be one,
1464          * which unmaps only the skb->data area.  Otherwise we pass
1465          * in the number of segments that mapped successfully so
1466          * they can be unmapped.
1467 */
1468 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1469 return NETDEV_TX_BUSY;
1470 }
1471
1472 /* Process an inbound completion whose data is in a page chunk: pass it up via napi_gro_frags(). */
1473 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1474 struct rx_ring *rx_ring,
1475 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 u32 length,
1477 u16 vlan_id)
1478 {
1479 struct sk_buff *skb;
1480 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1481 struct skb_frag_struct *rx_frag;
1482 int nr_frags;
1483 struct napi_struct *napi = &rx_ring->napi;
1484
1485 napi->dev = qdev->ndev;
1486
1487 skb = napi_get_frags(napi);
1488 if (!skb) {
1489 netif_err(qdev, drv, qdev->ndev,
1490 "Couldn't get an skb, exiting.\n");
1491 rx_ring->rx_dropped++;
1492 put_page(lbq_desc->p.pg_chunk.page);
1493 return;
1494 }
1495 prefetch(lbq_desc->p.pg_chunk.va);
1496 rx_frag = skb_shinfo(skb)->frags;
1497 nr_frags = skb_shinfo(skb)->nr_frags;
1498 rx_frag += nr_frags;
1499 rx_frag->page = lbq_desc->p.pg_chunk.page;
1500 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1501 rx_frag->size = length;
1502
1503 skb->len += length;
1504 skb->data_len += length;
1505 skb->truesize += length;
1506 skb_shinfo(skb)->nr_frags++;
1507
1508 rx_ring->rx_packets++;
1509 rx_ring->rx_bytes += length;
1510 skb->ip_summed = CHECKSUM_UNNECESSARY;
1511 skb_record_rx_queue(skb, rx_ring->cq_id);
1512 if (vlan_id != 0xffff)
1513 __vlan_hwaccel_put_tag(skb, vlan_id);
1514 napi_gro_frags(napi);
1515 }
1516
1517 /* Process an inbound completion whose data is in a page chunk: copy the header, chain the page to a new skb. */
1518 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1519 struct rx_ring *rx_ring,
1520 struct ib_mac_iocb_rsp *ib_mac_rsp,
1521 u32 length,
1522 u16 vlan_id)
1523 {
1524 struct net_device *ndev = qdev->ndev;
1525 struct sk_buff *skb = NULL;
1526 void *addr;
1527 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1528 struct napi_struct *napi = &rx_ring->napi;
1529
1530 skb = netdev_alloc_skb(ndev, length);
1531 if (!skb) {
1532 netif_err(qdev, drv, qdev->ndev,
1533                           "Couldn't get an skb, need to unwind!\n");
1534 rx_ring->rx_dropped++;
1535 put_page(lbq_desc->p.pg_chunk.page);
1536 return;
1537 }
1538
1539 addr = lbq_desc->p.pg_chunk.va;
1540 prefetch(addr);
1541
1542
1543 /* Frame error, so drop the packet. */
1544 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1545 netif_info(qdev, drv, qdev->ndev,
1546 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1547 rx_ring->rx_errors++;
1548 goto err_out;
1549 }
1550
1551 /* The max framesize filter on this chip is set higher than
1552 * MTU since FCoE uses 2k frames.
1553 */
1554 if (skb->len > ndev->mtu + ETH_HLEN) {
1555 netif_err(qdev, drv, qdev->ndev,
1556                           "Frame too long, dropping.\n");
1557 rx_ring->rx_dropped++;
1558 goto err_out;
1559 }
1560 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1561 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1563 length);
1564 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1565 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1566 length-ETH_HLEN);
1567 skb->len += length-ETH_HLEN;
1568 skb->data_len += length-ETH_HLEN;
1569 skb->truesize += length-ETH_HLEN;
1570
1571 rx_ring->rx_packets++;
1572 rx_ring->rx_bytes += skb->len;
1573 skb->protocol = eth_type_trans(skb, ndev);
1574 skb_checksum_none_assert(skb);
1575
1576 if ((ndev->features & NETIF_F_RXCSUM) &&
1577 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1578 /* TCP frame. */
1579 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1580 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581 "TCP checksum done!\n");
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1584 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1585 /* Unfragmented ipv4 UDP frame. */
1586 struct iphdr *iph = (struct iphdr *) skb->data;
1587 if (!(iph->frag_off &
1588 cpu_to_be16(IP_MF|IP_OFFSET))) {
1589 skb->ip_summed = CHECKSUM_UNNECESSARY;
1590 netif_printk(qdev, rx_status, KERN_DEBUG,
1591 qdev->ndev,
1592                                              "UDP checksum done!\n");
1593 }
1594 }
1595 }
1596
1597 skb_record_rx_queue(skb, rx_ring->cq_id);
1598 if (vlan_id != 0xffff)
1599 __vlan_hwaccel_put_tag(skb, vlan_id);
1600 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1601 napi_gro_receive(napi, skb);
1602 else
1603 netif_receive_skb(skb);
1604 return;
1605 err_out:
1606 dev_kfree_skb_any(skb);
1607 put_page(lbq_desc->p.pg_chunk.page);
1608 }
1609
1610 /* Process an inbound completion whose data is in a small buffer: copy it into a new skb. */
1611 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1612 struct rx_ring *rx_ring,
1613 struct ib_mac_iocb_rsp *ib_mac_rsp,
1614 u32 length,
1615 u16 vlan_id)
1616 {
1617 struct net_device *ndev = qdev->ndev;
1618 struct sk_buff *skb = NULL;
1619 struct sk_buff *new_skb = NULL;
1620 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1621
1622 skb = sbq_desc->p.skb;
1623 /* Allocate new_skb and copy */
1624 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1625 if (new_skb == NULL) {
1626 netif_err(qdev, probe, qdev->ndev,
1627 "No skb available, drop the packet.\n");
1628 rx_ring->rx_dropped++;
1629 return;
1630 }
1631 skb_reserve(new_skb, NET_IP_ALIGN);
1632 memcpy(skb_put(new_skb, length), skb->data, length);
1633 skb = new_skb;
1634
1635 /* Frame error, so drop the packet. */
1636 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1637 netif_info(qdev, drv, qdev->ndev,
1638 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_errors++;
1641 return;
1642 }
1643
1644 /* loopback self test for ethtool */
1645 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1646 ql_check_lb_frame(qdev, skb);
1647 dev_kfree_skb_any(skb);
1648 return;
1649 }
1650
1651 /* The max framesize filter on this chip is set higher than
1652 * MTU since FCoE uses 2k frames.
1653 */
1654 if (skb->len > ndev->mtu + ETH_HLEN) {
1655 dev_kfree_skb_any(skb);
1656 rx_ring->rx_dropped++;
1657 return;
1658 }
1659
1660 prefetch(skb->data);
1661 skb->dev = ndev;
1662 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1663 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1664 "%s Multicast.\n",
1665 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1666 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1667 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1668 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1669 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1671 }
1672 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1673 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1674 "Promiscuous Packet.\n");
1675
1676 rx_ring->rx_packets++;
1677 rx_ring->rx_bytes += skb->len;
1678 skb->protocol = eth_type_trans(skb, ndev);
1679 skb_checksum_none_assert(skb);
1680
1681 /* If rx checksum is on, and there are no
1682 * csum or frame errors.
1683 */
1684 if ((ndev->features & NETIF_F_RXCSUM) &&
1685 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1686 /* TCP frame. */
1687 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689 "TCP checksum done!\n");
1690 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1692 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1693 /* Unfragmented ipv4 UDP frame. */
1694 struct iphdr *iph = (struct iphdr *) skb->data;
1695 if (!(iph->frag_off &
1696 ntohs(IP_MF|IP_OFFSET))) {
1697 skb->ip_summed = CHECKSUM_UNNECESSARY;
1698 netif_printk(qdev, rx_status, KERN_DEBUG,
1699 qdev->ndev,
1700                                              "UDP checksum done!\n");
1701 }
1702 }
1703 }
1704
1705 skb_record_rx_queue(skb, rx_ring->cq_id);
1706 if (vlan_id != 0xffff)
1707 __vlan_hwaccel_put_tag(skb, vlan_id);
1708 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1709 napi_gro_receive(&rx_ring->napi, skb);
1710 else
1711 netif_receive_skb(skb);
1712 }
1713
1714 static void ql_realign_skb(struct sk_buff *skb, int len)
1715 {
1716 void *temp_addr = skb->data;
1717
1718 /* Undo the skb_reserve(skb,32) we did before
1719 * giving to hardware, and realign data on
1720 * a 2-byte boundary.
1721 */
1722 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1723 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1724 skb_copy_to_linear_data(skb, temp_addr,
1725 (unsigned int)len);
1726 }
1727
1728 /*
1729 * This function builds an skb for the given inbound
1730 * completion. It will be rewritten for readability in the near
1731  * future, but for now it works well.
1732 */
1733 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1734 struct rx_ring *rx_ring,
1735 struct ib_mac_iocb_rsp *ib_mac_rsp)
1736 {
1737 struct bq_desc *lbq_desc;
1738 struct bq_desc *sbq_desc;
1739 struct sk_buff *skb = NULL;
1740 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1741 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1742
1743 /*
1744 * Handle the header buffer if present.
1745 */
1746 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1747 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1748 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1749 "Header of %d bytes in small buffer.\n", hdr_len);
1750 /*
1751 * Headers fit nicely into a small buffer.
1752 */
1753 sbq_desc = ql_get_curr_sbuf(rx_ring);
1754 pci_unmap_single(qdev->pdev,
1755 dma_unmap_addr(sbq_desc, mapaddr),
1756 dma_unmap_len(sbq_desc, maplen),
1757 PCI_DMA_FROMDEVICE);
1758 skb = sbq_desc->p.skb;
1759 ql_realign_skb(skb, hdr_len);
1760 skb_put(skb, hdr_len);
1761 sbq_desc->p.skb = NULL;
1762 }
1763
1764 /*
1765 * Handle the data buffer(s).
1766 */
1767 if (unlikely(!length)) { /* Is there data too? */
1768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "No Data buffer in this packet.\n");
1770 return skb;
1771 }
1772
1773 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1774 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1775 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776 "Headers in small, data of %d bytes in small, combine them.\n",
1777 length);
1778 /*
1779 * Data is less than small buffer size so it's
1780 * stuffed in a small buffer.
1781 * For this case we append the data
1782 * from the "data" small buffer to the "header" small
1783 * buffer.
1784 */
1785 sbq_desc = ql_get_curr_sbuf(rx_ring);
1786 pci_dma_sync_single_for_cpu(qdev->pdev,
1787 dma_unmap_addr
1788 (sbq_desc, mapaddr),
1789 dma_unmap_len
1790 (sbq_desc, maplen),
1791 PCI_DMA_FROMDEVICE);
1792 memcpy(skb_put(skb, length),
1793 sbq_desc->p.skb->data, length);
1794 pci_dma_sync_single_for_device(qdev->pdev,
1795 dma_unmap_addr
1796 (sbq_desc,
1797 mapaddr),
1798 dma_unmap_len
1799 (sbq_desc,
1800 maplen),
1801 PCI_DMA_FROMDEVICE);
1802 } else {
1803 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804 "%d bytes in a single small buffer.\n",
1805 length);
1806 sbq_desc = ql_get_curr_sbuf(rx_ring);
1807 skb = sbq_desc->p.skb;
1808 ql_realign_skb(skb, length);
1809 skb_put(skb, length);
1810 pci_unmap_single(qdev->pdev,
1811 dma_unmap_addr(sbq_desc,
1812 mapaddr),
1813 dma_unmap_len(sbq_desc,
1814 maplen),
1815 PCI_DMA_FROMDEVICE);
1816 sbq_desc->p.skb = NULL;
1817 }
1818 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1819 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1820 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 "Header in small, %d bytes in large. Chain large to small!\n",
1822 length);
1823 /*
1824 * The data is in a single large buffer. We
1825 * chain it to the header buffer's skb and let
1826 * it rip.
1827 */
1828 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "Chaining page at offset = %d, for %d bytes to skb.\n",
1831 lbq_desc->p.pg_chunk.offset, length);
1832 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1833 lbq_desc->p.pg_chunk.offset,
1834 length);
1835 skb->len += length;
1836 skb->data_len += length;
1837 skb->truesize += length;
1838 } else {
1839 /*
1840 * The headers and data are in a single large buffer. We
1841 * copy it to a new skb and let it go. This can happen with
1842 * jumbo mtu on a non-TCP/UDP frame.
1843 */
1844 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1845 skb = netdev_alloc_skb(qdev->ndev, length);
1846 if (skb == NULL) {
1847 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1848 "No skb available, drop the packet.\n");
1849 return NULL;
1850 }
1851 pci_unmap_page(qdev->pdev,
1852 dma_unmap_addr(lbq_desc,
1853 mapaddr),
1854 dma_unmap_len(lbq_desc, maplen),
1855 PCI_DMA_FROMDEVICE);
1856 skb_reserve(skb, NET_IP_ALIGN);
1857 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1858 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1859 length);
1860 skb_fill_page_desc(skb, 0,
1861 lbq_desc->p.pg_chunk.page,
1862 lbq_desc->p.pg_chunk.offset,
1863 length);
1864 skb->len += length;
1865 skb->data_len += length;
1866 skb->truesize += length;
1867 length -= length;
1868 __pskb_pull_tail(skb,
1869 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1870 VLAN_ETH_HLEN : ETH_HLEN);
1871 }
1872 } else {
1873 /*
1874 * The data is in a chain of large buffers
1875 * pointed to by a small buffer. We loop
1876                  * through and chain them to our small header
1877 * buffer's skb.
1878 * frags: There are 18 max frags and our small
1879 * buffer will hold 32 of them. The thing is,
1880 * we'll use 3 max for our 9000 byte jumbo
1881 * frames. If the MTU goes up we could
1882 * eventually be in trouble.
1883 */
1884 int size, i = 0;
1885 sbq_desc = ql_get_curr_sbuf(rx_ring);
1886 pci_unmap_single(qdev->pdev,
1887 dma_unmap_addr(sbq_desc, mapaddr),
1888 dma_unmap_len(sbq_desc, maplen),
1889 PCI_DMA_FROMDEVICE);
1890 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1891 /*
1892                          * This is a non-TCP/UDP IP frame, so
1893 * the headers aren't split into a small
1894 * buffer. We have to use the small buffer
1895 * that contains our sg list as our skb to
1896 * send up the stack. Copy the sg list here to
1897 * a local buffer and use it to find the
1898 * pages to chain.
1899 */
1900 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1901 "%d bytes of headers & data in chain of large.\n",
1902 length);
1903 skb = sbq_desc->p.skb;
1904 sbq_desc->p.skb = NULL;
1905 skb_reserve(skb, NET_IP_ALIGN);
1906 }
1907 while (length > 0) {
1908 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1909 size = (length < rx_ring->lbq_buf_size) ? length :
1910 rx_ring->lbq_buf_size;
1911
1912 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1913 "Adding page %d to skb for %d bytes.\n",
1914 i, size);
1915 skb_fill_page_desc(skb, i,
1916 lbq_desc->p.pg_chunk.page,
1917 lbq_desc->p.pg_chunk.offset,
1918 size);
1919 skb->len += size;
1920 skb->data_len += size;
1921 skb->truesize += size;
1922 length -= size;
1923 i++;
1924 }
1925 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1926 VLAN_ETH_HLEN : ETH_HLEN);
1927 }
1928 return skb;
1929 }
1930
1931 /* Process an inbound completion from an rx ring. */
1932 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1933 struct rx_ring *rx_ring,
1934 struct ib_mac_iocb_rsp *ib_mac_rsp,
1935 u16 vlan_id)
1936 {
1937 struct net_device *ndev = qdev->ndev;
1938 struct sk_buff *skb = NULL;
1939
1940 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1941
1942 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1943 if (unlikely(!skb)) {
1944 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1945 "No skb available, drop packet.\n");
1946 rx_ring->rx_dropped++;
1947 return;
1948 }
1949
1950 /* Frame error, so drop the packet. */
1951 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1952 netif_info(qdev, drv, qdev->ndev,
1953 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1954 dev_kfree_skb_any(skb);
1955 rx_ring->rx_errors++;
1956 return;
1957 }
1958
1959 /* The max framesize filter on this chip is set higher than
1960 * MTU since FCoE uses 2k frames.
1961 */
1962 if (skb->len > ndev->mtu + ETH_HLEN) {
1963 dev_kfree_skb_any(skb);
1964 rx_ring->rx_dropped++;
1965 return;
1966 }
1967
1968 /* loopback self test for ethtool */
1969 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1970 ql_check_lb_frame(qdev, skb);
1971 dev_kfree_skb_any(skb);
1972 return;
1973 }
1974
1975 prefetch(skb->data);
1976 skb->dev = ndev;
1977 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1979 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1980 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1981 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1982 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1983 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1984 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1985 rx_ring->rx_multicast++;
1986 }
1987 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1988 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1989 "Promiscuous Packet.\n");
1990 }
1991
1992 skb->protocol = eth_type_trans(skb, ndev);
1993 skb_checksum_none_assert(skb);
1994
1995 /* If rx checksum is on, and there are no
1996 * csum or frame errors.
1997 */
1998 if ((ndev->features & NETIF_F_RXCSUM) &&
1999 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2000 /* TCP frame. */
2001 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003 "TCP checksum done!\n");
2004 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2006 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2007 /* Unfragmented ipv4 UDP frame. */
2008 struct iphdr *iph = (struct iphdr *) skb->data;
2009 if (!(iph->frag_off &
2010 ntohs(IP_MF|IP_OFFSET))) {
2011 skb->ip_summed = CHECKSUM_UNNECESSARY;
2012 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2013 "TCP checksum done!\n");
2014 }
2015 }
2016 }
2017
2018 rx_ring->rx_packets++;
2019 rx_ring->rx_bytes += skb->len;
2020 skb_record_rx_queue(skb, rx_ring->cq_id);
2021 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2022 __vlan_hwaccel_put_tag(skb, vlan_id);
2023 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2024 napi_gro_receive(&rx_ring->napi, skb);
2025 else
2026 netif_receive_skb(skb);
2027 }
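
/* Delivery note (illustrative): frames are handed to napi_gro_receive()
 * only when the chip has already validated the checksum, i.e. when
 * skb->ip_summed was set to CHECKSUM_UNNECESSARY above; GRO coalescing
 * depends on that guarantee.  Frames with an unverified checksum fall
 * back to plain netif_receive_skb() delivery.
 */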
2028
2029 /* Process an inbound completion from an rx ring. */
2030 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031 struct rx_ring *rx_ring,
2032 struct ib_mac_iocb_rsp *ib_mac_rsp)
2033 {
2034 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2038
2039 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2040
2041 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042 /* The data and headers are split into
2043 * separate buffers.
2044 */
2045 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2046 vlan_id);
2047 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048 /* The data fit in a single small buffer.
2049 * Allocate a new skb, copy the data and
2050 * return the buffer to the free pool.
2051 */
2052 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2053 length, vlan_id);
2054 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057 /* TCP packet in a page chunk that's been checksummed.
2058 * Tack it on to our GRO skb and let it go.
2059 */
2060 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2061 length, vlan_id);
2062 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063 /* Non-TCP packet in a page chunk. Allocate an
2064 * skb, tack it on frags, and send it up.
2065 */
2066 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2067 length, vlan_id);
2068 } else {
2069 /* Non-TCP/UDP large frames that span multiple buffers
2070 * can be processed correctly by the split frame logic.
2071 */
2072 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073 vlan_id);
2074 }
2075
2076 return (unsigned long)length;
2077 }
2078
2079 /* Process an outbound completion from an rx ring. */
2080 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2081 struct ob_mac_iocb_rsp *mac_rsp)
2082 {
2083 struct tx_ring *tx_ring;
2084 struct tx_ring_desc *tx_ring_desc;
2085
2086 QL_DUMP_OB_MAC_RSP(mac_rsp);
2087 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2088 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2089 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2090 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2091 tx_ring->tx_packets++;
2092 dev_kfree_skb(tx_ring_desc->skb);
2093 tx_ring_desc->skb = NULL;
2094
2095 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2096 OB_MAC_IOCB_RSP_S |
2097 OB_MAC_IOCB_RSP_L |
2098 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2099 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2100 netif_warn(qdev, tx_done, qdev->ndev,
2101 "Total descriptor length did not match transfer length.\n");
2102 }
2103 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2104 netif_warn(qdev, tx_done, qdev->ndev,
2105 "Frame too short to be valid, not sent.\n");
2106 }
2107 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2108 netif_warn(qdev, tx_done, qdev->ndev,
2109 "Frame too long, but sent anyway.\n");
2110 }
2111 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2112 netif_warn(qdev, tx_done, qdev->ndev,
2113 "PCI backplane error. Frame not sent.\n");
2114 }
2115 }
2116 atomic_inc(&tx_ring->tx_count);
2117 }
2118
2119 /* Fire up a handler to reset the MPI processor. */
2120 void ql_queue_fw_error(struct ql_adapter *qdev)
2121 {
2122 ql_link_off(qdev);
2123 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2124 }
2125
2126 void ql_queue_asic_error(struct ql_adapter *qdev)
2127 {
2128 ql_link_off(qdev);
2129 ql_disable_interrupts(qdev);
2130 /* Clear adapter up bit to signal the recovery
2131 * process that it shouldn't kill the reset worker
2132 * thread
2133 */
2134 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2135 /* Set the asic recovery bit to indicate to the reset process
2136 * that we are in fatal error recovery rather than a normal close.
2137 */
2138 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2139 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2140 }
2141
2142 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2143 struct ib_ae_iocb_rsp *ib_ae_rsp)
2144 {
2145 switch (ib_ae_rsp->event) {
2146 case MGMT_ERR_EVENT:
2147 netif_err(qdev, rx_err, qdev->ndev,
2148 "Management Processor Fatal Error.\n");
2149 ql_queue_fw_error(qdev);
2150 return;
2151
2152 case CAM_LOOKUP_ERR_EVENT:
2153 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2154 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2155 ql_queue_asic_error(qdev);
2156 return;
2157
2158 case SOFT_ECC_ERROR_EVENT:
2159 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2160 ql_queue_asic_error(qdev);
2161 break;
2162
2163 case PCI_ERR_ANON_BUF_RD:
2164 netdev_err(qdev->ndev, "PCI error occurred when reading "
2165 "anonymous buffers from rx_ring %d.\n",
2166 ib_ae_rsp->q_id);
2167 ql_queue_asic_error(qdev);
2168 break;
2169
2170 default:
2171 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2172 ib_ae_rsp->event);
2173 ql_queue_asic_error(qdev);
2174 break;
2175 }
2176 }
2177
2178 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2179 {
2180 struct ql_adapter *qdev = rx_ring->qdev;
2181 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2182 struct ob_mac_iocb_rsp *net_rsp = NULL;
2183 int count = 0;
2184
2185 struct tx_ring *tx_ring;
2186 /* While there are entries in the completion queue. */
2187 while (prod != rx_ring->cnsmr_idx) {
2188
2189 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2190 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2191 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2192
2193 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2194 rmb();
2195 switch (net_rsp->opcode) {
2196
2197 case OPCODE_OB_MAC_TSO_IOCB:
2198 case OPCODE_OB_MAC_IOCB:
2199 ql_process_mac_tx_intr(qdev, net_rsp);
2200 break;
2201 default:
2202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2203 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2204 net_rsp->opcode);
2205 }
2206 count++;
2207 ql_update_cq(rx_ring);
2208 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2209 }
2210 if (!net_rsp)
2211 return 0;
2212 ql_write_cq_idx(rx_ring);
2213 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2214 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2215 if (atomic_read(&tx_ring->queue_stopped) &&
2216 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2217 /*
2218 * The queue got stopped because the tx_ring was full.
2219 * Wake it up, because it's now at least 25% empty.
2220 */
2221 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2222 }
2223
2224 return count;
2225 }
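
/* Wake-threshold example (numbers are illustrative, not from the
 * hardware spec): with a 128-entry TX work queue, wq_len / 4 == 32,
 * so a stopped subqueue is only re-woken once more than 32
 * descriptors have been completed and returned to tx_count.  This
 * hysteresis avoids bouncing the queue on/off for every single
 * completion.
 */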
2226
2227 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2228 {
2229 struct ql_adapter *qdev = rx_ring->qdev;
2230 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2231 struct ql_net_rsp_iocb *net_rsp;
2232 int count = 0;
2233
2234 /* While there are entries in the completion queue. */
2235 while (prod != rx_ring->cnsmr_idx) {
2236
2237 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2238 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2239 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2240
2241 net_rsp = rx_ring->curr_entry;
2242 rmb();
2243 switch (net_rsp->opcode) {
2244 case OPCODE_IB_MAC_IOCB:
2245 ql_process_mac_rx_intr(qdev, rx_ring,
2246 (struct ib_mac_iocb_rsp *)
2247 net_rsp);
2248 break;
2249
2250 case OPCODE_IB_AE_IOCB:
2251 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2252 net_rsp);
2253 break;
2254 default:
2255 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2257 net_rsp->opcode);
2258 break;
2259 }
2260 count++;
2261 ql_update_cq(rx_ring);
2262 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2263 if (count == budget)
2264 break;
2265 }
2266 ql_update_buffer_queues(qdev, rx_ring);
2267 ql_write_cq_idx(rx_ring);
2268 return count;
2269 }
2270
2271 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2272 {
2273 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2274 struct ql_adapter *qdev = rx_ring->qdev;
2275 struct rx_ring *trx_ring;
2276 int i, work_done = 0;
2277 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2278
2279 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2280 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2281
2282 /* Service the TX rings first. They start
2283 * right after the RSS rings. */
2284 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2285 trx_ring = &qdev->rx_ring[i];
2286 /* If this TX completion ring belongs to this vector and
2287 * it's not empty then service it.
2288 */
2289 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2290 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2291 trx_ring->cnsmr_idx)) {
2292 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2293 "%s: Servicing TX completion ring %d.\n",
2294 __func__, trx_ring->cq_id);
2295 ql_clean_outbound_rx_ring(trx_ring);
2296 }
2297 }
2298
2299 /*
2300 * Now service the RSS ring if it's active.
2301 */
2302 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2303 rx_ring->cnsmr_idx) {
2304 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2305 "%s: Servicing RX completion ring %d.\n",
2306 __func__, rx_ring->cq_id);
2307 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2308 }
2309
2310 if (work_done < budget) {
2311 napi_complete(napi);
2312 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2313 }
2314 return work_done;
2315 }
2316
2317 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2318 {
2319 struct ql_adapter *qdev = netdev_priv(ndev);
2320
2321 if (features & NETIF_F_HW_VLAN_RX) {
2322 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2323 "Turning on VLAN in NIC_RCV_CFG.\n");
2324 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2325 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2326 } else {
2327 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2328 "Turning off VLAN in NIC_RCV_CFG.\n");
2329 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2330 }
2331 }
2332
2333 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2334 {
2335 /*
2336 * Since there is no support for separate rx/tx vlan accel
2337 * enable/disable, make sure the tx flag always matches the rx flag.
2338 */
2339 if (features & NETIF_F_HW_VLAN_RX)
2340 features |= NETIF_F_HW_VLAN_TX;
2341 else
2342 features &= ~NETIF_F_HW_VLAN_TX;
2343
2344 return features;
2345 }
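
/* Example of the fixup above (hypothetical feature request): if
 * userspace asks for NETIF_F_HW_VLAN_TX without NETIF_F_HW_VLAN_RX,
 * the TX bit is cleared, and if it asks for RX without TX, the TX
 * bit is added back - so VLAN acceleration is always toggled for
 * both directions together.
 */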
2346
2347 static int qlge_set_features(struct net_device *ndev, u32 features)
2348 {
2349 u32 changed = ndev->features ^ features;
2350
2351 if (changed & NETIF_F_HW_VLAN_RX)
2352 qlge_vlan_mode(ndev, features);
2353
2354 return 0;
2355 }
2356
2357 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2358 {
2359 u32 enable_bit = MAC_ADDR_E;
2360
2361 if (ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2362 MAC_ADDR_TYPE_VLAN, vid)) {
2363 netif_err(qdev, ifup, qdev->ndev,
2364 "Failed to init vlan address.\n");
2365 }
2366 }
2367
2368 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2369 {
2370 struct ql_adapter *qdev = netdev_priv(ndev);
2371 int status;
2372
2373 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2374 if (status)
2375 return;
2376
2377 __qlge_vlan_rx_add_vid(qdev, vid);
2378 set_bit(vid, qdev->active_vlans);
2379
2380 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2381 }
2382
2383 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2384 {
2385 u32 enable_bit = 0;
2386
2387 if (ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2388 MAC_ADDR_TYPE_VLAN, vid)) {
2389 netif_err(qdev, ifup, qdev->ndev,
2390 "Failed to clear vlan address.\n");
2391 }
2392 }
2393
2394 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2395 {
2396 struct ql_adapter *qdev = netdev_priv(ndev);
2397 int status;
2398
2399 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2400 if (status)
2401 return;
2402
2403 __qlge_vlan_rx_kill_vid(qdev, vid);
2404 clear_bit(vid, qdev->active_vlans);
2405
2406 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2407 }
2408
2409 static void qlge_restore_vlan(struct ql_adapter *qdev)
2410 {
2411 int status;
2412 u16 vid;
2413
2414 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2415 if (status)
2416 return;
2417
2418 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2419 __qlge_vlan_rx_add_vid(qdev, vid);
2420
2421 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2422 }
2423
2424 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2425 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2426 {
2427 struct rx_ring *rx_ring = dev_id;
2428 napi_schedule(&rx_ring->napi);
2429 return IRQ_HANDLED;
2430 }
2431
2432 /* This handles a fatal error, MPI activity, and the default
2433 * rx_ring in an MSI-X multiple vector environment.
2434 * In an MSI/Legacy environment it also processes the rest of
2435 * the rx_rings.
2436 */
2437 static irqreturn_t qlge_isr(int irq, void *dev_id)
2438 {
2439 struct rx_ring *rx_ring = dev_id;
2440 struct ql_adapter *qdev = rx_ring->qdev;
2441 struct intr_context *intr_context = &qdev->intr_context[0];
2442 u32 var;
2443 int work_done = 0;
2444
2445 spin_lock(&qdev->hw_lock);
2446 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2447 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2448 "Shared Interrupt, Not ours!\n");
2449 spin_unlock(&qdev->hw_lock);
2450 return IRQ_NONE;
2451 }
2452 spin_unlock(&qdev->hw_lock);
2453
2454 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2455
2456 /*
2457 * Check for fatal error.
2458 */
2459 if (var & STS_FE) {
2460 ql_queue_asic_error(qdev);
2461 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2462 var = ql_read32(qdev, ERR_STS);
2463 netdev_err(qdev->ndev, "Resetting chip. "
2464 "Error Status Register = 0x%x\n", var);
2465 return IRQ_HANDLED;
2466 }
2467
2468 /*
2469 * Check MPI processor activity.
2470 */
2471 if ((var & STS_PI) &&
2472 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2473 /*
2474 * We've got an async event or mailbox completion.
2475 * Handle it and clear the source of the interrupt.
2476 */
2477 netif_err(qdev, intr, qdev->ndev,
2478 "Got MPI processor interrupt.\n");
2479 ql_disable_completion_interrupt(qdev, intr_context->intr);
2480 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2481 queue_delayed_work_on(smp_processor_id(),
2482 qdev->workqueue, &qdev->mpi_work, 0);
2483 work_done++;
2484 }
2485
2486 /*
2487 * Get the bit-mask that shows the active queues for this
2488 * pass. Compare it to the queues that this irq services
2489 * and call napi if there's a match.
2490 */
2491 var = ql_read32(qdev, ISR1);
2492 if (var & intr_context->irq_mask) {
2493 netif_info(qdev, intr, qdev->ndev,
2494 "Waking handler for rx_ring[0].\n");
2495 ql_disable_completion_interrupt(qdev, intr_context->intr);
2496 napi_schedule(&rx_ring->napi);
2497 work_done++;
2498 }
2499 ql_enable_completion_interrupt(qdev, intr_context->intr);
2500 return work_done ? IRQ_HANDLED : IRQ_NONE;
2501 }
2502
2503 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2504 {
2505
2506 if (skb_is_gso(skb)) {
2507 int err;
2508 if (skb_header_cloned(skb)) {
2509 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2510 if (err)
2511 return err;
2512 }
2513
2514 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2515 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2516 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2517 mac_iocb_ptr->total_hdrs_len =
2518 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2519 mac_iocb_ptr->net_trans_offset =
2520 cpu_to_le16(skb_network_offset(skb) |
2521 skb_transport_offset(skb)
2522 << OB_MAC_TRANSPORT_HDR_SHIFT);
2523 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2524 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2525 if (likely(skb->protocol == htons(ETH_P_IP))) {
2526 struct iphdr *iph = ip_hdr(skb);
2527 iph->check = 0;
2528 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2529 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2530 iph->daddr, 0,
2531 IPPROTO_TCP,
2532 0);
2533 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2534 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2535 tcp_hdr(skb)->check =
2536 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2537 &ipv6_hdr(skb)->daddr,
2538 0, IPPROTO_TCP, 0);
2539 }
2540 return 1;
2541 }
2542 return 0;
2543 }
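
/* TSO checksum seeding, sketched for the common IPv4 case (values are
 * illustrative): ql_tso() stores ~csum_tcpudp_magic(saddr, daddr, 0,
 * IPPROTO_TCP, 0) in tcp_hdr(skb)->check, i.e. the complemented
 * pseudo-header sum computed with a zero length.  The hardware can
 * then add each segment's own TCP length and payload sum when it cuts
 * the gso_size-byte segments, without the driver touching the packet
 * again.
 */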
2544
2545 static void ql_hw_csum_setup(struct sk_buff *skb,
2546 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2547 {
2548 int len;
2549 struct iphdr *iph = ip_hdr(skb);
2550 __sum16 *check;
2551 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2552 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2553 mac_iocb_ptr->net_trans_offset =
2554 cpu_to_le16(skb_network_offset(skb) |
2555 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2556
2557 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2558 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2559 if (likely(iph->protocol == IPPROTO_TCP)) {
2560 check = &(tcp_hdr(skb)->check);
2561 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2562 mac_iocb_ptr->total_hdrs_len =
2563 cpu_to_le16(skb_transport_offset(skb) +
2564 (tcp_hdr(skb)->doff << 2));
2565 } else {
2566 check = &(udp_hdr(skb)->check);
2567 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2568 mac_iocb_ptr->total_hdrs_len =
2569 cpu_to_le16(skb_transport_offset(skb) +
2570 sizeof(struct udphdr));
2571 }
2572 *check = ~csum_tcpudp_magic(iph->saddr,
2573 iph->daddr, len, iph->protocol, 0);
2574 }
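
/* Checksum-offload length example (illustrative numbers): for a
 * 1500-byte IPv4 datagram with a 20-byte header (iph->ihl == 5),
 * len = 1500 - 20 = 1480, which is the TCP/UDP length fed into
 * csum_tcpudp_magic() above to build the pseudo-header seed that the
 * chip completes over the payload.
 */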
2575
2576 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2577 {
2578 struct tx_ring_desc *tx_ring_desc;
2579 struct ob_mac_iocb_req *mac_iocb_ptr;
2580 struct ql_adapter *qdev = netdev_priv(ndev);
2581 int tso;
2582 struct tx_ring *tx_ring;
2583 u32 tx_ring_idx = (u32) skb->queue_mapping;
2584
2585 tx_ring = &qdev->tx_ring[tx_ring_idx];
2586
2587 if (skb_padto(skb, ETH_ZLEN))
2588 return NETDEV_TX_OK;
2589
2590 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2591 netif_info(qdev, tx_queued, qdev->ndev,
2592 "%s: shutting down tx queue %d du to lack of resources.\n",
2593 __func__, tx_ring_idx);
2594 netif_stop_subqueue(ndev, tx_ring->wq_id);
2595 atomic_inc(&tx_ring->queue_stopped);
2596 tx_ring->tx_errors++;
2597 return NETDEV_TX_BUSY;
2598 }
2599 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2600 mac_iocb_ptr = tx_ring_desc->queue_entry;
2601 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2602
2603 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2604 mac_iocb_ptr->tid = tx_ring_desc->index;
2605 /* We use the upper 32-bits to store the tx queue for this IO.
2606 * When we get the completion we can use it to establish the context.
2607 */
2608 mac_iocb_ptr->txq_idx = tx_ring_idx;
2609 tx_ring_desc->skb = skb;
2610
2611 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2612
2613 if (vlan_tx_tag_present(skb)) {
2614 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2615 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2616 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2617 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2618 }
2619 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2620 if (tso < 0) {
2621 dev_kfree_skb_any(skb);
2622 return NETDEV_TX_OK;
2623 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2624 ql_hw_csum_setup(skb,
2625 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2626 }
2627 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2628 NETDEV_TX_OK) {
2629 netif_err(qdev, tx_queued, qdev->ndev,
2630 "Could not map the segments.\n");
2631 tx_ring->tx_errors++;
2632 return NETDEV_TX_BUSY;
2633 }
2634 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2635 tx_ring->prod_idx++;
2636 if (tx_ring->prod_idx == tx_ring->wq_len)
2637 tx_ring->prod_idx = 0;
2638 wmb();
2639
2640 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2641 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2642 "tx queued, slot %d, len %d\n",
2643 tx_ring->prod_idx, skb->len);
2644
2645 atomic_dec(&tx_ring->tx_count);
2646 return NETDEV_TX_OK;
2647 }
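
/* Transmit-ring bookkeeping, sketched with an assumed 128-entry ring:
 * prod_idx advances by one per frame and wraps from 127 back to 0;
 * the wmb() above orders the IOCB writes before the doorbell write
 * that tells the chip new work is queued.  tx_count is decremented
 * here and replenished in ql_process_mac_tx_intr() when the
 * completion for the frame arrives.
 */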
2648
2649
2650 static void ql_free_shadow_space(struct ql_adapter *qdev)
2651 {
2652 if (qdev->rx_ring_shadow_reg_area) {
2653 pci_free_consistent(qdev->pdev,
2654 PAGE_SIZE,
2655 qdev->rx_ring_shadow_reg_area,
2656 qdev->rx_ring_shadow_reg_dma);
2657 qdev->rx_ring_shadow_reg_area = NULL;
2658 }
2659 if (qdev->tx_ring_shadow_reg_area) {
2660 pci_free_consistent(qdev->pdev,
2661 PAGE_SIZE,
2662 qdev->tx_ring_shadow_reg_area,
2663 qdev->tx_ring_shadow_reg_dma);
2664 qdev->tx_ring_shadow_reg_area = NULL;
2665 }
2666 }
2667
2668 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2669 {
2670 qdev->rx_ring_shadow_reg_area =
2671 pci_alloc_consistent(qdev->pdev,
2672 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2673 if (qdev->rx_ring_shadow_reg_area == NULL) {
2674 netif_err(qdev, ifup, qdev->ndev,
2675 "Allocation of RX shadow space failed.\n");
2676 return -ENOMEM;
2677 }
2678 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2679 qdev->tx_ring_shadow_reg_area =
2680 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2681 &qdev->tx_ring_shadow_reg_dma);
2682 if (qdev->tx_ring_shadow_reg_area == NULL) {
2683 netif_err(qdev, ifup, qdev->ndev,
2684 "Allocation of TX shadow space failed.\n");
2685 goto err_wqp_sh_area;
2686 }
2687 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2688 return 0;
2689
2690 err_wqp_sh_area:
2691 pci_free_consistent(qdev->pdev,
2692 PAGE_SIZE,
2693 qdev->rx_ring_shadow_reg_area,
2694 qdev->rx_ring_shadow_reg_dma);
2695 return -ENOMEM;
2696 }
2697
2698 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2699 {
2700 struct tx_ring_desc *tx_ring_desc;
2701 int i;
2702 struct ob_mac_iocb_req *mac_iocb_ptr;
2703
2704 mac_iocb_ptr = tx_ring->wq_base;
2705 tx_ring_desc = tx_ring->q;
2706 for (i = 0; i < tx_ring->wq_len; i++) {
2707 tx_ring_desc->index = i;
2708 tx_ring_desc->skb = NULL;
2709 tx_ring_desc->queue_entry = mac_iocb_ptr;
2710 mac_iocb_ptr++;
2711 tx_ring_desc++;
2712 }
2713 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2714 atomic_set(&tx_ring->queue_stopped, 0);
2715 }
2716
2717 static void ql_free_tx_resources(struct ql_adapter *qdev,
2718 struct tx_ring *tx_ring)
2719 {
2720 if (tx_ring->wq_base) {
2721 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2722 tx_ring->wq_base, tx_ring->wq_base_dma);
2723 tx_ring->wq_base = NULL;
2724 }
2725 kfree(tx_ring->q);
2726 tx_ring->q = NULL;
2727 }
2728
2729 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2730 struct tx_ring *tx_ring)
2731 {
2732 tx_ring->wq_base =
2733 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2734 &tx_ring->wq_base_dma);
2735
2736 if ((tx_ring->wq_base == NULL) ||
2737 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2738 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2739 return -ENOMEM;
2740 }
2741 tx_ring->q =
2742 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2743 if (tx_ring->q == NULL)
2744 goto err;
2745
2746 return 0;
2747 err:
2748 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2749 tx_ring->wq_base, tx_ring->wq_base_dma);
2750 return -ENOMEM;
2751 }
2752
2753 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2754 {
2755 struct bq_desc *lbq_desc;
2756
2757 uint32_t curr_idx, clean_idx;
2758
2759 curr_idx = rx_ring->lbq_curr_idx;
2760 clean_idx = rx_ring->lbq_clean_idx;
2761 while (curr_idx != clean_idx) {
2762 lbq_desc = &rx_ring->lbq[curr_idx];
2763
2764 if (lbq_desc->p.pg_chunk.last_flag) {
2765 pci_unmap_page(qdev->pdev,
2766 lbq_desc->p.pg_chunk.map,
2767 ql_lbq_block_size(qdev),
2768 PCI_DMA_FROMDEVICE);
2769 lbq_desc->p.pg_chunk.last_flag = 0;
2770 }
2771
2772 put_page(lbq_desc->p.pg_chunk.page);
2773 lbq_desc->p.pg_chunk.page = NULL;
2774
2775 if (++curr_idx == rx_ring->lbq_len)
2776 curr_idx = 0;
2777
2778 }
2779 }
2780
2781 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2782 {
2783 int i;
2784 struct bq_desc *sbq_desc;
2785
2786 for (i = 0; i < rx_ring->sbq_len; i++) {
2787 sbq_desc = &rx_ring->sbq[i];
2788 if (sbq_desc == NULL) {
2789 netif_err(qdev, ifup, qdev->ndev,
2790 "sbq_desc %d is NULL.\n", i);
2791 return;
2792 }
2793 if (sbq_desc->p.skb) {
2794 pci_unmap_single(qdev->pdev,
2795 dma_unmap_addr(sbq_desc, mapaddr),
2796 dma_unmap_len(sbq_desc, maplen),
2797 PCI_DMA_FROMDEVICE);
2798 dev_kfree_skb(sbq_desc->p.skb);
2799 sbq_desc->p.skb = NULL;
2800 }
2801 }
2802 }
2803
2804 /* Free all large and small rx buffers associated
2805 * with the completion queues for this device.
2806 */
2807 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2808 {
2809 int i;
2810 struct rx_ring *rx_ring;
2811
2812 for (i = 0; i < qdev->rx_ring_count; i++) {
2813 rx_ring = &qdev->rx_ring[i];
2814 if (rx_ring->lbq)
2815 ql_free_lbq_buffers(qdev, rx_ring);
2816 if (rx_ring->sbq)
2817 ql_free_sbq_buffers(qdev, rx_ring);
2818 }
2819 }
2820
2821 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2822 {
2823 struct rx_ring *rx_ring;
2824 int i;
2825
2826 for (i = 0; i < qdev->rx_ring_count; i++) {
2827 rx_ring = &qdev->rx_ring[i];
2828 if (rx_ring->type != TX_Q)
2829 ql_update_buffer_queues(qdev, rx_ring);
2830 }
2831 }
2832
2833 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2834 struct rx_ring *rx_ring)
2835 {
2836 int i;
2837 struct bq_desc *lbq_desc;
2838 __le64 *bq = rx_ring->lbq_base;
2839
2840 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2841 for (i = 0; i < rx_ring->lbq_len; i++) {
2842 lbq_desc = &rx_ring->lbq[i];
2843 memset(lbq_desc, 0, sizeof(*lbq_desc));
2844 lbq_desc->index = i;
2845 lbq_desc->addr = bq;
2846 bq++;
2847 }
2848 }
2849
2850 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2851 struct rx_ring *rx_ring)
2852 {
2853 int i;
2854 struct bq_desc *sbq_desc;
2855 __le64 *bq = rx_ring->sbq_base;
2856
2857 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2858 for (i = 0; i < rx_ring->sbq_len; i++) {
2859 sbq_desc = &rx_ring->sbq[i];
2860 memset(sbq_desc, 0, sizeof(*sbq_desc));
2861 sbq_desc->index = i;
2862 sbq_desc->addr = bq;
2863 bq++;
2864 }
2865 }
2866
2867 static void ql_free_rx_resources(struct ql_adapter *qdev,
2868 struct rx_ring *rx_ring)
2869 {
2870 /* Free the small buffer queue. */
2871 if (rx_ring->sbq_base) {
2872 pci_free_consistent(qdev->pdev,
2873 rx_ring->sbq_size,
2874 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2875 rx_ring->sbq_base = NULL;
2876 }
2877
2878 /* Free the small buffer queue control blocks. */
2879 kfree(rx_ring->sbq);
2880 rx_ring->sbq = NULL;
2881
2882 /* Free the large buffer queue. */
2883 if (rx_ring->lbq_base) {
2884 pci_free_consistent(qdev->pdev,
2885 rx_ring->lbq_size,
2886 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2887 rx_ring->lbq_base = NULL;
2888 }
2889
2890 /* Free the large buffer queue control blocks. */
2891 kfree(rx_ring->lbq);
2892 rx_ring->lbq = NULL;
2893
2894 /* Free the completion queue. */
2895 if (rx_ring->cq_base) {
2896 pci_free_consistent(qdev->pdev,
2897 rx_ring->cq_size,
2898 rx_ring->cq_base, rx_ring->cq_base_dma);
2899 rx_ring->cq_base = NULL;
2900 }
2901 }
2902
2903 /* Allocate queues and buffers for this completion queue based
2904 * on the values in the parameter structure. */
2905 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2906 struct rx_ring *rx_ring)
2907 {
2908
2909 /*
2910 * Allocate the completion queue for this rx_ring.
2911 */
2912 rx_ring->cq_base =
2913 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2914 &rx_ring->cq_base_dma);
2915
2916 if (rx_ring->cq_base == NULL) {
2917 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2918 return -ENOMEM;
2919 }
2920
2921 if (rx_ring->sbq_len) {
2922 /*
2923 * Allocate small buffer queue.
2924 */
2925 rx_ring->sbq_base =
2926 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2927 &rx_ring->sbq_base_dma);
2928
2929 if (rx_ring->sbq_base == NULL) {
2930 netif_err(qdev, ifup, qdev->ndev,
2931 "Small buffer queue allocation failed.\n");
2932 goto err_mem;
2933 }
2934
2935 /*
2936 * Allocate small buffer queue control blocks.
2937 */
2938 rx_ring->sbq =
2939 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2940 GFP_KERNEL);
2941 if (rx_ring->sbq == NULL) {
2942 netif_err(qdev, ifup, qdev->ndev,
2943 "Small buffer queue control block allocation failed.\n");
2944 goto err_mem;
2945 }
2946
2947 ql_init_sbq_ring(qdev, rx_ring);
2948 }
2949
2950 if (rx_ring->lbq_len) {
2951 /*
2952 * Allocate large buffer queue.
2953 */
2954 rx_ring->lbq_base =
2955 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2956 &rx_ring->lbq_base_dma);
2957
2958 if (rx_ring->lbq_base == NULL) {
2959 netif_err(qdev, ifup, qdev->ndev,
2960 "Large buffer queue allocation failed.\n");
2961 goto err_mem;
2962 }
2963 /*
2964 * Allocate large buffer queue control blocks.
2965 */
2966 rx_ring->lbq =
2967 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2968 GFP_KERNEL);
2969 if (rx_ring->lbq == NULL) {
2970 netif_err(qdev, ifup, qdev->ndev,
2971 "Large buffer queue control block allocation failed.\n");
2972 goto err_mem;
2973 }
2974
2975 ql_init_lbq_ring(qdev, rx_ring);
2976 }
2977
2978 return 0;
2979
2980 err_mem:
2981 ql_free_rx_resources(qdev, rx_ring);
2982 return -ENOMEM;
2983 }
2984
2985 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2986 {
2987 struct tx_ring *tx_ring;
2988 struct tx_ring_desc *tx_ring_desc;
2989 int i, j;
2990
2991 /*
2992 * Loop through all queues and free
2993 * any resources.
2994 */
2995 for (j = 0; j < qdev->tx_ring_count; j++) {
2996 tx_ring = &qdev->tx_ring[j];
2997 for (i = 0; i < tx_ring->wq_len; i++) {
2998 tx_ring_desc = &tx_ring->q[i];
2999 if (tx_ring_desc && tx_ring_desc->skb) {
3000 netif_err(qdev, ifdown, qdev->ndev,
3001 "Freeing lost SKB %p, from queue %d, index %d.\n",
3002 tx_ring_desc->skb, j,
3003 tx_ring_desc->index);
3004 ql_unmap_send(qdev, tx_ring_desc,
3005 tx_ring_desc->map_cnt);
3006 dev_kfree_skb(tx_ring_desc->skb);
3007 tx_ring_desc->skb = NULL;
3008 }
3009 }
3010 }
3011 }
3012
3013 static void ql_free_mem_resources(struct ql_adapter *qdev)
3014 {
3015 int i;
3016
3017 for (i = 0; i < qdev->tx_ring_count; i++)
3018 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3019 for (i = 0; i < qdev->rx_ring_count; i++)
3020 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3021 ql_free_shadow_space(qdev);
3022 }
3023
3024 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3025 {
3026 int i;
3027
3028 /* Allocate space for our shadow registers and such. */
3029 if (ql_alloc_shadow_space(qdev))
3030 return -ENOMEM;
3031
3032 for (i = 0; i < qdev->rx_ring_count; i++) {
3033 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3034 netif_err(qdev, ifup, qdev->ndev,
3035 "RX resource allocation failed.\n");
3036 goto err_mem;
3037 }
3038 }
3039 /* Allocate tx queue resources */
3040 for (i = 0; i < qdev->tx_ring_count; i++) {
3041 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3042 netif_err(qdev, ifup, qdev->ndev,
3043 "TX resource allocation failed.\n");
3044 goto err_mem;
3045 }
3046 }
3047 return 0;
3048
3049 err_mem:
3050 ql_free_mem_resources(qdev);
3051 return -ENOMEM;
3052 }
3053
3054 /* Set up the rx ring control block and pass it to the chip.
3055 * The control block is defined as
3056 * "Completion Queue Initialization Control Block", or cqicb.
3057 */
3058 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3059 {
3060 struct cqicb *cqicb = &rx_ring->cqicb;
3061 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3062 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3063 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3064 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3065 void __iomem *doorbell_area =
3066 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3067 int err = 0;
3068 u16 bq_len;
3069 u64 tmp;
3070 __le64 *base_indirect_ptr;
3071 int page_entries;
3072
3073 /* Set up the shadow registers for this ring. */
3074 rx_ring->prod_idx_sh_reg = shadow_reg;
3075 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3076 *rx_ring->prod_idx_sh_reg = 0;
3077 shadow_reg += sizeof(u64);
3078 shadow_reg_dma += sizeof(u64);
3079 rx_ring->lbq_base_indirect = shadow_reg;
3080 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3081 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3082 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3083 rx_ring->sbq_base_indirect = shadow_reg;
3084 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3085
3086 /* PCI doorbell mem area + 0x00 for consumer index register */
3087 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3088 rx_ring->cnsmr_idx = 0;
3089 rx_ring->curr_entry = rx_ring->cq_base;
3090
3091 /* PCI doorbell mem area + 0x04 for valid register */
3092 rx_ring->valid_db_reg = doorbell_area + 0x04;
3093
3094 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3095 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3096
3097 /* PCI doorbell mem area + 0x1c */
3098 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3099
3100 memset((void *)cqicb, 0, sizeof(struct cqicb));
3101 cqicb->msix_vect = rx_ring->irq;
3102
3103 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3104 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3105
3106 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3107
3108 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3109
3110 /*
3111 * Set up the control block load flags.
3112 */
3113 cqicb->flags = FLAGS_LC | /* Load queue base address */
3114 FLAGS_LV | /* Load MSI-X vector */
3115 FLAGS_LI; /* Load irq delay values */
3116 if (rx_ring->lbq_len) {
3117 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3118 tmp = (u64)rx_ring->lbq_base_dma;
3119 base_indirect_ptr = rx_ring->lbq_base_indirect;
3120 page_entries = 0;
3121 do {
3122 *base_indirect_ptr = cpu_to_le64(tmp);
3123 tmp += DB_PAGE_SIZE;
3124 base_indirect_ptr++;
3125 page_entries++;
3126 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3127 cqicb->lbq_addr =
3128 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3129 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3130 (u16) rx_ring->lbq_buf_size;
3131 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3132 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3133 (u16) rx_ring->lbq_len;
3134 cqicb->lbq_len = cpu_to_le16(bq_len);
3135 rx_ring->lbq_prod_idx = 0;
3136 rx_ring->lbq_curr_idx = 0;
3137 rx_ring->lbq_clean_idx = 0;
3138 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3139 }
3140 if (rx_ring->sbq_len) {
3141 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3142 tmp = (u64)rx_ring->sbq_base_dma;
3143 base_indirect_ptr = rx_ring->sbq_base_indirect;
3144 page_entries = 0;
3145 do {
3146 *base_indirect_ptr = cpu_to_le64(tmp);
3147 tmp += DB_PAGE_SIZE;
3148 base_indirect_ptr++;
3149 page_entries++;
3150 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3151 cqicb->sbq_addr =
3152 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3153 cqicb->sbq_buf_size =
3154 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3155 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3156 (u16) rx_ring->sbq_len;
3157 cqicb->sbq_len = cpu_to_le16(bq_len);
3158 rx_ring->sbq_prod_idx = 0;
3159 rx_ring->sbq_curr_idx = 0;
3160 rx_ring->sbq_clean_idx = 0;
3161 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3162 }
3163 switch (rx_ring->type) {
3164 case TX_Q:
3165 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3166 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3167 break;
3168 case RX_Q:
3169 /* Inbound completion handling rx_rings run in
3170 * separate NAPI contexts.
3171 */
3172 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3173 64);
3174 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3175 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3176 break;
3177 default:
3178 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3179 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3180 }
3181 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3182 "Initializing rx work queue.\n");
3183 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3184 CFG_LCQ, rx_ring->cq_id);
3185 if (err) {
3186 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3187 return err;
3188 }
3189 return err;
3190 }
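
/* Shadow-area layout used above, per completion queue (offsets are a
 * sketch; RX_RING_SHADOW_SPACE and MAX_DB_PAGES_PER_BQ() come from
 * qlge.h):
 *	+0   u64 producer index written back by the chip
 *	+8   lbq base-address indirection list
 *	+8 + 8 * MAX_DB_PAGES_PER_BQ(lbq_len)   sbq indirection list
 * All of it lives in the single PAGE_SIZE rx_ring_shadow_reg_area DMA
 * allocation, carved at cq_id * RX_RING_SHADOW_SPACE.
 */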
3191
3192 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3193 {
3194 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3195 void __iomem *doorbell_area =
3196 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3197 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3198 (tx_ring->wq_id * sizeof(u64));
3199 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3200 (tx_ring->wq_id * sizeof(u64));
3201 int err = 0;
3202
3203 /*
3204 * Assign doorbell registers for this tx_ring.
3205 */
3206 /* TX PCI doorbell mem area for tx producer index */
3207 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3208 tx_ring->prod_idx = 0;
3209 /* TX PCI doorbell mem area + 0x04 */
3210 tx_ring->valid_db_reg = doorbell_area + 0x04;
3211
3212 /*
3213 * Assign shadow registers for this tx_ring.
3214 */
3215 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3216 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3217
3218 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3219 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3220 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3221 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3222 wqicb->rid = 0;
3223 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3224
3225 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3226
3227 ql_init_tx_ring(qdev, tx_ring);
3228
3229 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3230 (u16) tx_ring->wq_id);
3231 if (err) {
3232 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3233 return err;
3234 }
3235 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3236 "Successfully loaded WQICB.\n");
3237 return err;
3238 }
3239
3240 static void ql_disable_msix(struct ql_adapter *qdev)
3241 {
3242 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3243 pci_disable_msix(qdev->pdev);
3244 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3245 kfree(qdev->msi_x_entry);
3246 qdev->msi_x_entry = NULL;
3247 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3248 pci_disable_msi(qdev->pdev);
3249 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3250 }
3251 }
3252
3253 /* We start by trying to get the number of vectors
3254 * stored in qdev->intr_count. If we don't get that
3255 * many then we reduce the count and try again.
3256 */
3257 static void ql_enable_msix(struct ql_adapter *qdev)
3258 {
3259 int i, err;
3260
3261 /* Get the MSIX vectors. */
3262 if (qlge_irq_type == MSIX_IRQ) {
3263 /* Try to alloc space for the msix struct;
3264 * if it fails, fall back to MSI/legacy.
3265 */
3266 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3267 sizeof(struct msix_entry),
3268 GFP_KERNEL);
3269 if (!qdev->msi_x_entry) {
3270 qlge_irq_type = MSI_IRQ;
3271 goto msi;
3272 }
3273
3274 for (i = 0; i < qdev->intr_count; i++)
3275 qdev->msi_x_entry[i].entry = i;
3276
3277 /* Loop to get our vectors. We start with
3278 * what we want and settle for what we get.
3279 */
3280 do {
3281 err = pci_enable_msix(qdev->pdev,
3282 qdev->msi_x_entry, qdev->intr_count);
3283 if (err > 0)
3284 qdev->intr_count = err;
3285 } while (err > 0);
3286
3287 if (err < 0) {
3288 kfree(qdev->msi_x_entry);
3289 qdev->msi_x_entry = NULL;
3290 netif_warn(qdev, ifup, qdev->ndev,
3291 "MSI-X Enable failed, trying MSI.\n");
3292 qdev->intr_count = 1;
3293 qlge_irq_type = MSI_IRQ;
3294 } else if (err == 0) {
3295 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3296 netif_info(qdev, ifup, qdev->ndev,
3297 "MSI-X Enabled, got %d vectors.\n",
3298 qdev->intr_count);
3299 return;
3300 }
3301 }
3302 msi:
3303 qdev->intr_count = 1;
3304 if (qlge_irq_type == MSI_IRQ) {
3305 if (!pci_enable_msi(qdev->pdev)) {
3306 set_bit(QL_MSI_ENABLED, &qdev->flags);
3307 netif_info(qdev, ifup, qdev->ndev,
3308 "Running with MSI interrupts.\n");
3309 return;
3310 }
3311 }
3312 qlge_irq_type = LEG_IRQ;
3313 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3314 "Running with legacy interrupts.\n");
3315 }
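
/* Vector negotiation sketch: pci_enable_msix() returns 0 on success,
 * a negative errno on failure, or a positive count of vectors the
 * system could actually provide.  The do/while loop above uses that
 * positive return to shrink intr_count and retry, e.g. a request for
 * 8 vectors that can only get 4 is retried with 4; a negative return
 * abandons MSI-X and falls through to MSI, then legacy INTx.
 */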
3316
3317 /* Each vector services 1 RSS ring and 1 or more
3318 * TX completion rings. This function loops through
3319 * the TX completion rings and assigns the vector that
3320 * will service it. An example would be if there are
3321 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3322 * This would mean that vector 0 would service RSS ring 0
3323 * and TX completion rings 0,1,2 and 3. Vector 1 would
3324 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3325 */
3326 static void ql_set_tx_vect(struct ql_adapter *qdev)
3327 {
3328 int i, j, vect;
3329 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3330
3331 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3332 /* Assign irq vectors to TX rx_rings.*/
3333 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3334 i < qdev->rx_ring_count; i++) {
3335 if (j == tx_rings_per_vector) {
3336 vect++;
3337 j = 0;
3338 }
3339 qdev->rx_ring[i].irq = vect;
3340 j++;
3341 }
3342 } else {
3343 /* For a single vector all rings have an irq
3344 * of zero.
3345 */
3346 for (i = 0; i < qdev->rx_ring_count; i++)
3347 qdev->rx_ring[i].irq = 0;
3348 }
3349 }
3350
3351 /* Set the interrupt mask for this vector. Each vector
3352 * will service 1 RSS ring and 1 or more TX completion
3353 * rings. This function sets up a bit mask per vector
3354 * that indicates which rings it services.
3355 */
3356 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3357 {
3358 int j, vect = ctx->intr;
3359 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3360
3361 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3362 /* Add the RSS ring serviced by this vector
3363 * to the mask.
3364 */
3365 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3366 /* Add the TX ring(s) serviced by this vector
3367 * to the mask. */
3368 for (j = 0; j < tx_rings_per_vector; j++) {
3369 ctx->irq_mask |=
3370 (1 << qdev->rx_ring[qdev->rss_ring_count +
3371 (vect * tx_rings_per_vector) + j].cq_id);
3372 }
3373 } else {
3374 /* For a single vector we just shift each queue's
3375 * ID into the mask.
3376 */
3377 for (j = 0; j < qdev->rx_ring_count; j++)
3378 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3379 }
3380 }
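
/* Worked irq_mask example (assumed configuration, for illustration):
 * with 2 MSI-X vectors, 2 RSS rings (cq_id 0-1) and 8 TX completion
 * rings (cq_id 2-9), tx_rings_per_vector = 8 / 2 = 4, so
 *	vector 0: (1 << 0) | bits for cq_id 2..5 = 0x03d
 *	vector 1: (1 << 1) | bits for cq_id 6..9 = 0x3c2
 * qlge_isr() and ql_napi_poll_msix() later compare these masks against
 * ISR1 and the per-ring cq_id bits to decide what each vector services.
 */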
3381
3382 /*
3383 * Here we build the intr_context structures based on
3384 * our rx_ring count and intr vector count.
3385 * The intr_context structure is used to hook each vector
3386 * to possibly different handlers.
3387 */
3388 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3389 {
3390 int i = 0;
3391 struct intr_context *intr_context = &qdev->intr_context[0];
3392
3393 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3394 /* Each rx_ring has its
3395 * own intr_context since we have separate
3396 * vectors for each queue.
3397 */
3398 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3399 qdev->rx_ring[i].irq = i;
3400 intr_context->intr = i;
3401 intr_context->qdev = qdev;
3402 /* Set up this vector's bit-mask that indicates
3403 * which queues it services.
3404 */
3405 ql_set_irq_mask(qdev, intr_context);
3406 /*
3407 * We set up each vector's enable/disable/read bits so
3408 * there's no bit/mask calculations in the critical path.
3409 */
3410 intr_context->intr_en_mask =
3411 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3412 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3413 | i;
3414 intr_context->intr_dis_mask =
3415 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3416 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3417 INTR_EN_IHD | i;
3418 intr_context->intr_read_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3421 i;
3422 if (i == 0) {
3423 /* The first vector/queue handles
3424 * broadcast/multicast, fatal errors,
3425 * and firmware events. This in addition
3426 * to normal inbound NAPI processing.
3427 */
3428 intr_context->handler = qlge_isr;
3429 sprintf(intr_context->name, "%s-rx-%d",
3430 qdev->ndev->name, i);
3431 } else {
3432 /*
3433 * Inbound queues handle unicast frames only.
3434 */
3435 intr_context->handler = qlge_msix_rx_isr;
3436 sprintf(intr_context->name, "%s-rx-%d",
3437 qdev->ndev->name, i);
3438 }
3439 }
3440 } else {
3441 /*
3442 * All rx_rings use the same intr_context since
3443 * there is only one vector.
3444 */
3445 intr_context->intr = 0;
3446 intr_context->qdev = qdev;
3447 /*
3448 * We set up each vector's enable/disable/read bits so
3449 * there's no bit/mask calculations in the critical path.
3450 */
3451 intr_context->intr_en_mask =
3452 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3453 intr_context->intr_dis_mask =
3454 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3455 INTR_EN_TYPE_DISABLE;
3456 intr_context->intr_read_mask =
3457 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3458 /*
3459 * Single interrupt means one handler for all rings.
3460 */
3461 intr_context->handler = qlge_isr;
3462 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3463 /* Set up this vector's bit-mask that indicates
3464 * which queues it services. In this case there is
3465 * a single vector so it will service all RSS and
3466 * TX completion rings.
3467 */
3468 ql_set_irq_mask(qdev, intr_context);
3469 }
3470 /* Tell the TX completion rings which MSIx vector
3471 * they will be using.
3472 */
3473 ql_set_tx_vect(qdev);
3474 }
3475
3476 static void ql_free_irq(struct ql_adapter *qdev)
3477 {
3478 int i;
3479 struct intr_context *intr_context = &qdev->intr_context[0];
3480
3481 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3482 if (intr_context->hooked) {
3483 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3484 free_irq(qdev->msi_x_entry[i].vector,
3485 &qdev->rx_ring[i]);
3486 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3487 "freeing msix interrupt %d.\n", i);
3488 } else {
3489 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3490 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3491 "freeing msi interrupt %d.\n", i);
3492 }
3493 }
3494 }
3495 ql_disable_msix(qdev);
3496 }
3497
3498 static int ql_request_irq(struct ql_adapter *qdev)
3499 {
3500 int i;
3501 int status = 0;
3502 struct pci_dev *pdev = qdev->pdev;
3503 struct intr_context *intr_context = &qdev->intr_context[0];
3504
3505 ql_resolve_queues_to_irqs(qdev);
3506
3507 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3508 atomic_set(&intr_context->irq_cnt, 0);
3509 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3510 status = request_irq(qdev->msi_x_entry[i].vector,
3511 intr_context->handler,
3512 0,
3513 intr_context->name,
3514 &qdev->rx_ring[i]);
3515 if (status) {
3516 netif_err(qdev, ifup, qdev->ndev,
3517 "Failed request for MSIX interrupt %d.\n",
3518 i);
3519 goto err_irq;
3520 } else {
3521 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3522 "Hooked intr %d, queue type %s, with name %s.\n",
3523 i,
3524 qdev->rx_ring[i].type == DEFAULT_Q ?
3525 "DEFAULT_Q" :
3526 qdev->rx_ring[i].type == TX_Q ?
3527 "TX_Q" :
3528 qdev->rx_ring[i].type == RX_Q ?
3529 "RX_Q" : "",
3530 intr_context->name);
3531 }
3532 } else {
3533 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3534 "trying msi or legacy interrupts.\n");
3535 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3536 "%s: irq = %d.\n", __func__, pdev->irq);
3537 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3538 "%s: context->name = %s.\n", __func__,
3539 intr_context->name);
3540 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3541 "%s: dev_id = 0x%p.\n", __func__,
3542 &qdev->rx_ring[0]);
3543 status =
3544 request_irq(pdev->irq, qlge_isr,
3545 test_bit(QL_MSI_ENABLED, &qdev->flags) ?
3546 0 : IRQF_SHARED,
3547 intr_context->name,
3548 &qdev->rx_ring[0]);
3549 if (status)
3550 goto err_irq;
3551
3552 netif_err(qdev, ifup, qdev->ndev,
3553 "Hooked intr %d, queue type %s, with name %s.\n",
3554 i,
3555 qdev->rx_ring[0].type == DEFAULT_Q ?
3556 "DEFAULT_Q" :
3557 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3558 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3559 intr_context->name);
3560 }
3561 intr_context->hooked = 1;
3562 }
3563 return status;
3564 err_irq:
3565 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
3566 ql_free_irq(qdev);
3567 return status;
3568 }
3569
3570 static int ql_start_rss(struct ql_adapter *qdev)
3571 {
3572 static const u8 init_hash_seed[] = {
3573 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3574 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3575 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3576 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3577 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3578 };
3579 struct ricb *ricb = &qdev->ricb;
3580 int status = 0;
3581 int i;
3582 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3583
3584 memset((void *)ricb, 0, sizeof(*ricb));
3585
3586 ricb->base_cq = RSS_L4K;
3587 ricb->flags =
3588 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3589 ricb->mask = cpu_to_le16((u16)(0x3ff));
3590
3591 /*
3592 * Fill out the Indirection Table.
3593 */
3594 for (i = 0; i < 1024; i++)
3595 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3596
3597 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3598 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3599
3600 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3601
3602 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3603 if (status) {
3604 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3605 return status;
3606 }
3607 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3608 "Successfully loaded RICB.\n");
3609 return status;
3610 }
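
/* RSS indirection example (illustrative): hash_id[i] = i &
 * (rss_ring_count - 1) fills the 1024-entry table with a repeating
 * 0, 1, ..., rss_ring_count-1 pattern, e.g. 0,1,2,3,0,1,2,3,... for
 * four RSS rings, spreading hash buckets evenly across the inbound
 * completion queues.  The AND-mask trick assumes rss_ring_count is a
 * power of two.
 */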
3611
3612 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3613 {
3614 int i, status = 0;
3615
3616 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3617 if (status)
3618 return status;
3619 /* Clear all the entries in the routing table. */
3620 for (i = 0; i < 16; i++) {
3621 status = ql_set_routing_reg(qdev, i, 0, 0);
3622 if (status) {
3623 netif_err(qdev, ifup, qdev->ndev,
3624 "Failed to init routing register for CAM packets.\n");
3625 break;
3626 }
3627 }
3628 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3629 return status;
3630 }
3631
3632 /* Initialize the frame-to-queue routing. */
3633 static int ql_route_initialize(struct ql_adapter *qdev)
3634 {
3635 int status = 0;
3636
3637 /* Clear all the entries in the routing table. */
3638 status = ql_clear_routing_entries(qdev);
3639 if (status)
3640 return status;
3641
3642 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3643 if (status)
3644 return status;
3645
3646 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3647 RT_IDX_IP_CSUM_ERR, 1);
3648 if (status) {
3649 netif_err(qdev, ifup, qdev->ndev,
3650 "Failed to init routing register "
3651 "for IP CSUM error packets.\n");
3652 goto exit;
3653 }
3654 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3655 RT_IDX_TU_CSUM_ERR, 1);
3656 if (status) {
3657 netif_err(qdev, ifup, qdev->ndev,
3658 "Failed to init routing register "
3659 "for TCP/UDP CSUM error packets.\n");
3660 goto exit;
3661 }
3662 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3663 if (status) {
3664 netif_err(qdev, ifup, qdev->ndev,
3665 "Failed to init routing register for broadcast packets.\n");
3666 goto exit;
3667 }
3668 /* If we have more than one inbound queue, then turn on RSS in the
3669 * routing block.
3670 */
3671 if (qdev->rss_ring_count > 1) {
3672 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3673 RT_IDX_RSS_MATCH, 1);
3674 if (status) {
3675 netif_err(qdev, ifup, qdev->ndev,
3676 "Failed to init routing register for MATCH RSS packets.\n");
3677 goto exit;
3678 }
3679 }
3680
3681 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3682 RT_IDX_CAM_HIT, 1);
3683 if (status)
3684 netif_err(qdev, ifup, qdev->ndev,
3685 "Failed to init routing register for CAM packets.\n");
3686 exit:
3687 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3688 return status;
3689 }
3690
3691 int ql_cam_route_initialize(struct ql_adapter *qdev)
3692 {
3693 int status, set;
3694
3695 /* Check if the link is up and use that to
3696 * determine whether we are setting or clearing
3697 * the MAC address in the CAM.
3698 */
3699 set = ql_read32(qdev, STS);
3700 set &= qdev->port_link_up;
3701 status = ql_set_mac_addr(qdev, set);
3702 if (status) {
3703 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3704 return status;
3705 }
3706
3707 status = ql_route_initialize(qdev);
3708 if (status)
3709 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3710
3711 return status;
3712 }
3713
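/* Program the chip for operation: halt on system errors, set the default
 * queue and VLAN behavior, enable the MPI interrupt, set up function
 * control and header splitting, route rx packets back to this function,
 * start the rx/tx rings (downloading a RICB when more than one RSS ring
 * is in use), initialize the port, load the CAM/routing tables and enable
 * NAPI on the RSS rings.
 */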
3714 static int ql_adapter_initialize(struct ql_adapter *qdev)
3715 {
3716 u32 value, mask;
3717 int i;
3718 int status = 0;
3719
3720 /*
3721 * Set up the System register to halt on errors.
3722 */
3723 value = SYS_EFE | SYS_FAE;
3724 mask = value << 16;
3725 ql_write32(qdev, SYS, mask | value);
3726
3727 /* Set the default queue, and VLAN behavior. */
3728 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3729 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3730 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3731
3732 /* Set the MPI interrupt to enabled. */
3733 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3734
3735 /* Enable the function, set pagesize, enable error checking. */
3736 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3737 FSC_EC | FSC_VM_PAGE_4K;
3738 value |= SPLT_SETTING;
3739
3740 /* Set/clear header splitting. */
3741 mask = FSC_VM_PAGESIZE_MASK |
3742 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3743 ql_write32(qdev, FSC, mask | value);
3744
3745 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3746
3747 /* Set RX packet routing to use the port/PCI function on which the
3748 * packet arrived, in addition to the usual frame routing.
3749 * This is helpful on bonding where both interfaces can have
3750 * the same MAC address.
3751 */
3752 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3753 /* Reroute all packets to our Interface.
3754 * They may have been routed to MPI firmware
3755 * due to WOL.
3756 */
3757 value = ql_read32(qdev, MGMT_RCV_CFG);
3758 value &= ~MGMT_RCV_CFG_RM;
3759 mask = 0xffff0000;
3760
3761 /* Sticky reg needs clearing due to WOL. */
3762 ql_write32(qdev, MGMT_RCV_CFG, mask);
3763 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3764
3765 /* Default WOL is enabled on Mezz cards */
3766 if (qdev->pdev->subsystem_device == 0x0068 ||
3767 qdev->pdev->subsystem_device == 0x0180)
3768 qdev->wol = WAKE_MAGIC;
3769
3770 /* Start up the rx queues. */
3771 for (i = 0; i < qdev->rx_ring_count; i++) {
3772 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3773 if (status) {
3774 netif_err(qdev, ifup, qdev->ndev,
3775 "Failed to start rx ring[%d].\n", i);
3776 return status;
3777 }
3778 }
3779
3780 /* If there is more than one inbound completion queue
3781 * then download a RICB to configure RSS.
3782 */
3783 if (qdev->rss_ring_count > 1) {
3784 status = ql_start_rss(qdev);
3785 if (status) {
3786 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3787 return status;
3788 }
3789 }
3790
3791 /* Start up the tx queues. */
3792 for (i = 0; i < qdev->tx_ring_count; i++) {
3793 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3794 if (status) {
3795 netif_err(qdev, ifup, qdev->ndev,
3796 "Failed to start tx ring[%d].\n", i);
3797 return status;
3798 }
3799 }
3800
3801 /* Initialize the port and set the max framesize. */
3802 status = qdev->nic_ops->port_initialize(qdev);
3803 if (status)
3804 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3805
3806 /* Set up the MAC address and frame routing filter. */
3807 status = ql_cam_route_initialize(qdev);
3808 if (status) {
3809 netif_err(qdev, ifup, qdev->ndev,
3810 "Failed to init CAM/Routing tables.\n");
3811 return status;
3812 }
3813
3814 /* Start NAPI for the RSS queues. */
3815 for (i = 0; i < qdev->rss_ring_count; i++) {
3816 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3817 "Enabling NAPI for rx_ring[%d].\n", i);
3818 napi_enable(&qdev->rx_ring[i].napi);
3819 }
3820
3821 return status;
3822 }
3823
3824 /* Issue soft reset to chip. */
3825 static int ql_adapter_reset(struct ql_adapter *qdev)
3826 {
3827 u32 value;
3828 int status = 0;
3829 unsigned long end_jiffies;
3830
3831 /* Clear all the entries in the routing table. */
3832 status = ql_clear_routing_entries(qdev);
3833 if (status) {
3834 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3835 return status;
3836 }
3837
3838 end_jiffies = jiffies +
3839 max((unsigned long)1, usecs_to_jiffies(30));
3840
3841 /* If the recovery bit is set, skip the mailbox commands and
3842 * clear the bit; otherwise we are in the normal reset process.
3843 */
3844 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3845 /* Stop management traffic. */
3846 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3847
3848 /* Wait for the NIC and MGMNT FIFOs to empty. */
3849 ql_wait_fifo_empty(qdev);
3850 } else
3851 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3852
3853 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3854
3855 do {
3856 value = ql_read32(qdev, RST_FO);
3857 if ((value & RST_FO_FR) == 0)
3858 break;
3859 cpu_relax();
3860 } while (time_before(jiffies, end_jiffies));
3861
3862 if (value & RST_FO_FR) {
3863 netif_err(qdev, ifdown, qdev->ndev,
3864 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3865 status = -ETIMEDOUT;
3866 }
3867
3868 /* Resume management traffic. */
3869 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3870 return status;
3871 }
3872
3873 static void ql_display_dev_info(struct net_device *ndev)
3874 {
3875 struct ql_adapter *qdev = netdev_priv(ndev);
3876
3877 netif_info(qdev, probe, qdev->ndev,
3878 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3879 "XG Roll = %d, XG Rev = %d.\n",
3880 qdev->func,
3881 qdev->port,
3882 qdev->chip_rev_id & 0x0000000f,
3883 qdev->chip_rev_id >> 4 & 0x0000000f,
3884 qdev->chip_rev_id >> 8 & 0x0000000f,
3885 qdev->chip_rev_id >> 12 & 0x0000000f);
3886 netif_info(qdev, probe, qdev->ndev,
3887 "MAC address %pM\n", ndev->dev_addr);
3888 }
3889
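/* Program the Wake On LAN mode in the MPI firmware based on qdev->wol.
 * Only magic packet wakeup is supported; any other WAKE_* flag makes this
 * return -EINVAL.
 */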
3890 static int ql_wol(struct ql_adapter *qdev)
3891 {
3892 int status = 0;
3893 u32 wol = MB_WOL_DISABLE;
3894
3895 /* The CAM is still intact after a reset, but if we
3896 * are doing WOL, then we may need to program the
3897 * routing regs. We would also need to issue the mailbox
3898 * commands to instruct the MPI what to do per the ethtool
3899 * settings.
3900 */
3901
3902 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3903 WAKE_MCAST | WAKE_BCAST)) {
3904 netif_err(qdev, ifdown, qdev->ndev,
3905 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3906 qdev->wol);
3907 return -EINVAL;
3908 }
3909
3910 if (qdev->wol & WAKE_MAGIC) {
3911 status = ql_mb_wol_set_magic(qdev, 1);
3912 if (status) {
3913 netif_err(qdev, ifdown, qdev->ndev,
3914 "Failed to set magic packet on %s.\n",
3915 qdev->ndev->name);
3916 return status;
3917 } else
3918 netif_info(qdev, drv, qdev->ndev,
3919 "Enabled magic packet successfully on %s.\n",
3920 qdev->ndev->name);
3921
3922 wol |= MB_WOL_MAGIC_PKT;
3923 }
3924
3925 if (qdev->wol) {
3926 wol |= MB_WOL_MODE_ON;
3927 status = ql_mb_wol_mode(qdev, wol);
3928 netif_err(qdev, drv, qdev->ndev,
3929 "WOL %s (wol code 0x%x) on %s\n",
3930 (status == 0) ? "Successfully set" : "Failed",
3931 wol, qdev->ndev->name);
3932 }
3933
3934 return status;
3935 }
3936
3937 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3938 {
3939
3940 /* Don't kill the reset worker thread if we
3941 * are in the process of recovery.
3942 */
3943 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3944 cancel_delayed_work_sync(&qdev->asic_reset_work);
3945 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3946 cancel_delayed_work_sync(&qdev->mpi_work);
3947 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3948 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3949 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3950 }
3951
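/* Bring the adapter down: turn the link off, cancel the worker threads,
 * disable NAPI and interrupts, clean the tx rings, delete the NAPI
 * contexts, reset the chip and free the rx buffers.
 */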
3952 static int ql_adapter_down(struct ql_adapter *qdev)
3953 {
3954 int i, status = 0;
3955
3956 ql_link_off(qdev);
3957
3958 ql_cancel_all_work_sync(qdev);
3959
3960 for (i = 0; i < qdev->rss_ring_count; i++)
3961 napi_disable(&qdev->rx_ring[i].napi);
3962
3963 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3964
3965 ql_disable_interrupts(qdev);
3966
3967 ql_tx_ring_clean(qdev);
3968
3969 /* Call netif_napi_del() from a common point.
3970 */
3971 for (i = 0; i < qdev->rss_ring_count; i++)
3972 netif_napi_del(&qdev->rx_ring[i].napi);
3973
3974 status = ql_adapter_reset(qdev);
3975 if (status)
3976 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3977 qdev->func);
3978 ql_free_rx_buffers(qdev);
3979
3980 return status;
3981 }
3982
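/* Bring the adapter up: program the chip, allocate rx buffers, restore the
 * carrier, rx mode and VLAN settings, then enable interrupts and the tx
 * queues. The chip is reset again if initialization fails.
 */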
3983 static int ql_adapter_up(struct ql_adapter *qdev)
3984 {
3985 int err = 0;
3986
3987 err = ql_adapter_initialize(qdev);
3988 if (err) {
3989 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3990 goto err_init;
3991 }
3992 set_bit(QL_ADAPTER_UP, &qdev->flags);
3993 ql_alloc_rx_buffers(qdev);
3994 /* If the port is initialized and the
3995 * link is up then turn on the carrier.
3996 */
3997 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3998 (ql_read32(qdev, STS) & qdev->port_link_up))
3999 ql_link_on(qdev);
4000 /* Restore rx mode. */
4001 clear_bit(QL_ALLMULTI, &qdev->flags);
4002 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4003 qlge_set_multicast_list(qdev->ndev);
4004
4005 /* Restore vlan setting. */
4006 qlge_restore_vlan(qdev);
4007
4008 ql_enable_interrupts(qdev);
4009 ql_enable_all_completion_interrupts(qdev);
4010 netif_tx_start_all_queues(qdev->ndev);
4011
4012 return 0;
4013 err_init:
4014 ql_adapter_reset(qdev);
4015 return err;
4016 }
4017
4018 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4019 {
4020 ql_free_mem_resources(qdev);
4021 ql_free_irq(qdev);
4022 }
4023
4024 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4025 {
4026 int status = 0;
4027
4028 if (ql_alloc_mem_resources(qdev)) {
4029 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4030 return -ENOMEM;
4031 }
4032 status = ql_request_irq(qdev);
4033 return status;
4034 }
4035
4036 static int qlge_close(struct net_device *ndev)
4037 {
4038 struct ql_adapter *qdev = netdev_priv(ndev);
4039
4040 /* If we hit the pci_channel_io_perm_failure
4041 * condition, then we have already
4042 * brought the adapter down.
4043 */
4044 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4045 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4046 clear_bit(QL_EEH_FATAL, &qdev->flags);
4047 return 0;
4048 }
4049
4050 /*
4051 * Wait for device to recover from a reset.
4052 * (Rarely happens, but possible.)
4053 */
4054 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4055 msleep(1);
4056 ql_adapter_down(qdev);
4057 ql_release_adapter_resources(qdev);
4058 return 0;
4059 }
4060
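/* Size the ring set: one RSS (inbound) completion ring per MSI-X vector
 * (bounded by the number of online CPUs), one tx ring per CPU, and one
 * outbound completion ring for each tx ring. The large rx buffer size is
 * chosen from the current MTU.
 */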
4061 static int ql_configure_rings(struct ql_adapter *qdev)
4062 {
4063 int i;
4064 struct rx_ring *rx_ring;
4065 struct tx_ring *tx_ring;
4066 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4067 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4068 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4069
4070 qdev->lbq_buf_order = get_order(lbq_buf_len);
4071
4072 /* In a perfect world we have one RSS ring for each CPU
4073 * and each has its own vector. To do that we ask for
4074 * cpu_cnt vectors. ql_enable_msix() will adjust the
4075 * vector count to what we actually get. We then
4076 * allocate an RSS ring for each.
4077 * Essentially, we are doing min(cpu_count, msix_vector_count).
4078 */
4079 qdev->intr_count = cpu_cnt;
4080 ql_enable_msix(qdev);
4081 /* Adjust the RSS ring count to the actual vector count. */
4082 qdev->rss_ring_count = qdev->intr_count;
4083 qdev->tx_ring_count = cpu_cnt;
4084 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4085
4086 for (i = 0; i < qdev->tx_ring_count; i++) {
4087 tx_ring = &qdev->tx_ring[i];
4088 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4089 tx_ring->qdev = qdev;
4090 tx_ring->wq_id = i;
4091 tx_ring->wq_len = qdev->tx_ring_size;
4092 tx_ring->wq_size =
4093 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4094
4095 /*
4096 * The completion queue IDs for the tx rings start
4097 * immediately after the rss rings.
4098 */
4099 tx_ring->cq_id = qdev->rss_ring_count + i;
4100 }
4101
4102 for (i = 0; i < qdev->rx_ring_count; i++) {
4103 rx_ring = &qdev->rx_ring[i];
4104 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4105 rx_ring->qdev = qdev;
4106 rx_ring->cq_id = i;
4107 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4108 if (i < qdev->rss_ring_count) {
4109 /*
4110 * Inbound (RSS) queues.
4111 */
4112 rx_ring->cq_len = qdev->rx_ring_size;
4113 rx_ring->cq_size =
4114 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4115 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4116 rx_ring->lbq_size =
4117 rx_ring->lbq_len * sizeof(__le64);
4118 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4119 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4120 "lbq_buf_size %d, order = %d\n",
4121 rx_ring->lbq_buf_size,
4122 qdev->lbq_buf_order);
4123 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4124 rx_ring->sbq_size =
4125 rx_ring->sbq_len * sizeof(__le64);
4126 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4127 rx_ring->type = RX_Q;
4128 } else {
4129 /*
4130 * Outbound queue handles outbound completions only.
4131 */
4132 /* outbound cq is same size as tx_ring it services. */
4133 rx_ring->cq_len = qdev->tx_ring_size;
4134 rx_ring->cq_size =
4135 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4136 rx_ring->lbq_len = 0;
4137 rx_ring->lbq_size = 0;
4138 rx_ring->lbq_buf_size = 0;
4139 rx_ring->sbq_len = 0;
4140 rx_ring->sbq_size = 0;
4141 rx_ring->sbq_buf_size = 0;
4142 rx_ring->type = TX_Q;
4143 }
4144 }
4145 return 0;
4146 }
4147
4148 static int qlge_open(struct net_device *ndev)
4149 {
4150 int err = 0;
4151 struct ql_adapter *qdev = netdev_priv(ndev);
4152
4153 err = ql_adapter_reset(qdev);
4154 if (err)
4155 return err;
4156
4157 err = ql_configure_rings(qdev);
4158 if (err)
4159 return err;
4160
4161 err = ql_get_adapter_resources(qdev);
4162 if (err)
4163 goto error_up;
4164
4165 err = ql_adapter_up(qdev);
4166 if (err)
4167 goto error_up;
4168
4169 return err;
4170
4171 error_up:
4172 ql_release_adapter_resources(qdev);
4173 return err;
4174 }
4175
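/* Resize the large rx buffers after an MTU change. This needs a full
 * down/up cycle of the adapter; if that fails the device is closed.
 */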
4176 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4177 {
4178 struct rx_ring *rx_ring;
4179 int i, status;
4180 u32 lbq_buf_len;
4181
4182 /* Wait for an outstanding reset to complete. */
4183 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4184 int i = 4;
4185 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4186 netif_err(qdev, ifup, qdev->ndev,
4187 "Waiting for adapter UP...\n");
4188 ssleep(1);
4189 }
4190
4191 if (!i) {
4192 netif_err(qdev, ifup, qdev->ndev,
4193 "Timed out waiting for adapter UP\n");
4194 return -ETIMEDOUT;
4195 }
4196 }
4197
4198 status = ql_adapter_down(qdev);
4199 if (status)
4200 goto error;
4201
4202 /* Get the new rx buffer size. */
4203 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4204 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4205 qdev->lbq_buf_order = get_order(lbq_buf_len);
4206
4207 for (i = 0; i < qdev->rss_ring_count; i++) {
4208 rx_ring = &qdev->rx_ring[i];
4209 /* Set the new size. */
4210 rx_ring->lbq_buf_size = lbq_buf_len;
4211 }
4212
4213 status = ql_adapter_up(qdev);
4214 if (status)
4215 goto error;
4216
4217 return status;
4218 error:
4219 netif_alert(qdev, ifup, qdev->ndev,
4220 "Driver up/down cycle failed, closing device.\n");
4221 set_bit(QL_ADAPTER_UP, &qdev->flags);
4222 dev_close(qdev->ndev);
4223 return status;
4224 }
4225
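/* Only transitions between the standard (1500) and jumbo (9000) MTU are
 * accepted. The MPI port configuration work is scheduled, and the rx
 * buffers are resized if the interface is running.
 */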
4226 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4227 {
4228 struct ql_adapter *qdev = netdev_priv(ndev);
4229 int status;
4230
4231 if (ndev->mtu == 1500 && new_mtu == 9000) {
4232 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4233 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4234 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4235 } else
4236 return -EINVAL;
4237
4238 queue_delayed_work(qdev->workqueue,
4239 &qdev->mpi_port_cfg_work, 3*HZ);
4240
4241 ndev->mtu = new_mtu;
4242
4243 if (!netif_running(qdev->ndev)) {
4244 return 0;
4245 }
4246
4247 status = ql_change_rx_buffers(qdev);
4248 if (status) {
4249 netif_err(qdev, ifup, qdev->ndev,
4250 "Changing MTU failed.\n");
4251 }
4252
4253 return status;
4254 }
4255
4256 static struct net_device_stats *qlge_get_stats(struct net_device
4257 *ndev)
4258 {
4259 struct ql_adapter *qdev = netdev_priv(ndev);
4260 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4261 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4262 unsigned long pkts, mcast, dropped, errors, bytes;
4263 int i;
4264
4265 /* Get RX stats. */
4266 pkts = mcast = dropped = errors = bytes = 0;
4267 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4268 pkts += rx_ring->rx_packets;
4269 bytes += rx_ring->rx_bytes;
4270 dropped += rx_ring->rx_dropped;
4271 errors += rx_ring->rx_errors;
4272 mcast += rx_ring->rx_multicast;
4273 }
4274 ndev->stats.rx_packets = pkts;
4275 ndev->stats.rx_bytes = bytes;
4276 ndev->stats.rx_dropped = dropped;
4277 ndev->stats.rx_errors = errors;
4278 ndev->stats.multicast = mcast;
4279
4280 /* Get TX stats. */
4281 pkts = errors = bytes = 0;
4282 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4283 pkts += tx_ring->tx_packets;
4284 bytes += tx_ring->tx_bytes;
4285 errors += tx_ring->tx_errors;
4286 }
4287 ndev->stats.tx_packets = pkts;
4288 ndev->stats.tx_bytes = bytes;
4289 ndev->stats.tx_errors = errors;
4290 return &ndev->stats;
4291 }
4292
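/* Update the rx filtering mode: toggle promiscuous routing, switch to
 * all-multi when requested or when the multicast list exceeds
 * MAX_MULTICAST_ENTRIES, and load the multicast addresses into the CAM
 * with multicast-match routing enabled.
 */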
4293 static void qlge_set_multicast_list(struct net_device *ndev)
4294 {
4295 struct ql_adapter *qdev = netdev_priv(ndev);
4296 struct netdev_hw_addr *ha;
4297 int i, status;
4298
4299 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4300 if (status)
4301 return;
4302 /*
4303 * Set or clear promiscuous mode if a
4304 * transition is taking place.
4305 */
4306 if (ndev->flags & IFF_PROMISC) {
4307 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4308 if (ql_set_routing_reg
4309 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4310 netif_err(qdev, hw, qdev->ndev,
4311 "Failed to set promiscuous mode.\n");
4312 } else {
4313 set_bit(QL_PROMISCUOUS, &qdev->flags);
4314 }
4315 }
4316 } else {
4317 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4318 if (ql_set_routing_reg
4319 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4320 netif_err(qdev, hw, qdev->ndev,
4321 "Failed to clear promiscuous mode.\n");
4322 } else {
4323 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4324 }
4325 }
4326 }
4327
4328 /*
4329 * Set or clear all multicast mode if a
4330 * transition is taking place.
4331 */
4332 if ((ndev->flags & IFF_ALLMULTI) ||
4333 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4334 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4335 if (ql_set_routing_reg
4336 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4337 netif_err(qdev, hw, qdev->ndev,
4338 "Failed to set all-multi mode.\n");
4339 } else {
4340 set_bit(QL_ALLMULTI, &qdev->flags);
4341 }
4342 }
4343 } else {
4344 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4345 if (ql_set_routing_reg
4346 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4347 netif_err(qdev, hw, qdev->ndev,
4348 "Failed to clear all-multi mode.\n");
4349 } else {
4350 clear_bit(QL_ALLMULTI, &qdev->flags);
4351 }
4352 }
4353 }
4354
4355 if (!netdev_mc_empty(ndev)) {
4356 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4357 if (status)
4358 goto exit;
4359 i = 0;
4360 netdev_for_each_mc_addr(ha, ndev) {
4361 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4362 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4363 netif_err(qdev, hw, qdev->ndev,
4364 "Failed to loadmulticast address.\n");
4365 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4366 goto exit;
4367 }
4368 i++;
4369 }
4370 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4371 if (ql_set_routing_reg
4372 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4373 netif_err(qdev, hw, qdev->ndev,
4374 "Failed to set multicast match mode.\n");
4375 } else {
4376 set_bit(QL_ALLMULTI, &qdev->flags);
4377 }
4378 }
4379 exit:
4380 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4381 }
4382
4383 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4384 {
4385 struct ql_adapter *qdev = netdev_priv(ndev);
4386 struct sockaddr *addr = p;
4387 int status;
4388
4389 if (!is_valid_ether_addr(addr->sa_data))
4390 return -EADDRNOTAVAIL;
4391 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4392 /* Update local copy of current mac address. */
4393 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4394
4395 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4396 if (status)
4397 return status;
4398 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4399 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4400 if (status)
4401 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4402 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4403 return status;
4404 }
4405
4406 static void qlge_tx_timeout(struct net_device *ndev)
4407 {
4408 struct ql_adapter *qdev = netdev_priv(ndev);
4409 ql_queue_asic_error(qdev);
4410 }
4411
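/* Recovery worker for asic_reset_work: bounce the adapter (down, then up)
 * under rtnl_lock and restore the rx mode. The device is closed if the
 * down/up cycle fails.
 */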
4412 static void ql_asic_reset_work(struct work_struct *work)
4413 {
4414 struct ql_adapter *qdev =
4415 container_of(work, struct ql_adapter, asic_reset_work.work);
4416 int status;
4417 rtnl_lock();
4418 status = ql_adapter_down(qdev);
4419 if (status)
4420 goto error;
4421
4422 status = ql_adapter_up(qdev);
4423 if (status)
4424 goto error;
4425
4426 /* Restore rx mode. */
4427 clear_bit(QL_ALLMULTI, &qdev->flags);
4428 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4429 qlge_set_multicast_list(qdev->ndev);
4430
4431 rtnl_unlock();
4432 return;
4433 error:
4434 netif_alert(qdev, ifup, qdev->ndev,
4435 "Driver up/down cycle failed, closing device\n");
4436
4437 set_bit(QL_ADAPTER_UP, &qdev->flags);
4438 dev_close(qdev->ndev);
4439 rtnl_unlock();
4440 }
4441
4442 static const struct nic_operations qla8012_nic_ops = {
4443 .get_flash = ql_get_8012_flash_params,
4444 .port_initialize = ql_8012_port_initialize,
4445 };
4446
4447 static const struct nic_operations qla8000_nic_ops = {
4448 .get_flash = ql_get_8000_flash_params,
4449 .port_initialize = ql_8000_port_initialize,
4450 };
4451
4452 /* Find the pcie function number for the other NIC
4453 * on this chip. Since both NIC functions share a
4454 * common firmware we have the lowest enabled function
4455 * do any common work. Examples would be resetting
4456 * after a fatal firmware error, or doing a firmware
4457 * coredump.
4458 */
4459 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4460 {
4461 int status = 0;
4462 u32 temp;
4463 u32 nic_func1, nic_func2;
4464
4465 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4466 &temp);
4467 if (status)
4468 return status;
4469
4470 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4471 MPI_TEST_NIC_FUNC_MASK);
4472 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4473 MPI_TEST_NIC_FUNC_MASK);
4474
4475 if (qdev->func == nic_func1)
4476 qdev->alt_func = nic_func2;
4477 else if (qdev->func == nic_func2)
4478 qdev->alt_func = nic_func1;
4479 else
4480 status = -EIO;
4481
4482 return status;
4483 }
4484
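/* Read this function's ID from the status register, find the alternate NIC
 * function and derive the port number (the lower-numbered function is port
 * 0). Then select the per-port semaphore mask, link/init status bits,
 * mailbox addresses and nic_ops for this device ID.
 */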
4485 static int ql_get_board_info(struct ql_adapter *qdev)
4486 {
4487 int status;
4488 qdev->func =
4489 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4490 if (qdev->func > 3)
4491 return -EIO;
4492
4493 status = ql_get_alt_pcie_func(qdev);
4494 if (status)
4495 return status;
4496
4497 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4498 if (qdev->port) {
4499 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4500 qdev->port_link_up = STS_PL1;
4501 qdev->port_init = STS_PI1;
4502 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4503 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4504 } else {
4505 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4506 qdev->port_link_up = STS_PL0;
4507 qdev->port_init = STS_PI0;
4508 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4509 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4510 }
4511 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4512 qdev->device_id = qdev->pdev->device;
4513 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4514 qdev->nic_ops = &qla8012_nic_ops;
4515 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4516 qdev->nic_ops = &qla8000_nic_ops;
4517 return status;
4518 }
4519
4520 static void ql_release_all(struct pci_dev *pdev)
4521 {
4522 struct net_device *ndev = pci_get_drvdata(pdev);
4523 struct ql_adapter *qdev = netdev_priv(ndev);
4524
4525 if (qdev->workqueue) {
4526 destroy_workqueue(qdev->workqueue);
4527 qdev->workqueue = NULL;
4528 }
4529
4530 if (qdev->reg_base)
4531 iounmap(qdev->reg_base);
4532 if (qdev->doorbell_area)
4533 iounmap(qdev->doorbell_area);
4534 vfree(qdev->mpi_coredump);
4535 pci_release_regions(pdev);
4536 pci_set_drvdata(pdev, NULL);
4537 }
4538
4539 static int __devinit ql_init_device(struct pci_dev *pdev,
4540 struct net_device *ndev, int cards_found)
4541 {
4542 struct ql_adapter *qdev = netdev_priv(ndev);
4543 int err = 0;
4544
4545 memset((void *)qdev, 0, sizeof(*qdev));
4546 err = pci_enable_device(pdev);
4547 if (err) {
4548 dev_err(&pdev->dev, "PCI device enable failed.\n");
4549 return err;
4550 }
4551
4552 qdev->ndev = ndev;
4553 qdev->pdev = pdev;
4554 pci_set_drvdata(pdev, ndev);
4555
4556 /* Set PCIe read request size */
4557 err = pcie_set_readrq(pdev, 4096);
4558 if (err) {
4559 dev_err(&pdev->dev, "Set readrq failed.\n");
4560 goto err_out1;
4561 }
4562
4563 err = pci_request_regions(pdev, DRV_NAME);
4564 if (err) {
4565 dev_err(&pdev->dev, "PCI region request failed.\n");
4566 goto err_out1;
4567 }
4568
4569 pci_set_master(pdev);
4570 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4571 set_bit(QL_DMA64, &qdev->flags);
4572 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4573 } else {
4574 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4575 if (!err)
4576 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4577 }
4578
4579 if (err) {
4580 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4581 goto err_out2;
4582 }
4583
4584 /* Set PCIe reset type for EEH to fundamental. */
4585 pdev->needs_freset = 1;
4586 pci_save_state(pdev);
4587 qdev->reg_base =
4588 ioremap_nocache(pci_resource_start(pdev, 1),
4589 pci_resource_len(pdev, 1));
4590 if (!qdev->reg_base) {
4591 dev_err(&pdev->dev, "Register mapping failed.\n");
4592 err = -ENOMEM;
4593 goto err_out2;
4594 }
4595
4596 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4597 qdev->doorbell_area =
4598 ioremap_nocache(pci_resource_start(pdev, 3),
4599 pci_resource_len(pdev, 3));
4600 if (!qdev->doorbell_area) {
4601 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4602 err = -ENOMEM;
4603 goto err_out2;
4604 }
4605
4606 err = ql_get_board_info(qdev);
4607 if (err) {
4608 dev_err(&pdev->dev, "Register access failed.\n");
4609 err = -EIO;
4610 goto err_out2;
4611 }
4612 qdev->msg_enable = netif_msg_init(debug, default_msg);
4613 spin_lock_init(&qdev->hw_lock);
4614 spin_lock_init(&qdev->stats_lock);
4615
4616 if (qlge_mpi_coredump) {
4617 qdev->mpi_coredump =
4618 vmalloc(sizeof(struct ql_mpi_coredump));
4619 if (qdev->mpi_coredump == NULL) {
4620 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4621 err = -ENOMEM;
4622 goto err_out2;
4623 }
4624 if (qlge_force_coredump)
4625 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4626 }
4627 /* make sure the EEPROM is good */
4628 err = qdev->nic_ops->get_flash(qdev);
4629 if (err) {
4630 dev_err(&pdev->dev, "Invalid FLASH.\n");
4631 goto err_out2;
4632 }
4633
4634 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4635 /* Keep local copy of current mac address. */
4636 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4637
4638 /* Set up the default ring sizes. */
4639 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4640 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4641
4642 /* Set up the coalescing parameters. */
4643 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4644 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4645 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4646 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4647
4648 /*
4649 * Set up the operating parameters.
4650 */
4651 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4652 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4653 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4654 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4655 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4656 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4657 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4658 init_completion(&qdev->ide_completion);
4659 mutex_init(&qdev->mpi_mutex);
4660
4661 if (!cards_found) {
4662 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4663 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4664 DRV_NAME, DRV_VERSION);
4665 }
4666 return 0;
4667 err_out2:
4668 ql_release_all(pdev);
4669 err_out1:
4670 pci_disable_device(pdev);
4671 return err;
4672 }
4673
4674 static const struct net_device_ops qlge_netdev_ops = {
4675 .ndo_open = qlge_open,
4676 .ndo_stop = qlge_close,
4677 .ndo_start_xmit = qlge_send,
4678 .ndo_change_mtu = qlge_change_mtu,
4679 .ndo_get_stats = qlge_get_stats,
4680 .ndo_set_multicast_list = qlge_set_multicast_list,
4681 .ndo_set_mac_address = qlge_set_mac_address,
4682 .ndo_validate_addr = eth_validate_addr,
4683 .ndo_tx_timeout = qlge_tx_timeout,
4684 .ndo_fix_features = qlge_fix_features,
4685 .ndo_set_features = qlge_set_features,
4686 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4687 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4688 };
4689
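/* Periodic timer armed from qlge_probe(). It reads the status register so a
 * dead PCI bus is noticed (triggering EEH) and re-arms itself every five
 * seconds unless the PCI channel is already offline.
 */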
4690 static void ql_timer(unsigned long data)
4691 {
4692 struct ql_adapter *qdev = (struct ql_adapter *)data;
4693 u32 var = 0;
4694
4695 var = ql_read32(qdev, STS);
4696 if (pci_channel_offline(qdev->pdev)) {
4697 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4698 return;
4699 }
4700
4701 mod_timer(&qdev->timer, jiffies + (5*HZ));
4702 }
4703
4704 static int __devinit qlge_probe(struct pci_dev *pdev,
4705 const struct pci_device_id *pci_entry)
4706 {
4707 struct net_device *ndev = NULL;
4708 struct ql_adapter *qdev = NULL;
4709 static int cards_found = 0;
4710 int err = 0;
4711
4712 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4713 min(MAX_CPUS, (int)num_online_cpus()));
4714 if (!ndev)
4715 return -ENOMEM;
4716
4717 err = ql_init_device(pdev, ndev, cards_found);
4718 if (err < 0) {
4719 free_netdev(ndev);
4720 return err;
4721 }
4722
4723 qdev = netdev_priv(ndev);
4724 SET_NETDEV_DEV(ndev, &pdev->dev);
4725 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4726 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4727 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4728 ndev->features = ndev->hw_features |
4729 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4730
4731 if (test_bit(QL_DMA64, &qdev->flags))
4732 ndev->features |= NETIF_F_HIGHDMA;
4733
4734 /*
4735 * Set up net_device structure.
4736 */
4737 ndev->tx_queue_len = qdev->tx_ring_size;
4738 ndev->irq = pdev->irq;
4739
4740 ndev->netdev_ops = &qlge_netdev_ops;
4741 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4742 ndev->watchdog_timeo = 10 * HZ;
4743
4744 err = register_netdev(ndev);
4745 if (err) {
4746 dev_err(&pdev->dev, "net device registration failed.\n");
4747 ql_release_all(pdev);
4748 pci_disable_device(pdev);
4749 return err;
4750 }
4751 /* Start up the timer to trigger EEH if
4752 * the bus goes dead
4753 */
4754 init_timer_deferrable(&qdev->timer);
4755 qdev->timer.data = (unsigned long)qdev;
4756 qdev->timer.function = ql_timer;
4757 qdev->timer.expires = jiffies + (5*HZ);
4758 add_timer(&qdev->timer);
4759 ql_link_off(qdev);
4760 ql_display_dev_info(ndev);
4761 atomic_set(&qdev->lb_count, 0);
4762 cards_found++;
4763 return 0;
4764 }
4765
4766 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4767 {
4768 return qlge_send(skb, ndev);
4769 }
4770
4771 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4772 {
4773 return ql_clean_inbound_rx_ring(rx_ring, budget);
4774 }
4775
4776 static void __devexit qlge_remove(struct pci_dev *pdev)
4777 {
4778 struct net_device *ndev = pci_get_drvdata(pdev);
4779 struct ql_adapter *qdev = netdev_priv(ndev);
4780 del_timer_sync(&qdev->timer);
4781 ql_cancel_all_work_sync(qdev);
4782 unregister_netdev(ndev);
4783 ql_release_all(pdev);
4784 pci_disable_device(pdev);
4785 free_netdev(ndev);
4786 }
4787
4788 /* Clean up resources without touching hardware. */
4789 static void ql_eeh_close(struct net_device *ndev)
4790 {
4791 int i;
4792 struct ql_adapter *qdev = netdev_priv(ndev);
4793
4794 if (netif_carrier_ok(ndev)) {
4795 netif_carrier_off(ndev);
4796 netif_stop_queue(ndev);
4797 }
4798
4799 /* Disabling the timer */
4800 del_timer_sync(&qdev->timer);
4801 ql_cancel_all_work_sync(qdev);
4802
4803 for (i = 0; i < qdev->rss_ring_count; i++)
4804 netif_napi_del(&qdev->rx_ring[i].napi);
4805
4806 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4807 ql_tx_ring_clean(qdev);
4808 ql_free_rx_buffers(qdev);
4809 ql_release_adapter_resources(qdev);
4810 }
4811
4812 /*
4813 * This callback is called by the PCI subsystem whenever
4814 * a PCI bus error is detected.
4815 */
4816 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4817 enum pci_channel_state state)
4818 {
4819 struct net_device *ndev = pci_get_drvdata(pdev);
4820 struct ql_adapter *qdev = netdev_priv(ndev);
4821
4822 switch (state) {
4823 case pci_channel_io_normal:
4824 return PCI_ERS_RESULT_CAN_RECOVER;
4825 case pci_channel_io_frozen:
4826 netif_device_detach(ndev);
4827 if (netif_running(ndev))
4828 ql_eeh_close(ndev);
4829 pci_disable_device(pdev);
4830 return PCI_ERS_RESULT_NEED_RESET;
4831 case pci_channel_io_perm_failure:
4832 dev_err(&pdev->dev,
4833 "%s: pci_channel_io_perm_failure.\n", __func__);
4834 ql_eeh_close(ndev);
4835 set_bit(QL_EEH_FATAL, &qdev->flags);
4836 return PCI_ERS_RESULT_DISCONNECT;
4837 }
4838
4839 /* Request a slot reset. */
4840 return PCI_ERS_RESULT_NEED_RESET;
4841 }
4842
4843 /*
4844 * This callback is called after the PCI bus has been reset.
4845 * Basically, this tries to restart the card from scratch.
4846 * This is a shortened version of the device probe/discovery code;
4847 * it resembles the first half of the qlge_probe() routine.
4848 */
4849 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4850 {
4851 struct net_device *ndev = pci_get_drvdata(pdev);
4852 struct ql_adapter *qdev = netdev_priv(ndev);
4853
4854 pdev->error_state = pci_channel_io_normal;
4855
4856 pci_restore_state(pdev);
4857 if (pci_enable_device(pdev)) {
4858 netif_err(qdev, ifup, qdev->ndev,
4859 "Cannot re-enable PCI device after reset.\n");
4860 return PCI_ERS_RESULT_DISCONNECT;
4861 }
4862 pci_set_master(pdev);
4863
4864 if (ql_adapter_reset(qdev)) {
4865 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4866 set_bit(QL_EEH_FATAL, &qdev->flags);
4867 return PCI_ERS_RESULT_DISCONNECT;
4868 }
4869
4870 return PCI_ERS_RESULT_RECOVERED;
4871 }
4872
4873 static void qlge_io_resume(struct pci_dev *pdev)
4874 {
4875 struct net_device *ndev = pci_get_drvdata(pdev);
4876 struct ql_adapter *qdev = netdev_priv(ndev);
4877 int err = 0;
4878
4879 if (netif_running(ndev)) {
4880 err = qlge_open(ndev);
4881 if (err) {
4882 netif_err(qdev, ifup, qdev->ndev,
4883 "Device initialization failed after reset.\n");
4884 return;
4885 }
4886 } else {
4887 netif_err(qdev, ifup, qdev->ndev,
4888 "Device was not running prior to EEH.\n");
4889 }
4890 mod_timer(&qdev->timer, jiffies + (5*HZ));
4891 netif_device_attach(ndev);
4892 }
4893
4894 static struct pci_error_handlers qlge_err_handler = {
4895 .error_detected = qlge_io_error_detected,
4896 .slot_reset = qlge_io_slot_reset,
4897 .resume = qlge_io_resume,
4898 };
4899
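/* Legacy PCI suspend handler, also used by qlge_shutdown(): detach the
 * netdev, bring the adapter down if it is running, program WOL, save the
 * PCI state and power the device down.
 */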
4900 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4901 {
4902 struct net_device *ndev = pci_get_drvdata(pdev);
4903 struct ql_adapter *qdev = netdev_priv(ndev);
4904 int err;
4905
4906 netif_device_detach(ndev);
4907 del_timer_sync(&qdev->timer);
4908
4909 if (netif_running(ndev)) {
4910 err = ql_adapter_down(qdev);
4911 if (err)
4912 return err;
4913 }
4914
4915 ql_wol(qdev);
4916 err = pci_save_state(pdev);
4917 if (err)
4918 return err;
4919
4920 pci_disable_device(pdev);
4921
4922 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4923
4924 return 0;
4925 }
4926
4927 #ifdef CONFIG_PM
4928 static int qlge_resume(struct pci_dev *pdev)
4929 {
4930 struct net_device *ndev = pci_get_drvdata(pdev);
4931 struct ql_adapter *qdev = netdev_priv(ndev);
4932 int err;
4933
4934 pci_set_power_state(pdev, PCI_D0);
4935 pci_restore_state(pdev);
4936 err = pci_enable_device(pdev);
4937 if (err) {
4938 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4939 return err;
4940 }
4941 pci_set_master(pdev);
4942
4943 pci_enable_wake(pdev, PCI_D3hot, 0);
4944 pci_enable_wake(pdev, PCI_D3cold, 0);
4945
4946 if (netif_running(ndev)) {
4947 err = ql_adapter_up(qdev);
4948 if (err)
4949 return err;
4950 }
4951
4952 mod_timer(&qdev->timer, jiffies + (5*HZ));
4953 netif_device_attach(ndev);
4954
4955 return 0;
4956 }
4957 #endif /* CONFIG_PM */
4958
4959 static void qlge_shutdown(struct pci_dev *pdev)
4960 {
4961 qlge_suspend(pdev, PMSG_SUSPEND);
4962 }
4963
4964 static struct pci_driver qlge_driver = {
4965 .name = DRV_NAME,
4966 .id_table = qlge_pci_tbl,
4967 .probe = qlge_probe,
4968 .remove = __devexit_p(qlge_remove),
4969 #ifdef CONFIG_PM
4970 .suspend = qlge_suspend,
4971 .resume = qlge_resume,
4972 #endif
4973 .shutdown = qlge_shutdown,
4974 .err_handler = &qlge_err_handler
4975 };
4976
4977 static int __init qlge_init_module(void)
4978 {
4979 return pci_register_driver(&qlge_driver);
4980 }
4981
4982 static void __exit qlge_exit(void)
4983 {
4984 pci_unregister_driver(&qlge_driver);
4985 }
4986
4987 module_init(qlge_init_module);
4988 module_exit(qlge_exit);