/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k4"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
typedef enum {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
	PHY_DEVICE_et	phyDevice;
	u32		phyIdOUI;
	u16		phyIdModel;
	char		*name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

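/*
 * ql_sem_lock() is the non-blocking counterpart of ql_sem_spinlock():
 * it makes a single attempt to grab the semaphore and returns nonzero
 * on success, zero if the semaphore is already owned.
 */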
static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}

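/*
 * The port register file is windowed: the ISP_CONTROL_NP bits of
 * ispControlStatus select which page of registers is currently mapped.
 * The helpers below cache the active page in qdev->current_page so
 * reads and writes only switch pages when necessary.
 */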
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

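/*
 * NVRAM accesses bit-bang a serial EEPROM through serialPortInterfaceReg;
 * the udelay() after each write/readback gives the part time to latch
 * the transition.
 */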
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}

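/*
 * Large receive buffers are tracked on a singly linked free list.
 * If an skb could not be allocated when the buffer was last released,
 * lrg_buf_skb_check is bumped and the allocation is retried here and
 * later from ql_populate_free_queue().
 */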
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

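/*
 * fm93c56a_cmd() clocks a command out to the FM93C56A EEPROM one bit
 * at a time: a start bit first, then the command bits and the address
 * bits, MSB first.  The DO line is only toggled when the next bit
 * differs from the previous one; each bit is latched by a full
 * rise/fall of the EEPROM clock.
 */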
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg
		     (qdev,
		      &port_regs->CommonRegs.
		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_swap_mac_addr(u8 *macAddress)
{
#ifdef __BIG_ENDIAN
	u8 temp;
	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
#endif
}

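/*
 * The NVRAM image is valid when the 16-bit words of the whole EEPROM
 * sum to zero.  ql_get_nvram_params() reads the image into
 * qdev->nvram_data while holding the NVRAM hardware semaphore and
 * returns nonzero on failure, zero on success.
 */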
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and the two 8-bit values version, and numPorts.  We
	 * have to swap them on big endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *)&qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

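/*
 * MII management operations poll macMIIStatusReg until the BSY bit
 * clears; ql_wait_for_mii_ready() gives up after 1000 * 10us.
 */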
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

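/*
 * The 22-bit PHY OUI is split across the two IEEE 802.3 ID registers;
 * getPhyType() reassembles it together with the model number and looks
 * the pair up in the PHY_DEVICES table above.
 */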
static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
				u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;

			printk(KERN_INFO "%s: Phy: %s\n",
			       qdev->ndev->name, PHY_DEVICES[i].name);

			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	case PHY_VITESSE_VSC8211:
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
		       qdev->ndev->name);
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
		       qdev->ndev->name);
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
		return -EIO;
	}

	return 0;
}

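/*
 * The MAC config helpers below all use the same hardware convention:
 * the upper 16 bits of the value written are a mask selecting which
 * config bits to update, and the lower 16 bits carry the new values.
 * Writing (bit << 16) alone therefore clears that bit.
 */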
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if (qdev->mac_index == 0)
		portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if ((portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) &&
	    (portConfiguration & PORT_CONFIG_1000MB_SPEED))
		reg |= PHY_GIG_ADV_1000F;

	if ((portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) &&
	    (portConfiguration & PORT_CONFIG_1000MB_SPEED))
		reg |= PHY_GIG_ADV_1000H;

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
		       qdev->ndev->name);
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

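/*
 * ql_link_state_machine() is run periodically to track PHY/PETBI link
 * transitions: it brings the port up once auto-negotiation completes
 * and marks it down again when the hardware reports link loss.
 */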
static void ql_link_state_machine(struct ql3_adapter *qdev)
{
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",
			       qdev->ndev->name);
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	u32 reg;
	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

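/*
 * ql_populate_free_queue() retries the skb allocations that failed in
 * ql_release_to_lrg_buf_free_list(); it returns 1 once every large
 * buffer on the free list owns an skb again, 0 otherwise.
 */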
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed netdev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					       qdev->ndev->name, err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

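/*
 * The two producer-index helpers below hand receive buffers back to
 * the chip in batches: releases are accumulated in *_release_cnt and
 * the queue producer index register is only written once enough
 * buffers have been returned.
 */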
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8)
	    && (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16)
		       && (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

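/*
 * Transmit completion: unmap the DMA segments (the first segment was
 * mapped with pci_map_single(), the rest with pci_map_page()), bump
 * the stats, free the skb and return the slot to tx_count.
 */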
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short, but it was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");

		qdev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n",
		       mac_rsp->transaction_id);

		qdev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->stats.tx_packets++;
	qdev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

2096 static void ql_get_sbuf(struct ql3_adapter *qdev)
2097 {
2098 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
2099 qdev->small_buf_index = 0;
2100 qdev->small_buf_release_cnt++;
2101 }
2102
2103 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
2104 {
2105 struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
2106 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
2107 qdev->lrg_buf_release_cnt++;
2108 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2109 qdev->lrg_buf_index = 0;
2110 return lrg_buf_cb;
2111 }
2112
2113 /*
2114 * The difference between 3022 and 3032 for inbound completions:
2115 * 3022 uses two buffers per completion. The first buffer contains
2116 * (some) header info, the second the remainder of the headers plus
2117 * the data. For this chip we reserve some space at the top of the
2118 * receive buffer so that the header info in buffer one can be
2119 * prepended to buffer two. Buffer two is then sent up while
2120 * buffer one is returned to the hardware to be reused.
2121 * 3032 receives all of its data and headers in one buffer for a
2122 * simpler process. 3032 also supports checksum verification as
2123 * can be seen in ql_process_macip_rx_intr().
2124 */
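/*
 * Illustrative sketch of the two inbound paths described above
 * (a sketch only, not authoritative; buffer sizes are not to scale):
 *
 *   3022:  small buf (addr list)
 *          lrg buf 1: [header info]               -> returned to free list
 *          lrg buf 2: [QL_HEADER_SPACE][headers + data]
 *                      ^-- header info from buf 1 is prepended here
 *                          before buf 2 is sent up the stack
 *
 *   3032:  small buf (addr list)
 *          lrg buf:   [headers + data]            -> sent up the stack
 */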
2125 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2126 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2127 {
2128 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2129 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2130 struct sk_buff *skb;
2131 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2132
2133 /*
2134 * Get the inbound address list (small buffer).
2135 */
2136 ql_get_sbuf(qdev);
2137
2138 if (qdev->device_id == QL3022_DEVICE_ID)
2139 lrg_buf_cb1 = ql_get_lbuf(qdev);
2140
2141 /* start of second buffer */
2142 lrg_buf_cb2 = ql_get_lbuf(qdev);
2143 skb = lrg_buf_cb2->skb;
2144
2145 qdev->stats.rx_packets++;
2146 qdev->stats.rx_bytes += length;
2147
2148 skb_put(skb, length);
2149 pci_unmap_single(qdev->pdev,
2150 pci_unmap_addr(lrg_buf_cb2, mapaddr),
2151 pci_unmap_len(lrg_buf_cb2, maplen),
2152 PCI_DMA_FROMDEVICE);
2153 prefetch(skb->data);
2154 skb->ip_summed = CHECKSUM_NONE;
2155 skb->protocol = eth_type_trans(skb, qdev->ndev);
2156
2157 netif_receive_skb(skb);
2158 qdev->ndev->last_rx = jiffies;
2159 lrg_buf_cb2->skb = NULL;
2160
2161 if (qdev->device_id == QL3022_DEVICE_ID)
2162 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2163 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2164 }
2165
2166 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2167 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2168 {
2169 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2170 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2171 struct sk_buff *skb1 = NULL, *skb2;
2172 struct net_device *ndev = qdev->ndev;
2173 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2174 u16 size = 0;
2175
2176 /*
2177 * Get the inbound address list (small buffer).
2178 */
2179
2180 ql_get_sbuf(qdev);
2181
2182 if (qdev->device_id == QL3022_DEVICE_ID) {
2183 /* start of first buffer on 3022 */
2184 lrg_buf_cb1 = ql_get_lbuf(qdev);
2185 skb1 = lrg_buf_cb1->skb;
2186 size = ETH_HLEN;
2187 if (*((u16 *) skb1->data) != 0xFFFF)
2188 size += VLAN_ETH_HLEN - ETH_HLEN;
2189 }
2190
2191 /* start of second buffer */
2192 lrg_buf_cb2 = ql_get_lbuf(qdev);
2193 skb2 = lrg_buf_cb2->skb;
2194
2195 skb_put(skb2, length); /* Just the second buffer length here. */
2196 pci_unmap_single(qdev->pdev,
2197 pci_unmap_addr(lrg_buf_cb2, mapaddr),
2198 pci_unmap_len(lrg_buf_cb2, maplen),
2199 PCI_DMA_FROMDEVICE);
2200 prefetch(skb2->data);
2201
2202 skb2->ip_summed = CHECKSUM_NONE;
2203 if (qdev->device_id == QL3022_DEVICE_ID) {
2204 /*
2205 * Copy the ethhdr from first buffer to second. This
2206 * is necessary for 3022 IP completions.
2207 */
2208 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2209 skb_push(skb2, size), size);
2210 } else {
2211 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2212 if (checksum &
2213 (IB_IP_IOCB_RSP_3032_ICE |
2214 IB_IP_IOCB_RSP_3032_CE)) {
2215 printk(KERN_ERR
2216 "%s: Bad checksum for this %s packet, checksum = %x.\n",
2217 __func__,
2218 ((checksum &
2219 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
2220 "UDP"),checksum);
2221 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2222 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2223 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2224 skb2->ip_summed = CHECKSUM_UNNECESSARY;
2225 }
2226 }
2227 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2228
2229 netif_receive_skb(skb2);
2230 qdev->stats.rx_packets++;
2231 qdev->stats.rx_bytes += length;
2232 ndev->last_rx = jiffies;
2233 lrg_buf_cb2->skb = NULL;
2234
2235 if (qdev->device_id == QL3022_DEVICE_ID)
2236 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2237 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2238 }
2239
2240 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2241 int *tx_cleaned, int *rx_cleaned, int work_to_do)
2242 {
2243 struct net_rsp_iocb *net_rsp;
2244 struct net_device *ndev = qdev->ndev;
2245 int work_done = 0;
2246
2247 /* While there are entries in the completion queue. */
2248 while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2249 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2250
2251 net_rsp = qdev->rsp_current;
2252 switch (net_rsp->opcode) {
2253
2254 case OPCODE_OB_MAC_IOCB_FN0:
2255 case OPCODE_OB_MAC_IOCB_FN2:
2256 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2257 net_rsp);
2258 (*tx_cleaned)++;
2259 break;
2260
2261 case OPCODE_IB_MAC_IOCB:
2262 case OPCODE_IB_3032_MAC_IOCB:
2263 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2264 net_rsp);
2265 (*rx_cleaned)++;
2266 break;
2267
2268 case OPCODE_IB_IP_IOCB:
2269 case OPCODE_IB_3032_IP_IOCB:
2270 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2271 net_rsp);
2272 (*rx_cleaned)++;
2273 break;
2274 default:
2275 {
2276 u32 *tmp = (u32 *) net_rsp;
2277 printk(KERN_ERR PFX
2278 "%s: Unhandled response opcode %x, "
2279 "dropping the packet.\n",
2280 ndev->name, net_rsp->opcode);
2283 printk(KERN_ERR PFX
2284 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2285 (unsigned long int)tmp[0],
2286 (unsigned long int)tmp[1],
2287 (unsigned long int)tmp[2],
2288 (unsigned long int)tmp[3]);
2289 }
2290 }
2291
2292 qdev->rsp_consumer_index++;
2293
2294 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2295 qdev->rsp_consumer_index = 0;
2296 qdev->rsp_current = qdev->rsp_q_virt_addr;
2297 } else {
2298 qdev->rsp_current++;
2299 }
2300
2301 work_done = *tx_cleaned + *rx_cleaned;
2302 }
2303
2304 return work_done;
2305 }
2306
2307 static int ql_poll(struct net_device *ndev, int *budget)
2308 {
2309 struct ql3_adapter *qdev = netdev_priv(ndev);
2310 int work_to_do = min(*budget, ndev->quota);
2311 int rx_cleaned = 0, tx_cleaned = 0;
2312 unsigned long hw_flags;
2313 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2314
2315 if (!netif_carrier_ok(ndev))
2316 goto quit_polling;
2317
2318 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
2319 *budget -= rx_cleaned;
2320 ndev->quota -= rx_cleaned;
2321
2322 if( tx_cleaned + rx_cleaned != work_to_do ||
2323 !netif_running(ndev)) {
2324 quit_polling:
2325 netif_rx_complete(ndev);
2326
2327 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2328 ql_update_small_bufq_prod_index(qdev);
2329 ql_update_lrg_bufq_prod_index(qdev);
2330 writel(qdev->rsp_consumer_index,
2331 &port_regs->CommonRegs.rspQConsumerIndex);
2332 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2333
2334 ql_enable_interrupts(qdev);
2335 return 0;
2336 }
2337 return 1;
2338 }
2339
2340 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2341 {
2342
2343 struct net_device *ndev = dev_id;
2344 struct ql3_adapter *qdev = netdev_priv(ndev);
2345 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2346 u32 value;
2347 int handled = 1;
2348 u32 var;
2349
2352 value =
2353 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2354
2355 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2356 spin_lock(&qdev->adapter_lock);
2357 netif_stop_queue(qdev->ndev);
2358 netif_carrier_off(qdev->ndev);
2359 ql_disable_interrupts(qdev);
2360 qdev->port_link_state = LS_DOWN;
2361 set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
2362
2363 if (value & ISP_CONTROL_FE) {
2364 /*
2365 * Chip Fatal Error.
2366 */
2367 var =
2368 ql_read_page0_reg_l(qdev,
2369 &port_regs->PortFatalErrStatus);
2370 printk(KERN_WARNING PFX
2371 "%s: Resetting chip. PortFatalErrStatus "
2372 "register = 0x%x\n", ndev->name, var);
2373 set_bit(QL_RESET_START,&qdev->flags) ;
2374 } else {
2375 /*
2376 * Soft Reset Requested.
2377 */
2378 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
2379 printk(KERN_ERR PFX
2380 "%s: Another function issued a reset to the "
2381 "chip. ISR value = %x.\n", ndev->name, value);
2382 }
2383 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2384 spin_unlock(&qdev->adapter_lock);
2385 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2386 ql_disable_interrupts(qdev);
2387 if (likely(netif_rx_schedule_prep(ndev))) {
2388 __netif_rx_schedule(ndev);
2389 }
2390 } else {
2391 return IRQ_NONE;
2392 }
2393
2394 return IRQ_RETVAL(handled);
2395 }
2396
2397 /*
2398 * Get the total number of segments needed for the
2399 * given number of fragments. This is necessary because
2400 * outbound address lists (OAL) will be used when more than
2401 * two frags are given. Each address list has 5 addr/len
2402 * pairs. The 5th pair in each OAL is used to point to
2403 * the next OAL if more frags are coming.
2404 * That is why the frags:segment count ratio is not linear.
2405 */
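/*
 * Worked example of the arithmetic (a sketch, matching the table in
 * ql_get_seg_count() below): entries needed = 1 (for skb->data) +
 * nr_frags, plus one continuation entry each time the list spills
 * over -- at the 3rd IOCB slot and at the 5th slot of every OAL
 * except the last.
 *
 *   6 frags:  7 entries = 2 in IOCB + cont + 5 in one OAL  ->  8 segs
 *   7 frags:  8 entries = 2 in IOCB + cont + 4 in OAL1
 *             + cont + 2 in OAL2                           -> 10 segs
 */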
2406 static int ql_get_seg_count(struct ql3_adapter *qdev,
2407 unsigned short frags)
2408 {
2409 if (qdev->device_id == QL3022_DEVICE_ID)
2410 return 1;
2411
2412 switch(frags) {
2413 case 0: return 1; /* just the skb->data seg */
2414 case 1: return 2; /* skb->data + 1 frag */
2415 case 2: return 3; /* skb->data + 2 frags */
2416 case 3: return 5; /* skb->data + 1 frag + 1 OAL containing 2 frags */
2417 case 4: return 6;
2418 case 5: return 7;
2419 case 6: return 8;
2420 case 7: return 10;
2421 case 8: return 11;
2422 case 9: return 12;
2423 case 10: return 13;
2424 case 11: return 15;
2425 case 12: return 16;
2426 case 13: return 17;
2427 case 14: return 18;
2428 case 15: return 20;
2429 case 16: return 21;
2430 case 17: return 22;
2431 case 18: return 23;
2432 }
2433 return -1;
2434 }
2435
2436 static void ql_hw_csum_setup(const struct sk_buff *skb,
2437 struct ob_mac_iocb_req *mac_iocb_ptr)
2438 {
2439 const struct iphdr *ip = ip_hdr(skb);
2440
2441 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2442 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2443
2444 if (ip->protocol == IPPROTO_TCP) {
2445 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2446 OB_3032MAC_IOCB_REQ_IC;
2447 } else {
2448 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2449 OB_3032MAC_IOCB_REQ_IC;
2450 }
2451
2452 }
2453
2454 /*
2455 * Map the buffers for this transmit. This will return
2456 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2457 */
2458 static int ql_send_map(struct ql3_adapter *qdev,
2459 struct ob_mac_iocb_req *mac_iocb_ptr,
2460 struct ql_tx_buf_cb *tx_cb,
2461 struct sk_buff *skb)
2462 {
2463 struct oal *oal;
2464 struct oal_entry *oal_entry;
2465 int len = skb_headlen(skb);
2466 dma_addr_t map;
2467 int err;
2468 int completed_segs, i;
2469 int seg_cnt, seg = 0;
2470 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2471
2472 seg_cnt = tx_cb->seg_count;
2473 /*
2474 * Map the skb buffer first.
2475 */
2476 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2477
2478 err = pci_dma_mapping_error(map);
2479 if(err) {
2480 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2481 qdev->ndev->name, err);
2482
2483 return NETDEV_TX_BUSY;
2484 }
2485
2486 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2487 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2488 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2489 oal_entry->len = cpu_to_le32(len);
2490 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2491 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2492 seg++;
2493
2494 if (seg_cnt == 1) {
2495 /* Terminate the last segment. */
2496 oal_entry->len =
2497 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2498 } else {
2499 oal = tx_cb->oal;
2500 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
2501 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2502 oal_entry++;
2503 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2504 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2505 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2506 (seg == 17 && seg_cnt > 18)) {
2507 /* Continuation entry points to outbound address list. */
2508 map = pci_map_single(qdev->pdev, oal,
2509 sizeof(struct oal),
2510 PCI_DMA_TODEVICE);
2511
2512 err = pci_dma_mapping_error(map);
2513 if (err) {
2515 printk(KERN_ERR "%s: PCI mapping of outbound address list failed with error: %d\n",
2516 qdev->ndev->name, err);
2517 goto map_error;
2518 }
2519
2520 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2521 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2522 oal_entry->len =
2523 cpu_to_le32(sizeof(struct oal) |
2524 OAL_CONT_ENTRY);
2525 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2526 map);
2527 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2528 sizeof(struct oal));
2529 oal_entry = (struct oal_entry *)oal;
2530 oal++;
2531 seg++;
2532 }
2533
2534 map =
2535 pci_map_page(qdev->pdev, frag->page,
2536 frag->page_offset, frag->size,
2537 PCI_DMA_TODEVICE);
2538
2539 err = pci_dma_mapping_error(map);
2540 if(err) {
2541 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
2542 qdev->ndev->name, err);
2543 goto map_error;
2544 }
2545
2546 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2547 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2548 oal_entry->len = cpu_to_le32(frag->size);
2549 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2550 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2551 frag->size);
2552 }
2553 /* Terminate the last segment. */
2554 oal_entry->len =
2555 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2556 }
2557
2558 return NETDEV_TX_OK;
2559
2560 map_error:
2561 /* A PCI mapping failed, so we must back out.
2562 * Walk through the OALs and associated pages that have been
2563 * mapped so far and unmap them to clean up properly.
2564 */
2565
2566 seg = 1;
2567 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2568 oal = tx_cb->oal;
2569 for (i=0; i<completed_segs; i++,seg++) {
2570 oal_entry++;
2571
2572 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2573 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2574 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2575 (seg == 17 && seg_cnt > 18)) {
2576 pci_unmap_single(qdev->pdev,
2577 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2578 pci_unmap_len(&tx_cb->map[seg], maplen),
2579 PCI_DMA_TODEVICE);
2580 oal++;
2581 seg++;
2582 }
2583
2584 pci_unmap_page(qdev->pdev,
2585 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2586 pci_unmap_len(&tx_cb->map[seg], maplen),
2587 PCI_DMA_TODEVICE);
2588 }
2589
2590 pci_unmap_single(qdev->pdev,
2591 pci_unmap_addr(&tx_cb->map[0], mapaddr),
2592 pci_unmap_len(&tx_cb->map[0], maplen),
2593 PCI_DMA_TODEVICE);
2594
2595 return NETDEV_TX_BUSY;
2596
2597 }
2598
2599 /*
2600 * The difference between 3022 and 3032 sends:
2601 * 3022 only supports a simple single segment transmission.
2602 * 3032 supports checksumming and scatter/gather lists (fragments).
2603 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2604 * in the IOCB plus a chain of outbound address lists (OAL) that
2605 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2606 * will be used to point to an OAL when more ALP entries are required.
2607 * The IOCB is always the top of the chain followed by one or more
2608 * OALs (when necessary).
2609 */
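/*
 * Illustrative 3032 send with two chained OALs (a sketch only):
 *
 *   IOCB:  [ALP0: skb->data][ALP1: frag 0][ALP2: cont --+
 *                                                       v
 *   OAL1:  [frag 1][frag 2][frag 3][frag 4][ALP5: cont --+
 *                                                        v
 *   OAL2:  [frag 5] ... (OAL_LAST_ENTRY set in the final ALP's len)
 */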
2610 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2611 {
2612 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2613 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2614 struct ql_tx_buf_cb *tx_cb;
2615 u32 tot_len = skb->len;
2616 struct ob_mac_iocb_req *mac_iocb_ptr;
2617
2618 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2619 return NETDEV_TX_BUSY;
2620 }
2621
2622 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2623 if ((tx_cb->seg_count = ql_get_seg_count(qdev,
2624 (skb_shinfo(skb)->nr_frags))) == -1) {
2625 printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
2626 return NETDEV_TX_OK;
2627 }
2628
2629 mac_iocb_ptr = tx_cb->queue_entry;
2630 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2631 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2632 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2633 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2634 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2635 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2636 tx_cb->skb = skb;
2637 if (qdev->device_id == QL3032_DEVICE_ID &&
2638 skb->ip_summed == CHECKSUM_PARTIAL)
2639 ql_hw_csum_setup(skb, mac_iocb_ptr);
2640
2641 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2642 printk(KERN_ERR PFX "%s: Could not map the segments!\n", __func__);
2643 return NETDEV_TX_BUSY;
2644 }
2645
2646 wmb();
2647 qdev->req_producer_index++;
2648 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2649 qdev->req_producer_index = 0;
2650 wmb();
2651 ql_write_common_reg_l(qdev,
2652 &port_regs->CommonRegs.reqQProducerIndex,
2653 qdev->req_producer_index);
2654
2655 ndev->trans_start = jiffies;
2656 if (netif_msg_tx_queued(qdev))
2657 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2658 ndev->name, qdev->req_producer_index, skb->len);
2659
2660 atomic_dec(&qdev->tx_count);
2661 return NETDEV_TX_OK;
2662 }
2663
2664 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2665 {
2666 qdev->req_q_size =
2667 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2668
2669 qdev->req_q_virt_addr =
2670 pci_alloc_consistent(qdev->pdev,
2671 (size_t) qdev->req_q_size,
2672 &qdev->req_q_phy_addr);
2673
2674 if ((qdev->req_q_virt_addr == NULL) ||
2675 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2676 printk(KERN_ERR PFX "%s: reqQ failed.\n",
2677 qdev->ndev->name);
2678 return -ENOMEM;
2679 }
2680
2681 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2682
2683 qdev->rsp_q_virt_addr =
2684 pci_alloc_consistent(qdev->pdev,
2685 (size_t) qdev->rsp_q_size,
2686 &qdev->rsp_q_phy_addr);
2687
2688 if ((qdev->rsp_q_virt_addr == NULL) ||
2689 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2690 printk(KERN_ERR PFX
2691 "%s: rspQ allocation failed\n",
2692 qdev->ndev->name);
2693 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2694 qdev->req_q_virt_addr,
2695 qdev->req_q_phy_addr);
2696 return -ENOMEM;
2697 }
2698
2699 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2700
2701 return 0;
2702 }
2703
2704 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2705 {
2706 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2707 printk(KERN_INFO PFX
2708 "%s: Already done.\n", qdev->ndev->name);
2709 return;
2710 }
2711
2712 pci_free_consistent(qdev->pdev,
2713 qdev->req_q_size,
2714 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2715
2716 qdev->req_q_virt_addr = NULL;
2717
2718 pci_free_consistent(qdev->pdev,
2719 qdev->rsp_q_size,
2720 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2721
2722 qdev->rsp_q_virt_addr = NULL;
2723
2724 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2725 }
2726
2727 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2728 {
2729 /* Create Large Buffer Queue */
2730 qdev->lrg_buf_q_size =
2731 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2732 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2733 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2734 else
2735 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2736
2737 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
2738 if (qdev->lrg_buf == NULL) {
2739 printk(KERN_ERR PFX
2740 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2741 return -ENOMEM;
2742 }
2743
2744 qdev->lrg_buf_q_alloc_virt_addr =
2745 pci_alloc_consistent(qdev->pdev,
2746 qdev->lrg_buf_q_alloc_size,
2747 &qdev->lrg_buf_q_alloc_phy_addr);
2748
2749 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2750 printk(KERN_ERR PFX
2751 "%s: lBufQ failed\n", qdev->ndev->name);
2752 return -ENOMEM;
2753 }
2754 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2755 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2756
2757 /* Create Small Buffer Queue */
2758 qdev->small_buf_q_size =
2759 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2760 if (qdev->small_buf_q_size < PAGE_SIZE)
2761 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2762 else
2763 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2764
2765 qdev->small_buf_q_alloc_virt_addr =
2766 pci_alloc_consistent(qdev->pdev,
2767 qdev->small_buf_q_alloc_size,
2768 &qdev->small_buf_q_alloc_phy_addr);
2769
2770 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2771 printk(KERN_ERR PFX
2772 "%s: Small Buffer Queue allocation failed.\n",
2773 qdev->ndev->name);
2774 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2775 qdev->lrg_buf_q_alloc_virt_addr,
2776 qdev->lrg_buf_q_alloc_phy_addr);
2777 return -ENOMEM;
2778 }
2779
2780 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2781 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2782 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2783 return 0;
2784 }
2785
2786 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2787 {
2788 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2789 printk(KERN_INFO PFX
2790 "%s: Already done.\n", qdev->ndev->name);
2791 return;
2792 }
2793 kfree(qdev->lrg_buf);
2794 pci_free_consistent(qdev->pdev,
2795 qdev->lrg_buf_q_alloc_size,
2796 qdev->lrg_buf_q_alloc_virt_addr,
2797 qdev->lrg_buf_q_alloc_phy_addr);
2798
2799 qdev->lrg_buf_q_virt_addr = NULL;
2800
2801 pci_free_consistent(qdev->pdev,
2802 qdev->small_buf_q_alloc_size,
2803 qdev->small_buf_q_alloc_virt_addr,
2804 qdev->small_buf_q_alloc_phy_addr);
2805
2806 qdev->small_buf_q_virt_addr = NULL;
2807
2808 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2809 }
2810
2811 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2812 {
2813 int i;
2814 struct bufq_addr_element *small_buf_q_entry;
2815
2816 /* Allocate one chunk of memory and carve it up for the small buffers. */
2817 qdev->small_buf_total_size =
2818 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2819 QL_SMALL_BUFFER_SIZE);
2820
2821 qdev->small_buf_virt_addr =
2822 pci_alloc_consistent(qdev->pdev,
2823 qdev->small_buf_total_size,
2824 &qdev->small_buf_phy_addr);
2825
2826 if (qdev->small_buf_virt_addr == NULL) {
2827 printk(KERN_ERR PFX
2828 "%s: Failed to get small buffer memory.\n",
2829 qdev->ndev->name);
2830 return -ENOMEM;
2831 }
2832
2833 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2834 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2835
2836 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2837
2838 /* Initialize the small buffer queue. */
2839 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2840 small_buf_q_entry->addr_high =
2841 cpu_to_le32(qdev->small_buf_phy_addr_high);
2842 small_buf_q_entry->addr_low =
2843 cpu_to_le32(qdev->small_buf_phy_addr_low +
2844 (i * QL_SMALL_BUFFER_SIZE));
2845 small_buf_q_entry++;
2846 }
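/*
 * Example of the resulting layout (illustrative): entry i in the
 * small buffer queue points i * QL_SMALL_BUFFER_SIZE bytes into the
 * single DMA allocation made above, so all of the small buffers are
 * carved out of one contiguous block.
 */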
2847 qdev->small_buf_index = 0;
2848 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2849 return 0;
2850 }
2851
2852 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2853 {
2854 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2855 printk(KERN_INFO PFX
2856 "%s: Already done.\n", qdev->ndev->name);
2857 return;
2858 }
2859 if (qdev->small_buf_virt_addr != NULL) {
2860 pci_free_consistent(qdev->pdev,
2861 qdev->small_buf_total_size,
2862 qdev->small_buf_virt_addr,
2863 qdev->small_buf_phy_addr);
2864
2865 qdev->small_buf_virt_addr = NULL;
2866 }
2867 }
2868
2869 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2870 {
2871 int i = 0;
2872 struct ql_rcv_buf_cb *lrg_buf_cb;
2873
2874 for (i = 0; i < qdev->num_large_buffers; i++) {
2875 lrg_buf_cb = &qdev->lrg_buf[i];
2876 if (lrg_buf_cb->skb) {
2877 dev_kfree_skb(lrg_buf_cb->skb);
2878 pci_unmap_single(qdev->pdev,
2879 pci_unmap_addr(lrg_buf_cb, mapaddr),
2880 pci_unmap_len(lrg_buf_cb, maplen),
2881 PCI_DMA_FROMDEVICE);
2882 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2883 } else {
2884 break;
2885 }
2886 }
2887 }
2888
2889 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2890 {
2891 int i;
2892 struct ql_rcv_buf_cb *lrg_buf_cb;
2893 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2894
2895 for (i = 0; i < qdev->num_large_buffers; i++) {
2896 lrg_buf_cb = &qdev->lrg_buf[i];
2897 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2898 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2899 buf_addr_ele++;
2900 }
2901 qdev->lrg_buf_index = 0;
2902 qdev->lrg_buf_skb_check = 0;
2903 }
2904
2905 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2906 {
2907 int i;
2908 struct ql_rcv_buf_cb *lrg_buf_cb;
2909 struct sk_buff *skb;
2910 dma_addr_t map;
2911 int err;
2912
2913 for (i = 0; i < qdev->num_large_buffers; i++) {
2914 skb = netdev_alloc_skb(qdev->ndev,
2915 qdev->lrg_buffer_len);
2916 if (unlikely(!skb)) {
2917 /* Better luck next round */
2918 printk(KERN_ERR PFX
2919 "%s: large buffer allocation failed "
2920 "for %d bytes at index %d.\n",
2921 qdev->ndev->name,
2922 qdev->lrg_buffer_len, i);
2923 ql_free_large_buffers(qdev);
2924 return -ENOMEM;
2925 } else {
2926
2927 lrg_buf_cb = &qdev->lrg_buf[i];
2928 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2929 lrg_buf_cb->index = i;
2930 lrg_buf_cb->skb = skb;
2931 /*
2932 * We save some space to copy the ethhdr from first
2933 * buffer
2934 */
2935 skb_reserve(skb, QL_HEADER_SPACE);
2936 map = pci_map_single(qdev->pdev,
2937 skb->data,
2938 qdev->lrg_buffer_len -
2939 QL_HEADER_SPACE,
2940 PCI_DMA_FROMDEVICE);
2941
2942 err = pci_dma_mapping_error(map);
2943 if(err) {
2944 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2945 qdev->ndev->name, err);
2946 ql_free_large_buffers(qdev);
2947 return -ENOMEM;
2948 }
2949
2950 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2951 pci_unmap_len_set(lrg_buf_cb, maplen,
2952 qdev->lrg_buffer_len -
2953 QL_HEADER_SPACE);
2954 lrg_buf_cb->buf_phy_addr_low =
2955 cpu_to_le32(LS_64BITS(map));
2956 lrg_buf_cb->buf_phy_addr_high =
2957 cpu_to_le32(MS_64BITS(map));
2958 }
2959 }
2960 return 0;
2961 }
2962
2963 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2964 {
2965 struct ql_tx_buf_cb *tx_cb;
2966 int i;
2967
2968 tx_cb = &qdev->tx_buf[0];
2969 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2970 if (tx_cb->oal) {
2971 kfree(tx_cb->oal);
2972 tx_cb->oal = NULL;
2973 }
2974 tx_cb++;
2975 }
2976 }
2977
2978 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2979 {
2980 struct ql_tx_buf_cb *tx_cb;
2981 int i;
2982 struct ob_mac_iocb_req *req_q_curr =
2983 qdev->req_q_virt_addr;
2984
2985 /* Create free list of transmit buffers */
2986 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2987
2988 tx_cb = &qdev->tx_buf[i];
2989 tx_cb->skb = NULL;
2990 tx_cb->queue_entry = req_q_curr;
2991 req_q_curr++;
2992 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2993 if (tx_cb->oal == NULL)
2994 return -1;
2995 }
2996 return 0;
2997 }
2998
2999 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
3000 {
3001 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
3002 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
3003 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
3004 }
3005 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
3006 /*
3007 * Bigger buffers, so less of them.
3008 */
3009 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
3010 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
3011 } else {
3012 printk(KERN_ERR PFX
3013 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
3014 qdev->ndev->name);
3015 return -ENOMEM;
3016 }
3017 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
3018 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
3019 qdev->max_frame_size =
3020 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
3021
3022 /*
3023 * First allocate a page of shared memory and use it for shadow
3024 * locations of Network Request Queue Consumer Address Register and
3025 * Network Completion Queue Producer Index Register
3026 */
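/*
 * Resulting shadow page layout (a sketch; the offsets follow the
 * assignments below):
 *
 *   offset 0: request queue consumer index  (updated by the chip)
 *   offset 8: response queue producer index (updated by the chip)
 *   the rest of the PAGE_SIZE allocation is unused
 */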
3027 qdev->shadow_reg_virt_addr =
3028 pci_alloc_consistent(qdev->pdev,
3029 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
3030
3031 if (qdev->shadow_reg_virt_addr != NULL) {
3032 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
3033 qdev->req_consumer_index_phy_addr_high =
3034 MS_64BITS(qdev->shadow_reg_phy_addr);
3035 qdev->req_consumer_index_phy_addr_low =
3036 LS_64BITS(qdev->shadow_reg_phy_addr);
3037
3038 qdev->prsp_producer_index =
3039 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
3040 qdev->rsp_producer_index_phy_addr_high =
3041 qdev->req_consumer_index_phy_addr_high;
3042 qdev->rsp_producer_index_phy_addr_low =
3043 qdev->req_consumer_index_phy_addr_low + 8;
3044 } else {
3045 printk(KERN_ERR PFX
3046 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3047 return -ENOMEM;
3048 }
3049
3050 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3051 printk(KERN_ERR PFX
3052 "%s: ql_alloc_net_req_rsp_queues failed.\n",
3053 qdev->ndev->name);
3054 goto err_req_rsp;
3055 }
3056
3057 if (ql_alloc_buffer_queues(qdev) != 0) {
3058 printk(KERN_ERR PFX
3059 "%s: ql_alloc_buffer_queues failed.\n",
3060 qdev->ndev->name);
3061 goto err_buffer_queues;
3062 }
3063
3064 if (ql_alloc_small_buffers(qdev) != 0) {
3065 printk(KERN_ERR PFX
3066 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3067 goto err_small_buffers;
3068 }
3069
3070 if (ql_alloc_large_buffers(qdev) != 0) {
3071 printk(KERN_ERR PFX
3072 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3073 goto err_small_buffers;
3074 }
3075
3076 /* Initialize the large buffer queue. */
3077 ql_init_large_buffers(qdev);
3078 if (ql_create_send_free_list(qdev))
3079 goto err_free_list;
3080
3081 qdev->rsp_current = qdev->rsp_q_virt_addr;
3082
3083 return 0;
3084 err_free_list:
3085 ql_free_send_free_list(qdev);
3086 err_small_buffers:
3087 ql_free_buffer_queues(qdev);
3088 err_buffer_queues:
3089 ql_free_net_req_rsp_queues(qdev);
3090 err_req_rsp:
3091 pci_free_consistent(qdev->pdev,
3092 PAGE_SIZE,
3093 qdev->shadow_reg_virt_addr,
3094 qdev->shadow_reg_phy_addr);
3095
3096 return -ENOMEM;
3097 }
3098
3099 static void ql_free_mem_resources(struct ql3_adapter *qdev)
3100 {
3101 ql_free_send_free_list(qdev);
3102 ql_free_large_buffers(qdev);
3103 ql_free_small_buffers(qdev);
3104 ql_free_buffer_queues(qdev);
3105 ql_free_net_req_rsp_queues(qdev);
3106 if (qdev->shadow_reg_virt_addr != NULL) {
3107 pci_free_consistent(qdev->pdev,
3108 PAGE_SIZE,
3109 qdev->shadow_reg_virt_addr,
3110 qdev->shadow_reg_phy_addr);
3111 qdev->shadow_reg_virt_addr = NULL;
3112 }
3113 }
3114
3115 static int ql_init_misc_registers(struct ql3_adapter *qdev)
3116 {
3117 struct ql3xxx_local_ram_registers __iomem *local_ram =
3118 (void __iomem *)qdev->mem_map_registers;
3119
3120 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
3121 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3122 2) << 4))
3123 return -1;
3124
3125 ql_write_page2_reg(qdev,
3126 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
3127
3128 ql_write_page2_reg(qdev,
3129 &local_ram->maxBufletCount,
3130 qdev->nvram_data.bufletCount);
3131
3132 ql_write_page2_reg(qdev,
3133 &local_ram->freeBufletThresholdLow,
3134 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
3135 (qdev->nvram_data.tcpWindowThreshold0));
3136
3137 ql_write_page2_reg(qdev,
3138 &local_ram->freeBufletThresholdHigh,
3139 qdev->nvram_data.tcpWindowThreshold50);
3140
3141 ql_write_page2_reg(qdev,
3142 &local_ram->ipHashTableBase,
3143 (qdev->nvram_data.ipHashTableBaseHi << 16) |
3144 qdev->nvram_data.ipHashTableBaseLo);
3145 ql_write_page2_reg(qdev,
3146 &local_ram->ipHashTableCount,
3147 qdev->nvram_data.ipHashTableSize);
3148 ql_write_page2_reg(qdev,
3149 &local_ram->tcpHashTableBase,
3150 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
3151 qdev->nvram_data.tcpHashTableBaseLo);
3152 ql_write_page2_reg(qdev,
3153 &local_ram->tcpHashTableCount,
3154 qdev->nvram_data.tcpHashTableSize);
3155 ql_write_page2_reg(qdev,
3156 &local_ram->ncbBase,
3157 (qdev->nvram_data.ncbTableBaseHi << 16) |
3158 qdev->nvram_data.ncbTableBaseLo);
3159 ql_write_page2_reg(qdev,
3160 &local_ram->maxNcbCount,
3161 qdev->nvram_data.ncbTableSize);
3162 ql_write_page2_reg(qdev,
3163 &local_ram->drbBase,
3164 (qdev->nvram_data.drbTableBaseHi << 16) |
3165 qdev->nvram_data.drbTableBaseLo);
3166 ql_write_page2_reg(qdev,
3167 &local_ram->maxDrbCount,
3168 qdev->nvram_data.drbTableSize);
3169 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3170 return 0;
3171 }
3172
3173 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3174 {
3175 u32 value;
3176 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3177 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3178 (void __iomem *)port_regs;
3179 u32 delay = 10;
3180 int status = 0;
3181
3182 if(ql_mii_setup(qdev))
3183 return -1;
3184
3185 /* Bring the PHY out of reset */
3186 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3187 (ISP_SERIAL_PORT_IF_WE |
3188 (ISP_SERIAL_PORT_IF_WE << 16)));
3189
3190 qdev->port_link_state = LS_DOWN;
3191 netif_carrier_off(qdev->ndev);
3192
3193 /* V2 chip fix for ARS-39168. */
3194 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3195 (ISP_SERIAL_PORT_IF_SDE |
3196 (ISP_SERIAL_PORT_IF_SDE << 16)));
3197
3198 /* Request Queue Registers */
3199 *((u32 *) (qdev->preq_consumer_index)) = 0;
3200 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
3201 qdev->req_producer_index = 0;
3202
3203 ql_write_page1_reg(qdev,
3204 &hmem_regs->reqConsumerIndexAddrHigh,
3205 qdev->req_consumer_index_phy_addr_high);
3206 ql_write_page1_reg(qdev,
3207 &hmem_regs->reqConsumerIndexAddrLow,
3208 qdev->req_consumer_index_phy_addr_low);
3209
3210 ql_write_page1_reg(qdev,
3211 &hmem_regs->reqBaseAddrHigh,
3212 MS_64BITS(qdev->req_q_phy_addr));
3213 ql_write_page1_reg(qdev,
3214 &hmem_regs->reqBaseAddrLow,
3215 LS_64BITS(qdev->req_q_phy_addr));
3216 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3217
3218 /* Response Queue Registers */
3219 *((u16 *) (qdev->prsp_producer_index)) = 0;
3220 qdev->rsp_consumer_index = 0;
3221 qdev->rsp_current = qdev->rsp_q_virt_addr;
3222
3223 ql_write_page1_reg(qdev,
3224 &hmem_regs->rspProducerIndexAddrHigh,
3225 qdev->rsp_producer_index_phy_addr_high);
3226
3227 ql_write_page1_reg(qdev,
3228 &hmem_regs->rspProducerIndexAddrLow,
3229 qdev->rsp_producer_index_phy_addr_low);
3230
3231 ql_write_page1_reg(qdev,
3232 &hmem_regs->rspBaseAddrHigh,
3233 MS_64BITS(qdev->rsp_q_phy_addr));
3234
3235 ql_write_page1_reg(qdev,
3236 &hmem_regs->rspBaseAddrLow,
3237 LS_64BITS(qdev->rsp_q_phy_addr));
3238
3239 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3240
3241 /* Large Buffer Queue */
3242 ql_write_page1_reg(qdev,
3243 &hmem_regs->rxLargeQBaseAddrHigh,
3244 MS_64BITS(qdev->lrg_buf_q_phy_addr));
3245
3246 ql_write_page1_reg(qdev,
3247 &hmem_regs->rxLargeQBaseAddrLow,
3248 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3249
3250 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
3251
3252 ql_write_page1_reg(qdev,
3253 &hmem_regs->rxLargeBufferLength,
3254 qdev->lrg_buffer_len);
3255
3256 /* Small Buffer Queue */
3257 ql_write_page1_reg(qdev,
3258 &hmem_regs->rxSmallQBaseAddrHigh,
3259 MS_64BITS(qdev->small_buf_q_phy_addr));
3260
3261 ql_write_page1_reg(qdev,
3262 &hmem_regs->rxSmallQBaseAddrLow,
3263 LS_64BITS(qdev->small_buf_q_phy_addr));
3264
3265 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3266 ql_write_page1_reg(qdev,
3267 &hmem_regs->rxSmallBufferLength,
3268 QL_SMALL_BUFFER_SIZE);
3269
3270 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3271 qdev->small_buf_release_cnt = 8;
3272 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3273 qdev->lrg_buf_release_cnt = 8;
3274 qdev->lrg_buf_next_free =
3275 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3276 qdev->small_buf_index = 0;
3277 qdev->lrg_buf_index = 0;
3278 qdev->lrg_buf_free_count = 0;
3279 qdev->lrg_buf_free_head = NULL;
3280 qdev->lrg_buf_free_tail = NULL;
3281
3282 ql_write_common_reg(qdev,
3283 &port_regs->CommonRegs.
3284 rxSmallQProducerIndex,
3285 qdev->small_buf_q_producer_index);
3286 ql_write_common_reg(qdev,
3287 &port_regs->CommonRegs.
3288 rxLargeQProducerIndex,
3289 qdev->lrg_buf_q_producer_index);
3290
3291 /*
3292 * Find out if the chip has already been initialized. If it has, then
3293 * we skip some of the initialization.
3294 */
3295 clear_bit(QL_LINK_MASTER, &qdev->flags);
3296 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3297 if ((value & PORT_STATUS_IC) == 0) {
3298
3299 /* Chip has not been configured yet, so let it rip. */
3300 if(ql_init_misc_registers(qdev)) {
3301 status = -1;
3302 goto out;
3303 }
3304
3305 value = qdev->nvram_data.tcpMaxWindowSize;
3306 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3307
3308 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3309
3310 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3311 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3312 * 2) << 13)) {
3313 status = -1;
3314 goto out;
3315 }
3316 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3317 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3318 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3319 16) | (INTERNAL_CHIP_SD |
3320 INTERNAL_CHIP_WE)));
3321 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3322 }
3323
3324 if (qdev->mac_index)
3325 ql_write_page0_reg(qdev,
3326 &port_regs->mac1MaxFrameLengthReg,
3327 qdev->max_frame_size);
3328 else
3329 ql_write_page0_reg(qdev,
3330 &port_regs->mac0MaxFrameLengthReg,
3331 qdev->max_frame_size);
3332
3333 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3334 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3335 2) << 7)) {
3336 status = -1;
3337 goto out;
3338 }
3339
3340 PHY_Setup(qdev);
3341 ql_init_scan_mode(qdev);
3342 ql_get_phy_owner(qdev);
3343
3344 /* Load the MAC Configuration */
3345
3346 /* Program lower 32 bits of the MAC address */
3347 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3348 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3349 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3350 ((qdev->ndev->dev_addr[2] << 24)
3351 | (qdev->ndev->dev_addr[3] << 16)
3352 | (qdev->ndev->dev_addr[4] << 8)
3353 | qdev->ndev->dev_addr[5]));
3354
3355 /* Program top 16 bits of the MAC address */
3356 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3357 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3358 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3359 ((qdev->ndev->dev_addr[0] << 8)
3360 | qdev->ndev->dev_addr[1]));
3361
3362 /* Enable Primary MAC */
3363 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3364 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3365 MAC_ADDR_INDIRECT_PTR_REG_PE));
3366
3367 /* Clear Primary and Secondary IP addresses */
3368 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3369 ((IP_ADDR_INDEX_REG_MASK << 16) |
3370 (qdev->mac_index << 2)));
3371 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3372
3373 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3374 ((IP_ADDR_INDEX_REG_MASK << 16) |
3375 ((qdev->mac_index << 2) + 1)));
3376 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3377
3378 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3379
3380 /* Indicate Configuration Complete */
3381 ql_write_page0_reg(qdev,
3382 &port_regs->portControl,
3383 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3384
3385 do {
3386 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3387 if (value & PORT_STATUS_IC)
3388 break;
3389 msleep(500);
3390 } while (--delay);
3391
3392 if (delay == 0) {
3393 printk(KERN_ERR PFX
3394 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3395 status = -1;
3396 goto out;
3397 }
3398
3399 /* Enable Ethernet Function */
3400 if (qdev->device_id == QL3032_DEVICE_ID) {
3401 value =
3402 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3403 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3404 QL3032_PORT_CONTROL_ET);
3405 ql_write_page0_reg(qdev, &port_regs->functionControl,
3406 ((value << 16) | value));
3407 } else {
3408 value =
3409 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3410 PORT_CONTROL_HH);
3411 ql_write_page0_reg(qdev, &port_regs->portControl,
3412 ((value << 16) | value));
3413 }
3414
3415
3416 out:
3417 return status;
3418 }
3419
3420 /*
3421 * Caller holds hw_lock.
3422 */
3423 static int ql_adapter_reset(struct ql3_adapter *qdev)
3424 {
3425 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3426 int status = 0;
3427 u16 value;
3428 int max_wait_time;
3429
3430 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3431 clear_bit(QL_RESET_DONE, &qdev->flags);
3432
3433 /*
3434 * Issue soft reset to chip.
3435 */
3436 printk(KERN_DEBUG PFX
3437 "%s: Issue soft reset to chip.\n",
3438 qdev->ndev->name);
3439 ql_write_common_reg(qdev,
3440 &port_regs->CommonRegs.ispControlStatus,
3441 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3442
3443 /* Wait up to 5 seconds for the reset to complete. */
3444 printk(KERN_DEBUG PFX
3445 "%s: Waiting up to 5 seconds for reset to complete.\n",
3446 qdev->ndev->name);
3447
3448 /* Wait until the firmware tells us the Soft Reset is done */
3449 max_wait_time = 5;
3450 do {
3451 value =
3452 ql_read_common_reg(qdev,
3453 &port_regs->CommonRegs.ispControlStatus);
3454 if ((value & ISP_CONTROL_SR) == 0)
3455 break;
3456
3457 ssleep(1);
3458 } while ((--max_wait_time));
3459
3460 /*
3461 * Also, make sure that the Network Reset Interrupt bit has been
3462 * cleared after the soft reset has taken place.
3463 */
3464 value =
3465 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3466 if (value & ISP_CONTROL_RI) {
3467 printk(KERN_DEBUG PFX
3468 "ql_adapter_reset: clearing RI after reset.\n");
3469 ql_write_common_reg(qdev,
3470 &port_regs->CommonRegs.
3471 ispControlStatus,
3472 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3473 }
3474
3475 if (max_wait_time == 0) {
3476 /* Issue Force Soft Reset */
3477 ql_write_common_reg(qdev,
3478 &port_regs->CommonRegs.
3479 ispControlStatus,
3480 ((ISP_CONTROL_FSR << 16) |
3481 ISP_CONTROL_FSR));
3482 /*
3483 * Wait until the firmware tells us the Force Soft Reset is
3484 * done
3485 */
3486 max_wait_time = 5;
3487 do {
3488 value =
3489 ql_read_common_reg(qdev,
3490 &port_regs->CommonRegs.
3491 ispControlStatus);
3492 if ((value & ISP_CONTROL_FSR) == 0) {
3493 break;
3494 }
3495 ssleep(1);
3496 } while ((--max_wait_time));
3497 }
3498 if (max_wait_time == 0)
3499 status = 1;
3500
3501 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3502 set_bit(QL_RESET_DONE, &qdev->flags);
3503 return status;
3504 }
3505
3506 static void ql_set_mac_info(struct ql3_adapter *qdev)
3507 {
3508 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3509 u32 value, port_status;
3510 u8 func_number;
3511
3512 /* Get the function number */
3513 value =
3514 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3515 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3516 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3517 switch (value & ISP_CONTROL_FN_MASK) {
3518 case ISP_CONTROL_FN0_NET:
3519 qdev->mac_index = 0;
3520 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3521 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3522 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3523 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3524 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3525 if (port_status & PORT_STATUS_SM0)
3526 set_bit(QL_LINK_OPTICAL,&qdev->flags);
3527 else
3528 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3529 break;
3530
3531 case ISP_CONTROL_FN1_NET:
3532 qdev->mac_index = 1;
3533 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3534 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3535 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3536 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3537 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3538 if (port_status & PORT_STATUS_SM1)
3539 set_bit(QL_LINK_OPTICAL,&qdev->flags);
3540 else
3541 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3542 break;
3543
3544 case ISP_CONTROL_FN0_SCSI:
3545 case ISP_CONTROL_FN1_SCSI:
3546 default:
3547 printk(KERN_DEBUG PFX
3548 "%s: Invalid function number, ispControlStatus = 0x%x\n",
3549 qdev->ndev->name,value);
3550 break;
3551 }
3552 qdev->numPorts = qdev->nvram_data.numPorts;
3553 }
3554
3555 static void ql_display_dev_info(struct net_device *ndev)
3556 {
3557 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3558 struct pci_dev *pdev = qdev->pdev;
3559
3560 printk(KERN_INFO PFX
3561 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
3562 DRV_NAME, qdev->index, qdev->chip_rev_id,
3563 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3564 qdev->pci_slot);
3565 printk(KERN_INFO PFX
3566 "%s Interface.\n",
3567 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3568
3569 /*
3570 * Print PCI bus width/type.
3571 */
3572 printk(KERN_INFO PFX
3573 "Bus interface is %s %s.\n",
3574 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3575 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3576
3577 printk(KERN_INFO PFX
3578 "mem IO base address adjusted = 0x%p\n",
3579 qdev->mem_map_registers);
3580 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3581
3582 if (netif_msg_probe(qdev))
3583 printk(KERN_INFO PFX
3584 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3585 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
3586 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3587 ndev->dev_addr[5]);
3588 }
3589
3590 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3591 {
3592 struct net_device *ndev = qdev->ndev;
3593 int retval = 0;
3594
3595 netif_stop_queue(ndev);
3596 netif_carrier_off(ndev);
3597
3598 clear_bit(QL_ADAPTER_UP,&qdev->flags);
3599 clear_bit(QL_LINK_MASTER,&qdev->flags);
3600
3601 ql_disable_interrupts(qdev);
3602
3603 free_irq(qdev->pdev->irq, ndev);
3604
3605 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3606 printk(KERN_INFO PFX
3607 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
3608 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3609 pci_disable_msi(qdev->pdev);
3610 }
3611
3612 del_timer_sync(&qdev->adapter_timer);
3613
3614 netif_poll_disable(ndev);
3615
3616 if (do_reset) {
3617 int soft_reset;
3618 unsigned long hw_flags;
3619
3620 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3621 if (ql_wait_for_drvr_lock(qdev)) {
3622 if ((soft_reset = ql_adapter_reset(qdev))) {
3623 printk(KERN_ERR PFX
3624 "%s: ql_adapter_reset(%d) FAILED!\n",
3625 ndev->name, qdev->index);
3626 }
3627 printk(KERN_ERR PFX
3628 "%s: Releaseing driver lock via chip reset.\n",ndev->name);
3629 } else {
3630 printk(KERN_ERR PFX
3631 "%s: Could not acquire driver lock to do "
3632 "reset!\n", ndev->name);
3633 retval = -1;
3634 }
3635 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3636 }
3637 ql_free_mem_resources(qdev);
3638 return retval;
3639 }
3640
3641 static int ql_adapter_up(struct ql3_adapter *qdev)
3642 {
3643 struct net_device *ndev = qdev->ndev;
3644 int err;
3645 unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
3646 unsigned long hw_flags;
3647
3648 if (ql_alloc_mem_resources(qdev)) {
3649 printk(KERN_ERR PFX
3650 "%s Unable to allocate buffers.\n", ndev->name);
3651 return -ENOMEM;
3652 }
3653
3654 if (qdev->msi) {
3655 if (pci_enable_msi(qdev->pdev)) {
3656 printk(KERN_ERR PFX
3657 "%s: User requested MSI, but MSI failed to "
3658 "initialize. Continuing without MSI.\n",
3659 qdev->ndev->name);
3660 qdev->msi = 0;
3661 } else {
3662 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3663 set_bit(QL_MSI_ENABLED,&qdev->flags);
3664 irq_flags &= ~IRQF_SHARED;
3665 }
3666 }
3667
3668 if ((err = request_irq(qdev->pdev->irq,
3669 ql3xxx_isr,
3670 irq_flags, ndev->name, ndev))) {
3671 printk(KERN_ERR PFX
3672 "%s: Failed to reserve interrupt %d already in use.\n",
3673 ndev->name, qdev->pdev->irq);
3674 goto err_irq;
3675 }
3676
3677 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3678
3679 if ((err = ql_wait_for_drvr_lock(qdev))) {
3680 if ((err = ql_adapter_initialize(qdev))) {
3681 printk(KERN_ERR PFX
3682 "%s: Unable to initialize adapter.\n",
3683 ndev->name);
3684 goto err_init;
3685 }
3686 printk(KERN_ERR PFX
3687 "%s: Releaseing driver lock.\n",ndev->name);
3688 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3689 } else {
3690 printk(KERN_ERR PFX
3691 "%s: Could not aquire driver lock.\n",
3692 ndev->name);
3693 goto err_lock;
3694 }
3695
3696 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3697
3698 set_bit(QL_ADAPTER_UP,&qdev->flags);
3699
3700 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3701
3702 netif_poll_enable(ndev);
3703 ql_enable_interrupts(qdev);
3704 return 0;
3705
3706 err_init:
3707 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3708 err_lock:
3709 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3710 free_irq(qdev->pdev->irq, ndev);
3711 err_irq:
3712 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3713 printk(KERN_INFO PFX
3714 "%s: calling pci_disable_msi().\n",
3715 qdev->ndev->name);
3716 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3717 pci_disable_msi(qdev->pdev);
3718 }
3719 return err;
3720 }
3721
3722 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3723 {
3724 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3725 printk(KERN_ERR PFX
3726 "%s: Driver up/down cycle failed, "
3727 "closing device\n",qdev->ndev->name);
3728 dev_close(qdev->ndev);
3729 return -1;
3730 }
3731 return 0;
3732 }
3733
3734 static int ql3xxx_close(struct net_device *ndev)
3735 {
3736 struct ql3_adapter *qdev = netdev_priv(ndev);
3737
3738 /*
3739 * Wait for device to recover from a reset.
3740 * (Rarely happens, but possible.)
3741 */
3742 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3743 msleep(50);
3744
3745 ql_adapter_down(qdev,QL_DO_RESET);
3746 return 0;
3747 }
3748
3749 static int ql3xxx_open(struct net_device *ndev)
3750 {
3751 struct ql3_adapter *qdev = netdev_priv(ndev);
3752 return (ql_adapter_up(qdev));
3753 }
3754
3755 static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3756 {
3757 struct ql3_adapter *qdev = netdev_priv(dev);
3758 return &qdev->stats;
3759 }
3760
3761 static void ql3xxx_set_multicast_list(struct net_device *ndev)
3762 {
3763 /*
3764 * We are manually parsing the list in the net_device structure.
3765 */
3766 return;
3767 }
3768
3769 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3770 {
3771 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3772 struct ql3xxx_port_registers __iomem *port_regs =
3773 qdev->mem_map_registers;
3774 struct sockaddr *addr = p;
3775 unsigned long hw_flags;
3776
3777 if (netif_running(ndev))
3778 return -EBUSY;
3779
3780 if (!is_valid_ether_addr(addr->sa_data))
3781 return -EADDRNOTAVAIL;
3782
3783 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3784
3785 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3786 /* Program lower 32 bits of the MAC address */
3787 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3788 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3789 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3790 ((ndev->dev_addr[2] << 24) | (ndev->
3791 dev_addr[3] << 16) |
3792 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3793
3794 /* Program top 16 bits of the MAC address */
3795 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3796 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3797 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3798 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3799 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3800
3801 return 0;
3802 }
3803
3804 static void ql3xxx_tx_timeout(struct net_device *ndev)
3805 {
3806 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3807
3808 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3809 /*
3810 * Stop the queues, we've got a problem.
3811 */
3812 netif_stop_queue(ndev);
3813
3814 /*
3815 * Wake up the worker to process this event.
3816 */
3817 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3818 }
3819
3820 static void ql_reset_work(struct work_struct *work)
3821 {
3822 struct ql3_adapter *qdev =
3823 container_of(work, struct ql3_adapter, reset_work.work);
3824 struct net_device *ndev = qdev->ndev;
3825 u32 value;
3826 struct ql_tx_buf_cb *tx_cb;
3827 int max_wait_time, i;
3828 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3829 unsigned long hw_flags;
3830
3831 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3832 clear_bit(QL_LINK_MASTER,&qdev->flags);
3833
3834 /*
3835 * Loop through the active list and return the skb.
3836 */
3837 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3838 int j;
3839 tx_cb = &qdev->tx_buf[i];
3840 if (tx_cb->skb) {
3841 printk(KERN_DEBUG PFX
3842 "%s: Freeing lost SKB.\n",
3843 qdev->ndev->name);
3844 pci_unmap_single(qdev->pdev,
3845 pci_unmap_addr(&tx_cb->map[0], mapaddr),
3846 pci_unmap_len(&tx_cb->map[0], maplen),
3847 PCI_DMA_TODEVICE);
3848 for (j = 1; j < tx_cb->seg_count; j++) {
3849 pci_unmap_page(qdev->pdev,
3850 pci_unmap_addr(&tx_cb->map[j],mapaddr),
3851 pci_unmap_len(&tx_cb->map[j],maplen),
3852 PCI_DMA_TODEVICE);
3853 }
3854 dev_kfree_skb(tx_cb->skb);
3855 tx_cb->skb = NULL;
3856 }
3857 }
3858
3859 printk(KERN_ERR PFX
3860 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3861 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3862 ql_write_common_reg(qdev,
3863 &port_regs->CommonRegs.
3864 ispControlStatus,
3865 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3866 /*
3867 * Wait for the Soft Reset to complete.
3868 */
3869 max_wait_time = 10;
3870 do {
3871 value = ql_read_common_reg(qdev,
3872 &port_regs->CommonRegs.ispControlStatus);
3875 if ((value & ISP_CONTROL_SR) == 0) {
3876 printk(KERN_DEBUG PFX
3877 "%s: reset completed.\n",
3878 qdev->ndev->name);
3879 break;
3880 }
3881
3882 if (value & ISP_CONTROL_RI) {
3883 printk(KERN_DEBUG PFX
3884 "%s: clearing NRI after reset.\n",
3885 qdev->ndev->name);
3886 ql_write_common_reg(qdev,
3887 &port_regs->
3888 CommonRegs.
3889 ispControlStatus,
3890 ((ISP_CONTROL_RI <<
3891 16) | ISP_CONTROL_RI));
3892 }
3893
3894 ssleep(1);
3895 } while (--max_wait_time);
3896 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3897
3898 if (value & ISP_CONTROL_SR) {
3899
3900 /*
3901 * Set the reset flags and clear the board again.
3902 * Nothing else to do...
3903 */
3904 printk(KERN_ERR PFX
3905 "%s: Timed out waiting for reset to "
3906 "complete.\n", ndev->name);
3907 printk(KERN_ERR PFX
3908 "%s: Do a reset.\n", ndev->name);
3909 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3910 clear_bit(QL_RESET_START,&qdev->flags);
3911 ql_cycle_adapter(qdev,QL_DO_RESET);
3912 return;
3913 }
3914
3915 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3916 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3917 clear_bit(QL_RESET_START,&qdev->flags);
3918 ql_cycle_adapter(qdev,QL_NO_RESET);
3919 }
3920 }
3921
3922 static void ql_tx_timeout_work(struct work_struct *work)
3923 {
3924 struct ql3_adapter *qdev =
3925 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3926
3927 ql_cycle_adapter(qdev, QL_DO_RESET);
3928 }
3929
3930 static void ql_get_board_info(struct ql3_adapter *qdev)
3931 {
3932 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3933 u32 value;
3934
3935 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3936
3937 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3938 if (value & PORT_STATUS_64)
3939 qdev->pci_width = 64;
3940 else
3941 qdev->pci_width = 32;
3942 if (value & PORT_STATUS_X)
3943 qdev->pci_x = 1;
3944 else
3945 qdev->pci_x = 0;
3946 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3947 }
3948
3949 static void ql3xxx_timer(unsigned long ptr)
3950 {
3951 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3952
3953 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
3954 printk(KERN_DEBUG PFX
3955 "%s: Reset in progress.\n",
3956 qdev->ndev->name);
3957 goto end;
3958 }
3959
3960 ql_link_state_machine(qdev);
3961
3962 /* Restart the timer on a one second interval. */
3963 end:
3964 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3965 }
3966
3967 static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3968 const struct pci_device_id *pci_entry)
3969 {
3970 struct net_device *ndev = NULL;
3971 struct ql3_adapter *qdev = NULL;
3972 static int cards_found;
3973 int pci_using_dac, err;
3974
3975 err = pci_enable_device(pdev);
3976 if (err) {
3977 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3978 pci_name(pdev));
3979 goto err_out;
3980 }
3981
3982 err = pci_request_regions(pdev, DRV_NAME);
3983 if (err) {
3984 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3985 pci_name(pdev));
3986 goto err_out_disable_pdev;
3987 }
3988
3989 pci_set_master(pdev);
3990
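/* Prefer 64-bit DMA masks, falling back to 32-bit if the platform cannot support them. */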
3991 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3992 pci_using_dac = 1;
3993 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3994 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3995 pci_using_dac = 0;
3996 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3997 }
3998
3999 if (err) {
4000 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
4001 pci_name(pdev));
4002 goto err_out_free_regions;
4003 }
4004
4005 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
4006 if (!ndev) {
4007 printk(KERN_ERR PFX "%s could not alloc etherdev\n",
4008 pci_name(pdev));
4009 err = -ENOMEM;
4010 goto err_out_free_regions;
4011 }
4012
4013 SET_MODULE_OWNER(ndev);
4014 SET_NETDEV_DEV(ndev, &pdev->dev);
4015
4016 pci_set_drvdata(pdev, ndev);
4017
4018 qdev = netdev_priv(ndev);
4019 qdev->index = cards_found;
4020 qdev->ndev = ndev;
4021 qdev->pdev = pdev;
4022 qdev->device_id = pci_entry->device;
4023 qdev->port_link_state = LS_DOWN;
4024 if (msi)
4025 qdev->msi = 1;
4026
4027 qdev->msg_enable = netif_msg_init(debug, default_msg);
4028
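/* Only the QL3032 chip supports IP checksum offload and scatter/gather. */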
4029 if (pci_using_dac)
4030 ndev->features |= NETIF_F_HIGHDMA;
4031 if (qdev->device_id == QL3032_DEVICE_ID)
4032 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4033
4034 qdev->mem_map_registers =
4035 ioremap_nocache(pci_resource_start(pdev, 1),
4036 pci_resource_len(pdev, 1));
4037 if (!qdev->mem_map_registers) {
4038 printk(KERN_ERR PFX "%s: cannot map device registers\n",
4039 pci_name(pdev));
4040 err = -EIO;
4041 goto err_out_free_ndev;
4042 }
4043
4044 spin_lock_init(&qdev->adapter_lock);
4045 spin_lock_init(&qdev->hw_lock);
4046
4047 /* Set driver entry points */
4048 ndev->open = ql3xxx_open;
4049 ndev->hard_start_xmit = ql3xxx_send;
4050 ndev->stop = ql3xxx_close;
4051 ndev->get_stats = ql3xxx_get_stats;
4052 ndev->set_multicast_list = ql3xxx_set_multicast_list;
4053 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
4054 ndev->set_mac_address = ql3xxx_set_mac_address;
4055 ndev->tx_timeout = ql3xxx_tx_timeout;
4056 ndev->watchdog_timeo = 5 * HZ;
4057
4058 ndev->poll = ql_poll;
4059 ndev->weight = 64;
4060
4061 ndev->irq = pdev->irq;
4062
4063 /* make sure the EEPROM is good */
4064 if (ql_get_nvram_params(qdev)) {
4065 printk(KERN_ALERT PFX
4066 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
4067 qdev->index);
4068 err = -EIO;
4069 goto err_out_iounmap;
4070 }
4071
4072 ql_set_mac_info(qdev);
4073
4074 /* Validate and set parameters */
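/* Port 1 reads its MTU and MAC address from NVRAM function 2; port 0 uses function 0. */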
4075 if (qdev->mac_index) {
4076 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
4077 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
4078 ETH_ALEN);
4079 } else {
4080 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
4081 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
4082 ETH_ALEN);
4083 }
4084 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4085
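/* Match the device transmit queue length to the hardware request ring size. */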
4086 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4087
4088 /* Turn off support for multicasting */
4089 ndev->flags &= ~IFF_MULTICAST;
4090
4091 /* Record PCI bus information. */
4092 ql_get_board_info(qdev);
4093
4094 /*
4095 * Set the Maximum Memory Read Byte Count value. We do this to handle
4096 * jumbo frames.
4097 */
4098 if (qdev->pci_x) {
4099 pci_write_config_word(pdev, 0x4e, 0x0036);
4100 }
4101
4102 err = register_netdev(ndev);
4103 if (err) {
4104 printk(KERN_ERR PFX "%s: cannot register net device\n",
4105 pci_name(pdev));
4106 goto err_out_iounmap;
4107 }
4108
4109 /* We're going to reset, so assume we have no link for now */
4110 netif_carrier_off(ndev);
4112 netif_stop_queue(ndev);
4113
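/* Deferred reset and tx-timeout recovery run on a per-adapter single-threaded workqueue. */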
4114 qdev->workqueue = create_singlethread_workqueue(ndev->name);
if (!qdev->workqueue) {
unregister_netdev(ndev);
err = -ENOMEM;
goto err_out_iounmap;
}
4115 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
4116 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
4117
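/*
 * Set up the timer that drives ql_link_state_machine(): it is
 * initially set to fire after two seconds, then rearms itself
 * once a second.
 */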
4118 init_timer(&qdev->adapter_timer);
4119 qdev->adapter_timer.function = ql3xxx_timer;
4120 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4121 qdev->adapter_timer.data = (unsigned long)qdev;
4122
4123 if (!cards_found) {
4124 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
4125 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
4126 DRV_NAME, DRV_VERSION);
4127 }
4128 ql_display_dev_info(ndev);
4129
4130 cards_found++;
4131 return 0;
4132
4133 err_out_iounmap:
4134 iounmap(qdev->mem_map_registers);
4135 err_out_free_ndev:
4136 free_netdev(ndev);
4137 err_out_free_regions:
4138 pci_release_regions(pdev);
4139 err_out_disable_pdev:
4140 pci_disable_device(pdev);
4141 pci_set_drvdata(pdev, NULL);
4142 err_out:
4143 return err;
4144 }
4145
4146 static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4147 {
4148 struct net_device *ndev = pci_get_drvdata(pdev);
4149 struct ql3_adapter *qdev = netdev_priv(ndev);
4150
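/* Unregister first so no new I/O arrives, then quiesce interrupts and flush deferred work before freeing resources. */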
4151 unregister_netdev(ndev);
4153
4154 ql_disable_interrupts(qdev);
4155
4156 if (qdev->workqueue) {
4157 cancel_delayed_work(&qdev->reset_work);
4158 cancel_delayed_work(&qdev->tx_timeout_work);
4159 destroy_workqueue(qdev->workqueue);
4160 qdev->workqueue = NULL;
4161 }
4162
4163 iounmap(qdev->mem_map_registers);
4164 pci_release_regions(pdev);
4165 pci_set_drvdata(pdev, NULL);
4166 free_netdev(ndev);
4167 }
4168
4169 static struct pci_driver ql3xxx_driver = {
4171 .name = DRV_NAME,
4172 .id_table = ql3xxx_pci_tbl,
4173 .probe = ql3xxx_probe,
4174 .remove = __devexit_p(ql3xxx_remove),
4175 };
4176
4177 static int __init ql3xxx_init_module(void)
4178 {
4179 return pci_register_driver(&ql3xxx_driver);
4180 }
4181
4182 static void __exit ql3xxx_exit(void)
4183 {
4184 pci_unregister_driver(&ql3xxx_driver);
4185 }
4186
4187 module_init(ql3xxx_init_module);
4188 module_exit(ql3xxx_exit);