/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHY's which are used
 */
typedef enum {
	PHY_TYPE_UNKNOWN   = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
	PHY_DEVICE_et phyDevice;
	u32		phyIdOUI;
	u16		phyIdModel;
	char		*name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
	{{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	 {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while(--seconds);
	return -1;
}
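
/*
 * The semaphore register follows the masked-write convention used
 * throughout this driver: the upper 16 bits of a write select which
 * of the lower 16 bits take effect.  Reading the register back and
 * testing (value & (sem_mask >> 16)) == sem_bits is what tells us
 * whether our bits stuck, i.e. whether this function won the
 * semaphore or the other network function on the chip holds it.
 */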

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return;
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	return;
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
	return;
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
	return;
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
	return;
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
	return;
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}
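
/*
 * The same masked-write convention at work: disabling writes the
 * ISP_IMR_ENABLE_INT mask with the bit itself clear, while enabling
 * writes a 0xff mask with the bit set, so only the interrupt-enable
 * field of the register is ever touched.
 */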

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
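
/*
 * The large receive buffers sit on a singly linked free list threaded
 * through their control blocks: releases append at lrg_buf_free_tail,
 * allocations pop from lrg_buf_free_head.  lrg_buf_skb_check counts
 * entries that went back on the list without a mapped skb (allocation
 * or DMA-mapping failure) so the shortfall can be made up later.
 */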

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->
			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
			   AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->
			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
			   AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg
		     (qdev,
		      &port_regs->CommonRegs.
		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
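
/*
 * Putting the pieces together, one EEPROM word is read by raising
 * chip select (fm93c56a_select), bit-banging the READ opcode and word
 * address out on DO (fm93c56a_cmd), clocking the 16 data bits back in
 * on DI (fm93c56a_datain), and dropping chip select
 * (fm93c56a_deselect) -- all through the serial port interface
 * register, one rising/falling clock edge per register write.
 */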

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 10)) {
		printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
			__func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}
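
/*
 * A valid NVRAM image is one whose 16-bit words sum to zero, which is
 * why a zero checksum above means success and can double as the
 * return value.
 */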

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}
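
/*
 * Scan mode has the MAC poll the PHY status register on its own, so
 * every manual MII access below first disables scanning, waits for
 * the management port to go idle, does its read or write, and then
 * restores scan mode if it was enabled on entry.
 */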

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
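
/*
 * Both read helpers use the same handshake: load the PHY and register
 * address, clear then set MAC_MII_CONTROL_RC through its write mask
 * to start a read cycle, wait for the management port to go non-busy,
 * and only then fetch the result from macMIIMgmtDataReg.
 */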

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
				u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff) {
		return result;
	}

	if (phyIdReg1 == 0xffff) {
		return result;
	}

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for(i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;

			printk(KERN_INFO "%s: Phy: %s\n",
			       qdev->ndev->name, PHY_DEVICES[i].name);

			break;
		}
	}

	return result;
}
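
/*
 * The IEEE PHY identifier splits the vendor OUI across both ID
 * registers, which is why getPhyType() stitches phyIdReg0 together
 * with the upper bits of phyIdReg1 before comparing against the
 * PHY_DEVICES[] table; the model number sits in the middle bits of
 * phyIdReg1.
 */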

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch(qdev->phyType) {
	case PHY_AGERE_ET1011C:
	{
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch(reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch(qdev->phyType) {
	case PHY_AGERE_ET1011C:
	{
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default:
	{
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/*  Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if(err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
		       qdev->ndev->name);
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if(err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
		       qdev->ndev->name);
		return err;
	}

	/*  Check if we have a Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0) {
			miiAddr = MII_AGERE_ADDR_1;
		} else {
			miiAddr = MII_AGERE_ADDR_2;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if(err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if(err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		/*  We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/*  Determine the particular PHY we have on board to apply
	    PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
		return -EIO;
	}

	return 0;
}
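/*
 * A pair of 0xffff ID reads is taken as a possible Agere part that is
 * not answering at the default MII address: the IDs are re-read at
 * the per-function MII_AGERE_ADDR_1/2 address, and once the table
 * lookup confirms an ET1011C, phyAgereSpecificInit() moves the PHY
 * onto PHYAddr[mac_index].
 */
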
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
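
/*
 * Each helper above flips exactly one MAC configuration bit (port
 * enable, soft reset, gigabit mode, full duplex, pause) by writing
 * the bit's mask in the high half-word and the desired value in the
 * low half-word, leaving every other MAC setting unchanged.
 */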

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}
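
/*
 * The ISP_CONTROL_LINK_DN_x bits are evidently latched by the chip
 * when the link drops -- hence the separate clear routine -- so a
 * momentary link loss is still visible to the link state machine
 * even if the link is back up by the time it next runs.
 */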

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if(qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if(qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/*  Some HBA's in the field are set to 0 and they need to
	    be reinterpreted with a default value */
	if(portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		reg |= 1;
	}

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
	}
	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
		       qdev->ndev->name);
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if(ql_port_start(qdev)) {	/* Restart port */
				return -1;
			} else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
1615 | ||
3e23b7d3 | 1616 | static void ql_link_state_machine_work(struct work_struct *work) |
5a4faa87 | 1617 | { |
3e23b7d3 RM |
1618 | struct ql3_adapter *qdev = |
1619 | container_of(work, struct ql3_adapter, link_state_work.work); | |
1620 | ||
5a4faa87 RM |
1621 | u32 curr_link_state; |
1622 | unsigned long hw_flags; | |
1623 | ||
1624 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
1625 | ||
1626 | curr_link_state = ql_get_link_state(qdev); | |
1627 | ||
1628 | if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { | |
1629 | if (netif_msg_link(qdev)) | |
1630 | printk(KERN_INFO PFX | |
1631 | "%s: Reset in progress, skip processing link " | |
1632 | "state.\n", qdev->ndev->name); | |
04f10773 | 1633 | |
9ddf7774 | 1634 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
3e23b7d3 RM |
1635 | |
1636 | /* Restart timer on 2 second interval. */ | |
1637 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\ | |
1638 | ||
5a4faa87 RM |
1639 | return; |
1640 | } | |
1641 | ||
1642 | switch (qdev->port_link_state) { | |
1643 | default: | |
1644 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | |
1645 | ql_port_start(qdev); | |
1646 | } | |
1647 | qdev->port_link_state = LS_DOWN; | |
1648 | /* Fall Through */ | |
1649 | ||
1650 | case LS_DOWN: | |
5a4faa87 RM |
1651 | if (curr_link_state == LS_UP) { |
1652 | if (netif_msg_link(qdev)) | |
0f807044 | 1653 | printk(KERN_INFO PFX "%s: Link is up.\n", |
5a4faa87 RM |
1654 | qdev->ndev->name); |
1655 | if (ql_is_auto_neg_complete(qdev)) | |
1656 | ql_finish_auto_neg(qdev); | |
1657 | ||
1658 | if (qdev->port_link_state == LS_UP) | |
1659 | ql_link_down_detect_clear(qdev); | |
1660 | ||
0f807044 | 1661 | qdev->port_link_state = LS_UP; |
5a4faa87 RM |
1662 | } |
1663 | break; | |
1664 | ||
1665 | case LS_UP: | |
1666 | /* | |
1667 | * See if the link is currently down or went down and came | |
1668 | * back up | |
1669 | */ | |
0f807044 | 1670 | if (curr_link_state == LS_DOWN) { |
5a4faa87 RM |
1671 | if (netif_msg_link(qdev)) |
1672 | printk(KERN_INFO PFX "%s: Link is down.\n", | |
1673 | qdev->ndev->name); | |
1674 | qdev->port_link_state = LS_DOWN; | |
1675 | } | |
0f807044 RM |
1676 | if (ql_link_down_detect(qdev)) |
1677 | qdev->port_link_state = LS_DOWN; | |
5a4faa87 RM |
1678 | break; |
1679 | } | |
1680 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
3e23b7d3 RM |
1681 | |
1682 | /* Restart timer on 1 second interval. */ | |
1683 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | |
5a4faa87 RM |
1684 | } |
1685 | ||
1686 | /* | |
1687 | * Caller must take hw_lock and QL_PHY_GIO_SEM. | |
1688 | */ | |
1689 | static void ql_get_phy_owner(struct ql3_adapter *qdev) | |
1690 | { | |
3efedf2e | 1691 | if (ql_this_adapter_controls_port(qdev)) |
5a4faa87 RM |
1692 | set_bit(QL_LINK_MASTER,&qdev->flags); |
1693 | else | |
1694 | clear_bit(QL_LINK_MASTER,&qdev->flags); | |
1695 | } | |
1696 | ||
1697 | /* | |
1698 | * Caller must take hw_lock and QL_PHY_GIO_SEM. | |
1699 | */ | |
1700 | static void ql_init_scan_mode(struct ql3_adapter *qdev) | |
1701 | { | |
1702 | ql_mii_enable_scan_mode(qdev); | |
1703 | ||
1704 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | |
3efedf2e RM |
1705 | if (ql_this_adapter_controls_port(qdev)) |
1706 | ql_petbi_init_ex(qdev); | |
5a4faa87 | 1707 | } else { |
3efedf2e RM |
1708 | if (ql_this_adapter_controls_port(qdev)) |
1709 | ql_phy_init_ex(qdev); | |
5a4faa87 RM |
1710 | } |
1711 | } | |
1712 | ||
1713 | /* | |
1714 | * MII_Setup needs to be called before taking the PHY out of reset so that the | |
1715 | * management interface clock speed can be set properly. It would be better if | |
1716 | * we had a way to disable MDC until after the PHY is out of reset, but we | |
1717 | * don't have that capability. | |
1718 | */ | |
1719 | static int ql_mii_setup(struct ql3_adapter *qdev) | |
1720 | { | |
1721 | u32 reg; | |
1722 | struct ql3xxx_port_registers __iomem *port_regs = | |
1723 | qdev->mem_map_registers; | |
1724 | ||
1725 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | |
1726 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | |
1727 | 2) << 7)) | |
1728 | return -1; | |
1729 | ||
bd36b0ac | 1730 | if (qdev->device_id == QL3032_DEVICE_ID) |
9ddf7774 | 1731 | ql_write_page0_reg(qdev, |
bd36b0ac RM |
1732 | &port_regs->macMIIMgmtControlReg, 0x0f00000); |
1733 | ||
5a4faa87 RM |
1734 | /* Divide 125MHz clock by 28 to meet PHY timing requirements */ |
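| /* (Illustrative arithmetic: 125 MHz / 28 is roughly 4.46 MHz on the MDC pin.) */ | |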
1735 | reg = MAC_MII_CONTROL_CLK_SEL_DIV28; | |
1736 | ||
1737 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | |
1738 | reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); | |
1739 | ||
1740 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | |
1741 | return 0; | |
1742 | } | |
1743 | ||
1744 | static u32 ql_supported_modes(struct ql3_adapter *qdev) | |
1745 | { | |
1746 | u32 supported; | |
1747 | ||
1748 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | |
1749 | supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | |
1750 | | SUPPORTED_Autoneg; | |
1751 | } else { | |
1752 | supported = SUPPORTED_10baseT_Half | |
1753 | | SUPPORTED_10baseT_Full | |
1754 | | SUPPORTED_100baseT_Half | |
1755 | | SUPPORTED_100baseT_Full | |
1756 | | SUPPORTED_1000baseT_Half | |
1757 | | SUPPORTED_1000baseT_Full | |
1758 | | SUPPORTED_Autoneg | SUPPORTED_TP; | |
1759 | } | |
1760 | ||
1761 | return supported; | |
1762 | } | |
1763 | ||
1764 | static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) | |
1765 | { | |
1766 | int status; | |
1767 | unsigned long hw_flags; | |
1768 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
1769 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | |
1770 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | |
04f10773 BL |
1771 | 2) << 7)) { |
1772 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
5a4faa87 | 1773 | return 0; |
04f10773 | 1774 | } |
5a4faa87 RM |
1775 | status = ql_is_auto_cfg(qdev); |
1776 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | |
1777 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
1778 | return status; | |
1779 | } | |
1780 | ||
1781 | static u32 ql_get_speed(struct ql3_adapter *qdev) | |
1782 | { | |
1783 | u32 status; | |
1784 | unsigned long hw_flags; | |
1785 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
1786 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | |
1787 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | |
04f10773 BL |
1788 | 2) << 7)) { |
1789 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
5a4faa87 | 1790 | return 0; |
04f10773 | 1791 | } |
5a4faa87 RM |
1792 | status = ql_get_link_speed(qdev); |
1793 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | |
1794 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
1795 | return status; | |
1796 | } | |
1797 | ||
1798 | static int ql_get_full_dup(struct ql3_adapter *qdev) | |
1799 | { | |
1800 | int status; | |
1801 | unsigned long hw_flags; | |
1802 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
1803 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | |
1804 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | |
04f10773 BL |
1805 | 2) << 7)) { |
1806 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
5a4faa87 | 1807 | return 0; |
04f10773 | 1808 | } |
5a4faa87 RM |
1809 | status = ql_is_link_full_dup(qdev); |
1810 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | |
1811 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
1812 | return status; | |
1813 | } | |
1814 | ||
1815 | ||
1816 | static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | |
1817 | { | |
1818 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
1819 | ||
1820 | ecmd->transceiver = XCVR_INTERNAL; | |
1821 | ecmd->supported = ql_supported_modes(qdev); | |
1822 | ||
1823 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | |
1824 | ecmd->port = PORT_FIBRE; | |
1825 | } else { | |
1826 | ecmd->port = PORT_TP; | |
1827 | ecmd->phy_address = qdev->PHYAddr; | |
1828 | } | |
1829 | ecmd->advertising = ql_supported_modes(qdev); | |
1830 | ecmd->autoneg = ql_get_auto_cfg_status(qdev); | |
1831 | ecmd->speed = ql_get_speed(qdev); | |
1832 | ecmd->duplex = ql_get_full_dup(qdev); | |
1833 | return 0; | |
1834 | } | |
1835 | ||
1836 | static void ql_get_drvinfo(struct net_device *ndev, | |
1837 | struct ethtool_drvinfo *drvinfo) | |
1838 | { | |
1839 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
1840 | strncpy(drvinfo->driver, ql3xxx_driver_name, 32); | |
1841 | strncpy(drvinfo->version, ql3xxx_driver_version, 32); | |
1842 | strncpy(drvinfo->fw_version, "N/A", 32); | |
1843 | strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); | |
5a4faa87 RM |
1844 | drvinfo->regdump_len = 0; |
1845 | drvinfo->eedump_len = 0; | |
1846 | } | |
1847 | ||
1848 | static u32 ql_get_msglevel(struct net_device *ndev) | |
1849 | { | |
1850 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
1851 | return qdev->msg_enable; | |
1852 | } | |
1853 | ||
1854 | static void ql_set_msglevel(struct net_device *ndev, u32 value) | |
1855 | { | |
1856 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
1857 | qdev->msg_enable = value; | |
1858 | } | |
1859 | ||
ec826383 RM |
1860 | static void ql_get_pauseparam(struct net_device *ndev, |
1861 | struct ethtool_pauseparam *pause) | |
1862 | { | |
1863 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
1864 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
1865 | ||
1866 | u32 reg; | |
1867 | if(qdev->mac_index == 0) | |
1868 | reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); | |
1869 | else | |
1870 | reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); | |
1871 | ||
1872 | pause->autoneg = ql_get_auto_cfg_status(qdev); | |
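| /* | |
| * The shifts below assume MAC_CONFIG_REG_RF and MAC_CONFIG_REG_TF | |
| * sit at bits 2 and 1 of the MAC config register, so each flag | |
| * lands in bit 0 as a 0/1 value. | |
| */ | |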
1873 | pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; | |
1874 | pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; | |
1875 | } | |
1876 | ||
7282d491 | 1877 | static const struct ethtool_ops ql3xxx_ethtool_ops = { |
5a4faa87 RM |
1878 | .get_settings = ql_get_settings, |
1879 | .get_drvinfo = ql_get_drvinfo, | |
5a4faa87 RM |
1880 | .get_link = ethtool_op_get_link, |
1881 | .get_msglevel = ql_get_msglevel, | |
1882 | .set_msglevel = ql_set_msglevel, | |
ec826383 | 1883 | .get_pauseparam = ql_get_pauseparam, |
5a4faa87 RM |
1884 | }; |
1885 | ||
1886 | static int ql_populate_free_queue(struct ql3_adapter *qdev) | |
1887 | { | |
1888 | struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; | |
0f8ab89e BL |
1889 | dma_addr_t map; |
1890 | int err; | |
5a4faa87 RM |
1891 | |
1892 | while (lrg_buf_cb) { | |
1893 | if (!lrg_buf_cb->skb) { | |
cd238faa BL |
1894 | lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, |
1895 | qdev->lrg_buffer_len); | |
5a4faa87 RM |
1896 | if (unlikely(!lrg_buf_cb->skb)) { |
1897 | printk(KERN_DEBUG PFX | |
cd238faa | 1898 | "%s: Failed netdev_alloc_skb().\n", |
5a4faa87 RM |
1899 | qdev->ndev->name); |
1900 | break; | |
1901 | } else { | |
1902 | /* | |
1903 | * We save some space to copy the ethhdr from | |
1904 | * the first buffer | |
1905 | */ | |
1906 | skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); | |
1907 | map = pci_map_single(qdev->pdev, | |
1908 | lrg_buf_cb->skb->data, | |
1909 | qdev->lrg_buffer_len - | |
1910 | QL_HEADER_SPACE, | |
1911 | PCI_DMA_FROMDEVICE); | |
0f8ab89e | 1912 | |
8d8bb39b | 1913 | err = pci_dma_mapping_error(qdev->pdev, map); |
0f8ab89e | 1914 | if(err) { |
9ddf7774 | 1915 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
0f8ab89e BL |
1916 | qdev->ndev->name, err); |
1917 | dev_kfree_skb(lrg_buf_cb->skb); | |
1918 | lrg_buf_cb->skb = NULL; | |
1919 | break; | |
1920 | } | |
1921 | ||
1922 | ||
5a4faa87 RM |
1923 | lrg_buf_cb->buf_phy_addr_low = |
1924 | cpu_to_le32(LS_64BITS(map)); | |
1925 | lrg_buf_cb->buf_phy_addr_high = | |
1926 | cpu_to_le32(MS_64BITS(map)); | |
1927 | pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); | |
1928 | pci_unmap_len_set(lrg_buf_cb, maplen, | |
1929 | qdev->lrg_buffer_len - | |
1930 | QL_HEADER_SPACE); | |
1931 | --qdev->lrg_buf_skb_check; | |
1932 | if (!qdev->lrg_buf_skb_check) | |
1933 | return 1; | |
1934 | } | |
1935 | } | |
1936 | lrg_buf_cb = lrg_buf_cb->next; | |
1937 | } | |
1938 | return 0; | |
1939 | } | |
1940 | ||
f67cac01 RM |
1941 | /* |
1942 | * Caller holds hw_lock. | |
1943 | */ | |
1944 | static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) | |
1945 | { | |
1946 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
1947 | if (qdev->small_buf_release_cnt >= 16) { | |
1948 | while (qdev->small_buf_release_cnt >= 16) { | |
1949 | qdev->small_buf_q_producer_index++; | |
1950 | ||
1951 | if (qdev->small_buf_q_producer_index == | |
1952 | NUM_SBUFQ_ENTRIES) | |
1953 | qdev->small_buf_q_producer_index = 0; | |
1954 | qdev->small_buf_release_cnt -= 8; | |
1955 | } | |
1956 | wmb(); | |
1957 | writel(qdev->small_buf_q_producer_index, | |
1958 | &port_regs->CommonRegs.rxSmallQProducerIndex); | |
1959 | } | |
1960 | } | |
1961 | ||
5a4faa87 RM |
1962 | /* |
1963 | * Caller holds hw_lock. | |
1964 | */ | |
1965 | static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) | |
1966 | { | |
1967 | struct bufq_addr_element *lrg_buf_q_ele; | |
1968 | int i; | |
1969 | struct ql_rcv_buf_cb *lrg_buf_cb; | |
1970 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
1971 | ||
1972 | if ((qdev->lrg_buf_free_count >= 8) | |
1973 | && (qdev->lrg_buf_release_cnt >= 16)) { | |
1974 | ||
1975 | if (qdev->lrg_buf_skb_check) | |
1976 | if (!ql_populate_free_queue(qdev)) | |
1977 | return; | |
1978 | ||
1979 | lrg_buf_q_ele = qdev->lrg_buf_next_free; | |
1980 | ||
1981 | while ((qdev->lrg_buf_release_cnt >= 16) | |
1982 | && (qdev->lrg_buf_free_count >= 8)) { | |
1983 | ||
1984 | for (i = 0; i < 8; i++) { | |
1985 | lrg_buf_cb = | |
1986 | ql_get_from_lrg_buf_free_list(qdev); | |
1987 | lrg_buf_q_ele->addr_high = | |
1988 | lrg_buf_cb->buf_phy_addr_high; | |
1989 | lrg_buf_q_ele->addr_low = | |
1990 | lrg_buf_cb->buf_phy_addr_low; | |
1991 | lrg_buf_q_ele++; | |
1992 | ||
1993 | qdev->lrg_buf_release_cnt--; | |
1994 | } | |
1995 | ||
1996 | qdev->lrg_buf_q_producer_index++; | |
1997 | ||
1357bfcf | 1998 | if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) |
5a4faa87 RM |
1999 | qdev->lrg_buf_q_producer_index = 0; |
2000 | ||
2001 | if (qdev->lrg_buf_q_producer_index == | |
1357bfcf | 2002 | (qdev->num_lbufq_entries - 1)) { |
5a4faa87 RM |
2003 | lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; |
2004 | } | |
2005 | } | |
f67cac01 | 2006 | wmb(); |
5a4faa87 | 2007 | qdev->lrg_buf_next_free = lrg_buf_q_ele; |
f67cac01 RM |
2008 | writel(qdev->lrg_buf_q_producer_index, |
2009 | &port_regs->CommonRegs.rxLargeQProducerIndex); | |
5a4faa87 RM |
2010 | } |
2011 | } | |
2012 | ||
2013 | static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |
2014 | struct ob_mac_iocb_rsp *mac_rsp) | |
2015 | { | |
2016 | struct ql_tx_buf_cb *tx_cb; | |
bd36b0ac | 2017 | int i; |
e8f4df24 | 2018 | int retval = 0; |
5a4faa87 | 2019 | |
e8f4df24 BL |
2020 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { |
2021 | printk(KERN_WARNING "Frame short, but it was padded and sent.\n"); | |
2022 | } | |
9ddf7774 | 2023 | |
5a4faa87 | 2024 | tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; |
e8f4df24 BL |
2025 | |
2026 | /* Check the transmit response flags for any errors */ | |
2027 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | |
2028 | printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); | |
2029 | ||
09f75cd7 | 2030 | qdev->ndev->stats.tx_errors++; |
e8f4df24 BL |
2031 | retval = -EIO; |
2032 | goto frame_not_sent; | |
2033 | } | |
2034 | ||
2035 | if(tx_cb->seg_count == 0) { | |
2036 | printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); | |
2037 | ||
09f75cd7 | 2038 | qdev->ndev->stats.tx_errors++; |
e8f4df24 BL |
2039 | retval = -EIO; |
2040 | goto invalid_seg_count; | |
2041 | } | |
2042 | ||
5a4faa87 | 2043 | pci_unmap_single(qdev->pdev, |
bd36b0ac RM |
2044 | pci_unmap_addr(&tx_cb->map[0], mapaddr), |
2045 | pci_unmap_len(&tx_cb->map[0], maplen), | |
2046 | PCI_DMA_TODEVICE); | |
2047 | tx_cb->seg_count--; | |
2048 | if (tx_cb->seg_count) { | |
2049 | for (i = 1; i < tx_cb->seg_count; i++) { | |
2050 | pci_unmap_page(qdev->pdev, | |
2051 | pci_unmap_addr(&tx_cb->map[i], | |
2052 | mapaddr), | |
2053 | pci_unmap_len(&tx_cb->map[i], maplen), | |
2054 | PCI_DMA_TODEVICE); | |
2055 | } | |
2056 | } | |
09f75cd7 JG |
2057 | qdev->ndev->stats.tx_packets++; |
2058 | qdev->ndev->stats.tx_bytes += tx_cb->skb->len; | |
e8f4df24 BL |
2059 | |
2060 | frame_not_sent: | |
bd36b0ac | 2061 | dev_kfree_skb_irq(tx_cb->skb); |
5a4faa87 | 2062 | tx_cb->skb = NULL; |
e8f4df24 BL |
2063 | |
2064 | invalid_seg_count: | |
5a4faa87 RM |
2065 | atomic_inc(&qdev->tx_count); |
2066 | } | |
2067 | ||
3664006a | 2068 | static void ql_get_sbuf(struct ql3_adapter *qdev) |
97916330 RM |
2069 | { |
2070 | if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) | |
2071 | qdev->small_buf_index = 0; | |
2072 | qdev->small_buf_release_cnt++; | |
2073 | } | |
2074 | ||
3664006a | 2075 | static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) |
97916330 RM |
2076 | { |
2077 | struct ql_rcv_buf_cb *lrg_buf_cb = NULL; | |
2078 | lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; | |
2079 | qdev->lrg_buf_release_cnt++; | |
2080 | if (++qdev->lrg_buf_index == qdev->num_large_buffers) | |
2081 | qdev->lrg_buf_index = 0; | |
2082 | return(lrg_buf_cb); | |
2083 | } | |
2084 | ||
bd36b0ac RM |
2085 | /* |
2086 | * The difference between 3022 and 3032 for inbound completions: | |
9ddf7774 JG |
2087 | * 3022 uses two buffers per completion. The first buffer contains |
2088 | * (some) header info, the second the remainder of the headers plus | |
2089 | * the data. For this chip we reserve some space at the top of the | |
2090 | * receive buffer so that the header info in buffer one can be | |
2091 | * prepended to buffer two. Buffer two is then sent up while | |
bd36b0ac | 2092 | * buffer one is returned to the hardware to be reused. |
9ddf7774 | 2093 | * 3032 receives all of its data and headers in one buffer for a |
bd36b0ac RM |
2094 | * simpler process. 3032 also supports checksum verification as |
2095 | * can be seen in ql_process_macip_rx_intr(). | |
2096 | */ | |
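| /* | |
| * Rough sketch of the 3022 two-buffer completion (widths are | |
| * illustrative only): | |
| * | |
| *   buffer 1: [ ethhdr ... ]                         returned to hardware | |
| *   buffer 2: [ QL_HEADER_SPACE ][ headers + data ]  sent up the stack | |
| * | |
| * The reserved QL_HEADER_SPACE lets the ethhdr from buffer 1 be | |
| * skb_push()'ed in front of buffer 2 (see ql_process_macip_rx_intr). | |
| */ | |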
5a4faa87 RM |
2097 | static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, |
2098 | struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) | |
2099 | { | |
5a4faa87 RM |
2100 | struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; |
2101 | struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; | |
5a4faa87 RM |
2102 | struct sk_buff *skb; |
2103 | u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); | |
2104 | ||
2105 | /* | |
2106 | * Get the inbound address list (small buffer). | |
2107 | */ | |
97916330 | 2108 | ql_get_sbuf(qdev); |
5a4faa87 | 2109 | |
97916330 RM |
2110 | if (qdev->device_id == QL3022_DEVICE_ID) |
2111 | lrg_buf_cb1 = ql_get_lbuf(qdev); | |
5a4faa87 RM |
2112 | |
2113 | /* start of second buffer */ | |
97916330 | 2114 | lrg_buf_cb2 = ql_get_lbuf(qdev); |
5a4faa87 RM |
2115 | skb = lrg_buf_cb2->skb; |
2116 | ||
09f75cd7 JG |
2117 | qdev->ndev->stats.rx_packets++; |
2118 | qdev->ndev->stats.rx_bytes += length; | |
5a4faa87 RM |
2119 | |
2120 | skb_put(skb, length); | |
2121 | pci_unmap_single(qdev->pdev, | |
2122 | pci_unmap_addr(lrg_buf_cb2, mapaddr), | |
2123 | pci_unmap_len(lrg_buf_cb2, maplen), | |
2124 | PCI_DMA_FROMDEVICE); | |
2125 | prefetch(skb->data); | |
5a4faa87 RM |
2126 | skb->ip_summed = CHECKSUM_NONE; |
2127 | skb->protocol = eth_type_trans(skb, qdev->ndev); | |
2128 | ||
2129 | netif_receive_skb(skb); | |
5a4faa87 RM |
2130 | lrg_buf_cb2->skb = NULL; |
2131 | ||
bd36b0ac RM |
2132 | if (qdev->device_id == QL3022_DEVICE_ID) |
2133 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); | |
5a4faa87 RM |
2134 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); |
2135 | } | |
2136 | ||
2137 | static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |
2138 | struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) | |
2139 | { | |
5a4faa87 RM |
2140 | struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; |
2141 | struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; | |
bd36b0ac | 2142 | struct sk_buff *skb1 = NULL, *skb2; |
5a4faa87 RM |
2143 | struct net_device *ndev = qdev->ndev; |
2144 | u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); | |
2145 | u16 size = 0; | |
2146 | ||
2147 | /* | |
2148 | * Get the inbound address list (small buffer). | |
2149 | */ | |
2150 | ||
97916330 | 2151 | ql_get_sbuf(qdev); |
5a4faa87 | 2152 | |
bd36b0ac RM |
2153 | if (qdev->device_id == QL3022_DEVICE_ID) { |
2154 | /* start of first buffer on 3022 */ | |
97916330 | 2155 | lrg_buf_cb1 = ql_get_lbuf(qdev); |
bd36b0ac | 2156 | skb1 = lrg_buf_cb1->skb; |
bd36b0ac RM |
2157 | size = ETH_HLEN; |
2158 | if (*((u16 *) skb1->data) != 0xFFFF) | |
2159 | size += VLAN_ETH_HLEN - ETH_HLEN; | |
2160 | } | |
5a4faa87 RM |
2161 | |
2162 | /* start of second buffer */ | |
97916330 | 2163 | lrg_buf_cb2 = ql_get_lbuf(qdev); |
5a4faa87 | 2164 | skb2 = lrg_buf_cb2->skb; |
5a4faa87 | 2165 | |
5a4faa87 RM |
2166 | skb_put(skb2, length); /* Just the second buffer length here. */ |
2167 | pci_unmap_single(qdev->pdev, | |
2168 | pci_unmap_addr(lrg_buf_cb2, mapaddr), | |
2169 | pci_unmap_len(lrg_buf_cb2, maplen), | |
2170 | PCI_DMA_FROMDEVICE); | |
2171 | prefetch(skb2->data); | |
2172 | ||
5a4faa87 | 2173 | skb2->ip_summed = CHECKSUM_NONE; |
bd36b0ac RM |
2174 | if (qdev->device_id == QL3022_DEVICE_ID) { |
2175 | /* | |
2176 | * Copy the ethhdr from first buffer to second. This | |
2177 | * is necessary for 3022 IP completions. | |
2178 | */ | |
d626f62b ACM |
2179 | skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, |
2180 | skb_push(skb2, size), size); | |
bd36b0ac RM |
2181 | } else { |
2182 | u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); | |
9ddf7774 JG |
2183 | if (checksum & |
2184 | (IB_IP_IOCB_RSP_3032_ICE | | |
2185 | IB_IP_IOCB_RSP_3032_CE)) { | |
bd36b0ac RM |
2186 | printk(KERN_ERR |
2187 | "%s: Bad checksum for this %s packet, checksum = %x.\n", | |
2188 | __func__, | |
9ddf7774 | 2189 | ((checksum & |
bd36b0ac RM |
2190 | IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : |
2191 | "UDP"),checksum); | |
b3b1514c RM |
2192 | } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || |
2193 | (checksum & IB_IP_IOCB_RSP_3032_UDP && | |
2194 | !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { | |
bd36b0ac | 2195 | skb2->ip_summed = CHECKSUM_UNNECESSARY; |
b3b1514c | 2196 | } |
bd36b0ac | 2197 | } |
5a4faa87 RM |
2198 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); |
2199 | ||
2200 | netif_receive_skb(skb2); | |
09f75cd7 JG |
2201 | ndev->stats.rx_packets++; |
2202 | ndev->stats.rx_bytes += length; | |
5a4faa87 RM |
2203 | lrg_buf_cb2->skb = NULL; |
2204 | ||
bd36b0ac RM |
2205 | if (qdev->device_id == QL3022_DEVICE_ID) |
2206 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); | |
5a4faa87 RM |
2207 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); |
2208 | } | |
2209 | ||
2210 | static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |
2211 | int *tx_cleaned, int *rx_cleaned, int work_to_do) | |
2212 | { | |
5a4faa87 RM |
2213 | struct net_rsp_iocb *net_rsp; |
2214 | struct net_device *ndev = qdev->ndev; | |
63b66d12 | 2215 | int work_done = 0; |
5a4faa87 RM |
2216 | |
2217 | /* While there are entries in the completion queue. */ | |
f67cac01 | 2218 | while ((le32_to_cpu(*(qdev->prsp_producer_index)) != |
63b66d12 | 2219 | qdev->rsp_consumer_index) && (work_done < work_to_do)) { |
5a4faa87 RM |
2220 | |
2221 | net_rsp = qdev->rsp_current; | |
b323e0e4 | 2222 | rmb(); |
50626297 RM |
2223 | /* |
2224 | * Fix 4032 chip's undocumented "feature" where bit-8 is set if the | |
2225 | * inbound completion is for a VLAN. | |
2226 | */ | |
2227 | if (qdev->device_id == QL3032_DEVICE_ID) | |
2228 | net_rsp->opcode &= 0x7f; | |
5a4faa87 RM |
2229 | switch (net_rsp->opcode) { |
2230 | ||
2231 | case OPCODE_OB_MAC_IOCB_FN0: | |
2232 | case OPCODE_OB_MAC_IOCB_FN2: | |
2233 | ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) | |
2234 | net_rsp); | |
2235 | (*tx_cleaned)++; | |
2236 | break; | |
2237 | ||
2238 | case OPCODE_IB_MAC_IOCB: | |
bd36b0ac | 2239 | case OPCODE_IB_3032_MAC_IOCB: |
5a4faa87 RM |
2240 | ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) |
2241 | net_rsp); | |
2242 | (*rx_cleaned)++; | |
2243 | break; | |
2244 | ||
2245 | case OPCODE_IB_IP_IOCB: | |
bd36b0ac | 2246 | case OPCODE_IB_3032_IP_IOCB: |
5a4faa87 RM |
2247 | ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) |
2248 | net_rsp); | |
2249 | (*rx_cleaned)++; | |
2250 | break; | |
2251 | default: | |
2252 | { | |
2253 | u32 *tmp = (u32 *) net_rsp; | |
2254 | printk(KERN_ERR PFX | |
2255 | "%s: Hit default case, not " | |
2256 | "handled!\n" | |
2257 | " dropping the packet, opcode = " | |
2258 | "%x.\n", | |
2259 | ndev->name, net_rsp->opcode); | |
2260 | printk(KERN_ERR PFX | |
2261 | "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n", | |
2262 | (unsigned long int)tmp[0], | |
2263 | (unsigned long int)tmp[1], | |
2264 | (unsigned long int)tmp[2], | |
2265 | (unsigned long int)tmp[3]); | |
2266 | } | |
2267 | } | |
2268 | ||
2269 | qdev->rsp_consumer_index++; | |
2270 | ||
2271 | if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { | |
2272 | qdev->rsp_consumer_index = 0; | |
2273 | qdev->rsp_current = qdev->rsp_q_virt_addr; | |
2274 | } else { | |
2275 | qdev->rsp_current++; | |
2276 | } | |
63b66d12 RM |
2277 | |
2278 | work_done = *tx_cleaned + *rx_cleaned; | |
5a4faa87 RM |
2279 | } |
2280 | ||
f67cac01 | 2281 | return work_done; |
5a4faa87 RM |
2282 | } |
2283 | ||
bea3348e | 2284 | static int ql_poll(struct napi_struct *napi, int budget) |
5a4faa87 | 2285 | { |
bea3348e | 2286 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); |
5a4faa87 | 2287 | int rx_cleaned = 0, tx_cleaned = 0; |
63b66d12 RM |
2288 | unsigned long hw_flags; |
2289 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
5a4faa87 | 2290 | |
bea3348e | 2291 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); |
5a4faa87 | 2292 | |
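| /* | |
| * If the budget was not exhausted, the completion ring is drained: | |
| * finish NAPI, publish the buffer-queue producer and response | |
| * consumer indices to the chip, then re-enable interrupts. | |
| */ | |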
4ec24119 | 2293 | if (tx_cleaned + rx_cleaned != budget) { |
63b66d12 | 2294 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
288379f0 | 2295 | __napi_complete(napi); |
f67cac01 RM |
2296 | ql_update_small_bufq_prod_index(qdev); |
2297 | ql_update_lrg_bufq_prod_index(qdev); | |
2298 | writel(qdev->rsp_consumer_index, | |
2299 | &port_regs->CommonRegs.rspQConsumerIndex); | |
63b66d12 RM |
2300 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
2301 | ||
5a4faa87 | 2302 | ql_enable_interrupts(qdev); |
5a4faa87 | 2303 | } |
bea3348e | 2304 | return tx_cleaned + rx_cleaned; |
5a4faa87 RM |
2305 | } |
2306 | ||
7d12e780 | 2307 | static irqreturn_t ql3xxx_isr(int irq, void *dev_id) |
5a4faa87 RM |
2308 | { |
2309 | ||
2310 | struct net_device *ndev = dev_id; | |
2311 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
2312 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
2313 | u32 value; | |
2314 | int handled = 1; | |
2315 | u32 var; | |
2316 | ||
2317 | port_regs = qdev->mem_map_registers; | |
2318 | ||
2319 | value = | |
2320 | ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); | |
2321 | ||
2322 | if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { | |
2323 | spin_lock(&qdev->adapter_lock); | |
2324 | netif_stop_queue(qdev->ndev); | |
2325 | netif_carrier_off(qdev->ndev); | |
2326 | ql_disable_interrupts(qdev); | |
2327 | qdev->port_link_state = LS_DOWN; | |
2328 | set_bit(QL_RESET_ACTIVE,&qdev->flags) ; | |
2329 | ||
2330 | if (value & ISP_CONTROL_FE) { | |
2331 | /* | |
2332 | * Chip Fatal Error. | |
2333 | */ | |
2334 | var = | |
2335 | ql_read_page0_reg_l(qdev, | |
2336 | &port_regs->PortFatalErrStatus); | |
2337 | printk(KERN_WARNING PFX | |
2338 | "%s: Resetting chip. PortFatalErrStatus " | |
2339 | "register = 0x%x\n", ndev->name, var); | |
2340 | set_bit(QL_RESET_START,&qdev->flags) ; | |
2341 | } else { | |
2342 | /* | |
2343 | * Soft Reset Requested. | |
2344 | */ | |
2345 | set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; | |
2346 | printk(KERN_ERR PFX | |
2347 | "%s: Another function issued a reset to the " | |
2348 | "chip. ISR value = %x.\n", ndev->name, value); | |
2349 | } | |
c4028958 | 2350 | queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); |
5a4faa87 RM |
2351 | spin_unlock(&qdev->adapter_lock); |
2352 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | |
e8f4df24 | 2353 | ql_disable_interrupts(qdev); |
288379f0 BH |
2354 | if (likely(napi_schedule_prep(&qdev->napi))) { |
2355 | __napi_schedule(&qdev->napi); | |
63b66d12 | 2356 | } |
5a4faa87 RM |
2357 | } else { |
2358 | return IRQ_NONE; | |
2359 | } | |
2360 | ||
2361 | return IRQ_RETVAL(handled); | |
2362 | } | |
2363 | ||
bd36b0ac | 2364 | /* |
9ddf7774 | 2365 | * Get the total number of segments needed for the |
bd36b0ac RM |
2366 | * given number of fragments. This is necessary because |
2367 | * outbound address lists (OAL) will be used when more than | |
9ddf7774 | 2368 | * two frags are given. Each address list has 5 addr/len |
bd36b0ac | 2369 | * pairs. The 5th pair in each OAL is used to point to |
9ddf7774 | 2370 | * the next OAL if more frags are coming. |
bd36b0ac RM |
2371 | * That is why the frags:segment count ratio is not linear. |
2372 | */ | |
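| /* | |
| * Worked example, derived from the table below: for frags == 7 | |
| * (segment count 10) the chain is | |
| *   IOCB:  skb->data + frag0 + continuation  =  3 entries | |
| *   OAL 1: frag1..frag4 + continuation       =  5 entries | |
| *   OAL 2: frag5, frag6                      =  2 entries | |
| */ | |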
e8f4df24 BL |
2373 | static int ql_get_seg_count(struct ql3_adapter *qdev, |
2374 | unsigned short frags) | |
bd36b0ac | 2375 | { |
e8f4df24 BL |
2376 | if (qdev->device_id == QL3022_DEVICE_ID) |
2377 | return 1; | |
2378 | ||
bd36b0ac RM |
2379 | switch(frags) { |
2380 | case 0: return 1; /* just the skb->data seg */ | |
2381 | case 1: return 2; /* skb->data + 1 frag */ | |
2382 | case 2: return 3; /* skb->data + 2 frags */ | |
2383 | case 3: return 5; /* skb->data + 1 frag + 1 OAL containing 2 frags */ | |
2384 | case 4: return 6; | |
2385 | case 5: return 7; | |
2386 | case 6: return 8; | |
2387 | case 7: return 10; | |
2388 | case 8: return 11; | |
2389 | case 9: return 12; | |
2390 | case 10: return 13; | |
2391 | case 11: return 15; | |
2392 | case 12: return 16; | |
2393 | case 13: return 17; | |
2394 | case 14: return 18; | |
2395 | case 15: return 20; | |
2396 | case 16: return 21; | |
2397 | case 17: return 22; | |
2398 | case 18: return 23; | |
2399 | } | |
2400 | return -1; | |
2401 | } | |
2402 | ||
91e745aa | 2403 | static void ql_hw_csum_setup(const struct sk_buff *skb, |
bd36b0ac RM |
2404 | struct ob_mac_iocb_req *mac_iocb_ptr) |
2405 | { | |
91e745aa | 2406 | const struct iphdr *ip = ip_hdr(skb); |
bd36b0ac | 2407 | |
91e745aa SH |
2408 | mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); |
2409 | mac_iocb_ptr->ip_hdr_len = ip->ihl; | |
bd36b0ac | 2410 | |
91e745aa SH |
2411 | if (ip->protocol == IPPROTO_TCP) { |
2412 | mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | | |
3e71f6dd | 2413 | OB_3032MAC_IOCB_REQ_IC; |
91e745aa SH |
2414 | } else { |
2415 | mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | | |
3e71f6dd | 2416 | OB_3032MAC_IOCB_REQ_IC; |
bd36b0ac | 2417 | } |
91e745aa | 2418 | |
bd36b0ac RM |
2419 | } |
2420 | ||
2421 | /* | |
3e71f6dd RM |
2422 | * Map the buffers for this transmit. This will return |
2423 | * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. | |
bd36b0ac | 2424 | */ |
3e71f6dd RM |
2425 | static int ql_send_map(struct ql3_adapter *qdev, |
2426 | struct ob_mac_iocb_req *mac_iocb_ptr, | |
2427 | struct ql_tx_buf_cb *tx_cb, | |
2428 | struct sk_buff *skb) | |
5a4faa87 | 2429 | { |
bd36b0ac RM |
2430 | struct oal *oal; |
2431 | struct oal_entry *oal_entry; | |
63f77926 | 2432 | int len = skb_headlen(skb); |
0f8ab89e BL |
2433 | dma_addr_t map; |
2434 | int err; | |
2435 | int completed_segs, i; | |
bd36b0ac RM |
2436 | int seg_cnt, seg = 0; |
2437 | int frag_cnt = (int)skb_shinfo(skb)->nr_frags; | |
5a4faa87 | 2438 | |
b6967eb9 | 2439 | seg_cnt = tx_cb->seg_count; |
3e71f6dd RM |
2440 | /* |
2441 | * Map the skb buffer first. | |
2442 | */ | |
bd36b0ac | 2443 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); |
0f8ab89e | 2444 | |
8d8bb39b | 2445 | err = pci_dma_mapping_error(qdev->pdev, map); |
0f8ab89e | 2446 | if(err) { |
9ddf7774 | 2447 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
0f8ab89e BL |
2448 | qdev->ndev->name, err); |
2449 | ||
2450 | return NETDEV_TX_BUSY; | |
2451 | } | |
9ddf7774 | 2452 | |
bd36b0ac RM |
2453 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; |
2454 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | |
2455 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | |
2456 | oal_entry->len = cpu_to_le32(len); | |
2457 | pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | |
2458 | pci_unmap_len_set(&tx_cb->map[seg], maplen, len); | |
2459 | seg++; | |
2460 | ||
e8f4df24 | 2461 | if (seg_cnt == 1) { |
bd36b0ac | 2462 | /* Terminate the last segment. */ |
b39b5a2b | 2463 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); |
bd36b0ac | 2464 | } else { |
bd36b0ac | 2465 | oal = tx_cb->oal; |
0f8ab89e BL |
2466 | for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { |
2467 | skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; | |
bd36b0ac RM |
2468 | oal_entry++; |
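| /* | |
| * Segments 2, 7, 12 and 17 below are the 3rd ALP of the IOCB | |
| * and the 5th ALP of each successive OAL, i.e. the slots that | |
| * ql_get_seg_count() reserves for chaining to the next OAL. | |
| */ | |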
2469 | if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ | |
2470 | (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ | |
2471 | (seg == 12 && seg_cnt > 13) || /* but necessary. */ | |
2472 | (seg == 17 && seg_cnt > 18)) { | |
2473 | /* Continuation entry points to outbound address list. */ | |
2474 | map = pci_map_single(qdev->pdev, oal, | |
2475 | sizeof(struct oal), | |
2476 | PCI_DMA_TODEVICE); | |
0f8ab89e | 2477 | |
8d8bb39b | 2478 | err = pci_dma_mapping_error(qdev->pdev, map); |
0f8ab89e BL |
2479 | if(err) { |
2480 | ||
9ddf7774 | 2481 | printk(KERN_ERR "%s: PCI mapping of outbound address list failed with error: %d\n", |
0f8ab89e BL |
2482 | qdev->ndev->name, err); |
2483 | goto map_error; | |
2484 | } | |
2485 | ||
bd36b0ac RM |
2486 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); |
2487 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | |
2488 | oal_entry->len = | |
2489 | cpu_to_le32(sizeof(struct oal) | | |
2490 | OAL_CONT_ENTRY); | |
2491 | pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, | |
2492 | map); | |
2493 | pci_unmap_len_set(&tx_cb->map[seg], maplen, | |
b6967eb9 | 2494 | sizeof(struct oal)); |
bd36b0ac RM |
2495 | oal_entry = (struct oal_entry *)oal; |
2496 | oal++; | |
2497 | seg++; | |
2498 | } | |
5a4faa87 | 2499 | |
bd36b0ac RM |
2500 | map = |
2501 | pci_map_page(qdev->pdev, frag->page, | |
2502 | frag->page_offset, frag->size, | |
2503 | PCI_DMA_TODEVICE); | |
0f8ab89e | 2504 | |
8d8bb39b | 2505 | err = pci_dma_mapping_error(qdev->pdev, map); |
0f8ab89e | 2506 | if(err) { |
9ddf7774 | 2507 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", |
0f8ab89e BL |
2508 | qdev->ndev->name, err); |
2509 | goto map_error; | |
2510 | } | |
2511 | ||
bd36b0ac RM |
2512 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); |
2513 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | |
2514 | oal_entry->len = cpu_to_le32(frag->size); | |
2515 | pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | |
2516 | pci_unmap_len_set(&tx_cb->map[seg], maplen, | |
2517 | frag->size); | |
2518 | } | |
2519 | /* Terminate the last segment. */ | |
b39b5a2b | 2520 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); |
bd36b0ac | 2521 | } |
0f8ab89e | 2522 | |
3e71f6dd | 2523 | return NETDEV_TX_OK; |
0f8ab89e BL |
2524 | |
2525 | map_error: | |
2526 | /* A PCI mapping failed, so we need to back out. | |
9ddf7774 | 2527 | * We need to traverse through the OALs and associated pages which |
0f8ab89e BL |
2528 | * have been mapped, and unmap them to clean up properly. | |
2529 | */ | |
9ddf7774 | 2530 | |
0f8ab89e BL |
2531 | seg = 1; |
2532 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; | |
2533 | oal = tx_cb->oal; | |
2534 | for (i=0; i<completed_segs; i++,seg++) { | |
2535 | oal_entry++; | |
2536 | ||
2537 | if((seg == 2 && seg_cnt > 3) || /* Check for continuation */ | |
2538 | (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ | |
2539 | (seg == 12 && seg_cnt > 13) || /* but necessary. */ | |
2540 | (seg == 17 && seg_cnt > 18)) { | |
2541 | pci_unmap_single(qdev->pdev, | |
2542 | pci_unmap_addr(&tx_cb->map[seg], mapaddr), | |
2543 | pci_unmap_len(&tx_cb->map[seg], maplen), | |
2544 | PCI_DMA_TODEVICE); | |
2545 | oal++; | |
2546 | seg++; | |
2547 | } | |
2548 | ||
2549 | pci_unmap_page(qdev->pdev, | |
2550 | pci_unmap_addr(&tx_cb->map[seg], mapaddr), | |
2551 | pci_unmap_len(&tx_cb->map[seg], maplen), | |
2552 | PCI_DMA_TODEVICE); | |
2553 | } | |
2554 | ||
2555 | pci_unmap_single(qdev->pdev, | |
2556 | pci_unmap_addr(&tx_cb->map[0], mapaddr), | |
2557 | pci_unmap_len(&tx_cb->map[0], maplen), | |
2558 | PCI_DMA_TODEVICE); | |
2559 | ||
2560 | return NETDEV_TX_BUSY; | |
2561 | ||
3e71f6dd RM |
2562 | } |
2563 | ||
2564 | /* | |
2565 | * The difference between 3022 and 3032 sends: | |
2566 | * 3022 only supports a simple single segment transmission. | |
2567 | * 3032 supports checksumming and scatter/gather lists (fragments). | |
9ddf7774 JG |
2568 | * The 3032 supports sglists by using the 3 addr/len pairs (ALP) |
2569 | * in the IOCB plus a chain of outbound address lists (OAL) that | |
2570 | * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) | |
2571 | * will be used to point to an OAL when more ALP entries are required. |
2572 | * The IOCB is always the top of the chain followed by one or more | |
3e71f6dd RM |
2573 | * OALs (when necessary). |
2574 | */ | |
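| /* | |
| * Chain layout sketch (illustrative): | |
| * | |
| *   IOCB  [ALP0][ALP1][ALP2 -> OAL 1] | |
| *   OAL 1 [ALP][ALP][ALP][ALP][ALP -> OAL 2] | |
| *   OAL N [ALP][ALP][ALP][ALP][ALP] | |
| * | |
| * The trailing ALP is consumed by the link only when another OAL | |
| * follows; otherwise it can carry data. | |
| */ | |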
2575 | static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) | |
2576 | { | |
2577 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | |
2578 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
2579 | struct ql_tx_buf_cb *tx_cb; | |
2580 | u32 tot_len = skb->len; | |
2581 | struct ob_mac_iocb_req *mac_iocb_ptr; | |
2582 | ||
2583 | if (unlikely(atomic_read(&qdev->tx_count) < 2)) { | |
3e71f6dd RM |
2584 | return NETDEV_TX_BUSY; |
2585 | } | |
9ddf7774 | 2586 | |
3e71f6dd | 2587 | tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; |
e8f4df24 BL |
2588 | if((tx_cb->seg_count = ql_get_seg_count(qdev, |
2589 | (skb_shinfo(skb)->nr_frags))) == -1) { | |
3e71f6dd RM |
2590 | printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); |
2591 | return NETDEV_TX_OK; | |
2592 | } | |
9ddf7774 | 2593 | |
3e71f6dd | 2594 | mac_iocb_ptr = tx_cb->queue_entry; |
d8a759ff | 2595 | memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); |
3e71f6dd RM |
2596 | mac_iocb_ptr->opcode = qdev->mac_ob_opcode; |
2597 | mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; | |
2598 | mac_iocb_ptr->flags |= qdev->mb_bit_mask; | |
2599 | mac_iocb_ptr->transaction_id = qdev->req_producer_index; | |
2600 | mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); | |
2601 | tx_cb->skb = skb; | |
e8f4df24 BL |
2602 | if (qdev->device_id == QL3032_DEVICE_ID && |
2603 | skb->ip_summed == CHECKSUM_PARTIAL) | |
3e71f6dd | 2604 | ql_hw_csum_setup(skb, mac_iocb_ptr); |
9ddf7774 | 2605 | |
3e71f6dd RM |
2606 | if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { |
2607 | printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); | |
2608 | return NETDEV_TX_BUSY; | |
2609 | } | |
9ddf7774 | 2610 | |
bd36b0ac | 2611 | wmb(); |
5a4faa87 RM |
2612 | qdev->req_producer_index++; |
2613 | if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) | |
2614 | qdev->req_producer_index = 0; | |
2615 | wmb(); | |
2616 | ql_write_common_reg_l(qdev, | |
ee111d11 | 2617 | &port_regs->CommonRegs.reqQProducerIndex, |
5a4faa87 RM |
2618 | qdev->req_producer_index); |
2619 | ||
2620 | ndev->trans_start = jiffies; | |
2621 | if (netif_msg_tx_queued(qdev)) | |
2622 | printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", | |
2623 | ndev->name, qdev->req_producer_index, skb->len); | |
2624 | ||
bd36b0ac | 2625 | atomic_dec(&qdev->tx_count); |
5a4faa87 RM |
2626 | return NETDEV_TX_OK; |
2627 | } | |
bd36b0ac | 2628 | |
5a4faa87 RM |
2629 | static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) |
2630 | { | |
2631 | qdev->req_q_size = | |
2632 | (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); | |
2633 | ||
2634 | qdev->req_q_virt_addr = | |
2635 | pci_alloc_consistent(qdev->pdev, | |
2636 | (size_t) qdev->req_q_size, | |
2637 | &qdev->req_q_phy_addr); | |
2638 | ||
2639 | if ((qdev->req_q_virt_addr == NULL) || | |
2640 | LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { | |
2641 | printk(KERN_ERR PFX "%s: reqQ failed.\n", | |
2642 | qdev->ndev->name); | |
2643 | return -ENOMEM; | |
2644 | } | |
2645 | ||
2646 | qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); | |
2647 | ||
2648 | qdev->rsp_q_virt_addr = | |
2649 | pci_alloc_consistent(qdev->pdev, | |
2650 | (size_t) qdev->rsp_q_size, | |
2651 | &qdev->rsp_q_phy_addr); | |
2652 | ||
2653 | if ((qdev->rsp_q_virt_addr == NULL) || | |
2654 | LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { | |
2655 | printk(KERN_ERR PFX | |
2656 | "%s: rspQ allocation failed\n", | |
2657 | qdev->ndev->name); | |
2658 | pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, | |
2659 | qdev->req_q_virt_addr, | |
2660 | qdev->req_q_phy_addr); | |
2661 | return -ENOMEM; | |
2662 | } | |
2663 | ||
2664 | set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); | |
2665 | ||
2666 | return 0; | |
2667 | } | |
2668 | ||
2669 | static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) | |
2670 | { | |
2671 | if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { | |
2672 | printk(KERN_INFO PFX | |
2673 | "%s: Already done.\n", qdev->ndev->name); | |
2674 | return; | |
2675 | } | |
2676 | ||
2677 | pci_free_consistent(qdev->pdev, | |
2678 | qdev->req_q_size, | |
2679 | qdev->req_q_virt_addr, qdev->req_q_phy_addr); | |
2680 | ||
2681 | qdev->req_q_virt_addr = NULL; | |
2682 | ||
2683 | pci_free_consistent(qdev->pdev, | |
2684 | qdev->rsp_q_size, | |
2685 | qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); | |
2686 | ||
2687 | qdev->rsp_q_virt_addr = NULL; | |
2688 | ||
2689 | clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); | |
2690 | } | |
2691 | ||
2692 | static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | |
2693 | { | |
2694 | /* Create Large Buffer Queue */ | |
2695 | qdev->lrg_buf_q_size = | |
1357bfcf | 2696 | qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); |
5a4faa87 RM |
2697 | if (qdev->lrg_buf_q_size < PAGE_SIZE) |
2698 | qdev->lrg_buf_q_alloc_size = PAGE_SIZE; | |
2699 | else | |
2700 | qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; | |
2701 | ||
1357bfcf RM |
2702 | qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); |
2703 | if (qdev->lrg_buf == NULL) { | |
2704 | printk(KERN_ERR PFX | |
2705 | "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); | |
2706 | return -ENOMEM; | |
2707 | } | |
9ddf7774 | 2708 | |
5a4faa87 RM |
2709 | qdev->lrg_buf_q_alloc_virt_addr = |
2710 | pci_alloc_consistent(qdev->pdev, | |
2711 | qdev->lrg_buf_q_alloc_size, | |
2712 | &qdev->lrg_buf_q_alloc_phy_addr); | |
2713 | ||
2714 | if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { | |
2715 | printk(KERN_ERR PFX | |
2716 | "%s: lBufQ failed\n", qdev->ndev->name); | |
2717 | return -ENOMEM; | |
2718 | } | |
2719 | qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; | |
2720 | qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; | |
2721 | ||
2722 | /* Create Small Buffer Queue */ | |
2723 | qdev->small_buf_q_size = | |
2724 | NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); | |
2725 | if (qdev->small_buf_q_size < PAGE_SIZE) | |
2726 | qdev->small_buf_q_alloc_size = PAGE_SIZE; | |
2727 | else | |
2728 | qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; | |
2729 | ||
2730 | qdev->small_buf_q_alloc_virt_addr = | |
2731 | pci_alloc_consistent(qdev->pdev, | |
2732 | qdev->small_buf_q_alloc_size, | |
2733 | &qdev->small_buf_q_alloc_phy_addr); | |
2734 | ||
2735 | if (qdev->small_buf_q_alloc_virt_addr == NULL) { | |
2736 | printk(KERN_ERR PFX | |
2737 | "%s: Small Buffer Queue allocation failed.\n", | |
2738 | qdev->ndev->name); | |
2739 | pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, | |
2740 | qdev->lrg_buf_q_alloc_virt_addr, | |
2741 | qdev->lrg_buf_q_alloc_phy_addr); | |
2742 | return -ENOMEM; | |
2743 | } | |
2744 | ||
2745 | qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; | |
2746 | qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; | |
2747 | set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); | |
2748 | return 0; | |
2749 | } | |
2750 | ||
2751 | static void ql_free_buffer_queues(struct ql3_adapter *qdev) | |
2752 | { | |
2753 | if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { | |
2754 | printk(KERN_INFO PFX | |
2755 | "%s: Already done.\n", qdev->ndev->name); | |
2756 | return; | |
2757 | } | |
1357bfcf | 2758 | kfree(qdev->lrg_buf); /* kfree(NULL) is a no-op */ | |
5a4faa87 RM |
2759 | pci_free_consistent(qdev->pdev, |
2760 | qdev->lrg_buf_q_alloc_size, | |
2761 | qdev->lrg_buf_q_alloc_virt_addr, | |
2762 | qdev->lrg_buf_q_alloc_phy_addr); | |
2763 | ||
2764 | qdev->lrg_buf_q_virt_addr = NULL; | |
2765 | ||
2766 | pci_free_consistent(qdev->pdev, | |
2767 | qdev->small_buf_q_alloc_size, | |
2768 | qdev->small_buf_q_alloc_virt_addr, | |
2769 | qdev->small_buf_q_alloc_phy_addr); | |
2770 | ||
2771 | qdev->small_buf_q_virt_addr = NULL; | |
2772 | ||
2773 | clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); | |
2774 | } | |
2775 | ||
2776 | static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | |
2777 | { | |
2778 | int i; | |
2779 | struct bufq_addr_element *small_buf_q_entry; | |
2780 | ||
2781 | /* Currently we allocate one region of memory and use it for the small buffers */ | |
2782 | qdev->small_buf_total_size = | |
2783 | (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * | |
2784 | QL_SMALL_BUFFER_SIZE); | |
2785 | ||
2786 | qdev->small_buf_virt_addr = | |
2787 | pci_alloc_consistent(qdev->pdev, | |
2788 | qdev->small_buf_total_size, | |
2789 | &qdev->small_buf_phy_addr); | |
2790 | ||
2791 | if (qdev->small_buf_virt_addr == NULL) { | |
2792 | printk(KERN_ERR PFX | |
2793 | "%s: Failed to get small buffer memory.\n", | |
2794 | qdev->ndev->name); | |
2795 | return -ENOMEM; | |
2796 | } | |
2797 | ||
2798 | qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); | |
2799 | qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); | |
2800 | ||
2801 | small_buf_q_entry = qdev->small_buf_q_virt_addr; | |
2802 | ||
5a4faa87 RM |
2803 | /* Initialize the small buffer queue. */ |
2804 | for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { | |
2805 | small_buf_q_entry->addr_high = | |
2806 | cpu_to_le32(qdev->small_buf_phy_addr_high); | |
2807 | small_buf_q_entry->addr_low = | |
2808 | cpu_to_le32(qdev->small_buf_phy_addr_low + | |
2809 | (i * QL_SMALL_BUFFER_SIZE)); | |
2810 | small_buf_q_entry++; | |
2811 | } | |
2812 | qdev->small_buf_index = 0; | |
2813 | set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); | |
2814 | return 0; | |
2815 | } | |
2816 | ||
2817 | static void ql_free_small_buffers(struct ql3_adapter *qdev) | |
2818 | { | |
2819 | if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { | |
2820 | printk(KERN_INFO PFX | |
2821 | "%s: Already done.\n", qdev->ndev->name); | |
2822 | return; | |
2823 | } | |
2824 | if (qdev->small_buf_virt_addr != NULL) { | |
2825 | pci_free_consistent(qdev->pdev, | |
2826 | qdev->small_buf_total_size, | |
2827 | qdev->small_buf_virt_addr, | |
2828 | qdev->small_buf_phy_addr); | |
2829 | ||
2830 | qdev->small_buf_virt_addr = NULL; | |
2831 | } | |
2832 | } | |
2833 | ||
2834 | static void ql_free_large_buffers(struct ql3_adapter *qdev) | |
2835 | { | |
2836 | int i = 0; | |
2837 | struct ql_rcv_buf_cb *lrg_buf_cb; | |
2838 | ||
1357bfcf | 2839 | for (i = 0; i < qdev->num_large_buffers; i++) { |
5a4faa87 RM |
2840 | lrg_buf_cb = &qdev->lrg_buf[i]; |
2841 | if (lrg_buf_cb->skb) { | |
2842 | dev_kfree_skb(lrg_buf_cb->skb); | |
2843 | pci_unmap_single(qdev->pdev, | |
2844 | pci_unmap_addr(lrg_buf_cb, mapaddr), | |
2845 | pci_unmap_len(lrg_buf_cb, maplen), | |
2846 | PCI_DMA_FROMDEVICE); | |
2847 | memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); | |
2848 | } else { | |
2849 | break; | |
2850 | } | |
2851 | } | |
2852 | } | |
2853 | ||
2854 | static void ql_init_large_buffers(struct ql3_adapter *qdev) | |
2855 | { | |
2856 | int i; | |
2857 | struct ql_rcv_buf_cb *lrg_buf_cb; | |
2858 | struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; | |
2859 | ||
1357bfcf | 2860 | for (i = 0; i < qdev->num_large_buffers; i++) { |
5a4faa87 RM |
2861 | lrg_buf_cb = &qdev->lrg_buf[i]; |
2862 | buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; | |
2863 | buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; | |
2864 | buf_addr_ele++; | |
2865 | } | |
2866 | qdev->lrg_buf_index = 0; | |
2867 | qdev->lrg_buf_skb_check = 0; | |
2868 | } | |
2869 | ||
2870 | static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |
2871 | { | |
2872 | int i; | |
2873 | struct ql_rcv_buf_cb *lrg_buf_cb; | |
2874 | struct sk_buff *skb; | |
0f8ab89e BL |
2875 | dma_addr_t map; |
2876 | int err; | |
5a4faa87 | 2877 | |
1357bfcf | 2878 | for (i = 0; i < qdev->num_large_buffers; i++) { |
cd238faa BL |
2879 | skb = netdev_alloc_skb(qdev->ndev, |
2880 | qdev->lrg_buffer_len); | |
5a4faa87 RM |
2881 | if (unlikely(!skb)) { |
2882 | /* Better luck next round */ | |
2883 | printk(KERN_ERR PFX | |
2884 | "%s: large buff alloc failed, " | |
2885 | "for %d bytes at index %d.\n", | |
2886 | qdev->ndev->name, | |
2887 | qdev->lrg_buffer_len, i); | |
2888 | ql_free_large_buffers(qdev); | |
2889 | return -ENOMEM; | |
2890 | } else { | |
2891 | ||
2892 | lrg_buf_cb = &qdev->lrg_buf[i]; | |
2893 | memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); | |
2894 | lrg_buf_cb->index = i; | |
2895 | lrg_buf_cb->skb = skb; | |
2896 | /* | |
2897 | * We save some space to copy the ethhdr from the first | |
2898 | * buffer. | |
2899 | */ | |
2900 | skb_reserve(skb, QL_HEADER_SPACE); | |
2901 | map = pci_map_single(qdev->pdev, | |
2902 | skb->data, | |
2903 | qdev->lrg_buffer_len - | |
2904 | QL_HEADER_SPACE, | |
2905 | PCI_DMA_FROMDEVICE); | |
0f8ab89e | 2906 | |
8d8bb39b | 2907 | err = pci_dma_mapping_error(qdev->pdev, map); |
0f8ab89e BL |
2908 | if(err) { |
2909 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | |
2910 | qdev->ndev->name, err); | |
2911 | ql_free_large_buffers(qdev); | |
2912 | return -ENOMEM; | |
2913 | } | |
2914 | ||
5a4faa87 RM |
2915 | pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); |
2916 | pci_unmap_len_set(lrg_buf_cb, maplen, | |
2917 | qdev->lrg_buffer_len - | |
2918 | QL_HEADER_SPACE); | |
2919 | lrg_buf_cb->buf_phy_addr_low = | |
2920 | cpu_to_le32(LS_64BITS(map)); | |
2921 | lrg_buf_cb->buf_phy_addr_high = | |
2922 | cpu_to_le32(MS_64BITS(map)); | |
2923 | } | |
2924 | } | |
2925 | return 0; | |
2926 | } | |
2927 | ||
bd36b0ac RM |
2928 | static void ql_free_send_free_list(struct ql3_adapter *qdev) |
2929 | { | |
2930 | struct ql_tx_buf_cb *tx_cb; | |
2931 | int i; | |
2932 | ||
2933 | tx_cb = &qdev->tx_buf[0]; | |
2934 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | |
2935 | if (tx_cb->oal) { | |
2936 | kfree(tx_cb->oal); | |
2937 | tx_cb->oal = NULL; | |
2938 | } | |
2939 | tx_cb++; | |
2940 | } | |
2941 | } | |
2942 | ||
2943 | static int ql_create_send_free_list(struct ql3_adapter *qdev) | |
5a4faa87 RM |
2944 | { |
2945 | struct ql_tx_buf_cb *tx_cb; | |
2946 | int i; | |
2947 | struct ob_mac_iocb_req *req_q_curr = | |
2948 | qdev->req_q_virt_addr; | |
2949 | ||
2950 | /* Create free list of transmit buffers */ | |
2951 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | |
bd36b0ac | 2952 | |
5a4faa87 RM |
2953 | tx_cb = &qdev->tx_buf[i]; |
2954 | tx_cb->skb = NULL; | |
2955 | tx_cb->queue_entry = req_q_curr; | |
2956 | req_q_curr++; | |
bd36b0ac RM |
2957 | tx_cb->oal = kmalloc(512, GFP_KERNEL); |
2958 | if (tx_cb->oal == NULL) | |
2959 | return -1; | |
5a4faa87 | 2960 | } |
bd36b0ac | 2961 | return 0; |
5a4faa87 RM |
2962 | } |
2963 | ||
2964 | static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | |
2965 | { | |
1357bfcf RM |
2966 | if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { |
2967 | qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; | |
5a4faa87 | 2968 | qdev->lrg_buffer_len = NORMAL_MTU_SIZE; |
1357bfcf | 2969 | } |
5a4faa87 | 2970 | else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { |
1357bfcf RM |
2971 | /* |
2972 | * Bigger buffers, so less of them. | |
2973 | */ | |
2974 | qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; | |
5a4faa87 RM |
2975 | qdev->lrg_buffer_len = JUMBO_MTU_SIZE; |
2976 | } else { | |
2977 | printk(KERN_ERR PFX | |
2978 | "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n", | |
2979 | qdev->ndev->name); | |
2980 | return -ENOMEM; | |
2981 | } | |
1357bfcf | 2982 | qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; |
5a4faa87 RM |
2983 | qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; |
2984 | qdev->max_frame_size = | |
2985 | (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; | |
2986 | ||
2987 | /* | |
2988 | * First allocate a page of shared memory and use it for shadow | |
2989 | * locations of Network Request Queue Consumer Address Register and | |
2990 | * Network Completion Queue Producer Index Register | |
2991 | */ | |
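| /* | |
| * Shadow page layout, as implied by the offsets used below: | |
| *   +0  request queue consumer index  (updated by the chip) | |
| *   +8  response queue producer index (updated by the chip) | |
| */ | |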
2992 | qdev->shadow_reg_virt_addr = | |
2993 | pci_alloc_consistent(qdev->pdev, | |
2994 | PAGE_SIZE, &qdev->shadow_reg_phy_addr); | |
2995 | ||
2996 | if (qdev->shadow_reg_virt_addr != NULL) { | |
2997 | qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; | |
2998 | qdev->req_consumer_index_phy_addr_high = | |
2999 | MS_64BITS(qdev->shadow_reg_phy_addr); | |
3000 | qdev->req_consumer_index_phy_addr_low = | |
3001 | LS_64BITS(qdev->shadow_reg_phy_addr); | |
3002 | ||
3003 | qdev->prsp_producer_index = | |
804d8541 | 3004 | (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); |
5a4faa87 RM |
3005 | qdev->rsp_producer_index_phy_addr_high = |
3006 | qdev->req_consumer_index_phy_addr_high; | |
3007 | qdev->rsp_producer_index_phy_addr_low = | |
3008 | qdev->req_consumer_index_phy_addr_low + 8; | |
3009 | } else { | |
3010 | printk(KERN_ERR PFX | |
3011 | "%s: shadowReg Alloc failed.\n", qdev->ndev->name); | |
3012 | return -ENOMEM; | |
3013 | } | |
3014 | ||
3015 | if (ql_alloc_net_req_rsp_queues(qdev) != 0) { | |
3016 | printk(KERN_ERR PFX | |
3017 | "%s: ql_alloc_net_req_rsp_queues failed.\n", | |
3018 | qdev->ndev->name); | |
3019 | goto err_req_rsp; | |
3020 | } | |
3021 | ||
3022 | if (ql_alloc_buffer_queues(qdev) != 0) { | |
3023 | printk(KERN_ERR PFX | |
3024 | "%s: ql_alloc_buffer_queues failed.\n", | |
3025 | qdev->ndev->name); | |
3026 | goto err_buffer_queues; | |
3027 | } | |
3028 | ||
3029 | if (ql_alloc_small_buffers(qdev) != 0) { | |
3030 | printk(KERN_ERR PFX | |
3031 | "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name); | |
3032 | goto err_small_buffers; | |
3033 | } | |
3034 | ||
3035 | if (ql_alloc_large_buffers(qdev) != 0) { | |
3036 | printk(KERN_ERR PFX | |
3037 | "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name); | |
3038 | goto err_small_buffers; | |
3039 | } | |
3040 | ||
3041 | /* Initialize the large buffer queue. */ | |
3042 | ql_init_large_buffers(qdev); | |
bd36b0ac RM |
3043 | if (ql_create_send_free_list(qdev)) |
3044 | goto err_free_list; | |
5a4faa87 RM |
3045 | |
3046 | qdev->rsp_current = qdev->rsp_q_virt_addr; | |
3047 | ||
3048 | return 0; | |
bd36b0ac RM |
3049 | err_free_list: |
3050 | ql_free_send_free_list(qdev); | |
5a4faa87 RM |
3051 | err_small_buffers: |
3052 | ql_free_buffer_queues(qdev); | |
3053 | err_buffer_queues: | |
3054 | ql_free_net_req_rsp_queues(qdev); | |
3055 | err_req_rsp: | |
3056 | pci_free_consistent(qdev->pdev, | |
3057 | PAGE_SIZE, | |
3058 | qdev->shadow_reg_virt_addr, | |
3059 | qdev->shadow_reg_phy_addr); | |
3060 | ||
3061 | return -ENOMEM; | |
3062 | } | |
3063 | ||
3064 | static void ql_free_mem_resources(struct ql3_adapter *qdev) | |
3065 | { | |
bd36b0ac | 3066 | ql_free_send_free_list(qdev); |
5a4faa87 RM |
3067 | ql_free_large_buffers(qdev); |
3068 | ql_free_small_buffers(qdev); | |
3069 | ql_free_buffer_queues(qdev); | |
3070 | ql_free_net_req_rsp_queues(qdev); | |
3071 | if (qdev->shadow_reg_virt_addr != NULL) { | |
3072 | pci_free_consistent(qdev->pdev, | |
3073 | PAGE_SIZE, | |
3074 | qdev->shadow_reg_virt_addr, | |
3075 | qdev->shadow_reg_phy_addr); | |
3076 | qdev->shadow_reg_virt_addr = NULL; | |
3077 | } | |
3078 | } | |
3079 | ||
3080 | static int ql_init_misc_registers(struct ql3_adapter *qdev) | |
3081 | { | |
ee111d11 AV |
3082 | struct ql3xxx_local_ram_registers __iomem *local_ram = |
3083 | (void __iomem *)qdev->mem_map_registers; | |
5a4faa87 RM |
3084 | |
3085 | if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, | |
3086 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | |
3087 | 2) << 4)) | |
3088 | return -1; | |
3089 | ||
3090 | ql_write_page2_reg(qdev, | |
3091 | &local_ram->bufletSize, qdev->nvram_data.bufletSize); | |
3092 | ||
3093 | ql_write_page2_reg(qdev, | |
3094 | &local_ram->maxBufletCount, | |
3095 | qdev->nvram_data.bufletCount); | |
3096 | ||
3097 | ql_write_page2_reg(qdev, | |
3098 | &local_ram->freeBufletThresholdLow, | |
3099 | (qdev->nvram_data.tcpWindowThreshold25 << 16) | | |
3100 | (qdev->nvram_data.tcpWindowThreshold0)); | |
3101 | ||
3102 | ql_write_page2_reg(qdev, | |
3103 | &local_ram->freeBufletThresholdHigh, | |
3104 | qdev->nvram_data.tcpWindowThreshold50); | |
3105 | ||
3106 | ql_write_page2_reg(qdev, | |
3107 | &local_ram->ipHashTableBase, | |
3108 | (qdev->nvram_data.ipHashTableBaseHi << 16) | | |
3109 | qdev->nvram_data.ipHashTableBaseLo); | |
3110 | ql_write_page2_reg(qdev, | |
3111 | &local_ram->ipHashTableCount, | |
3112 | qdev->nvram_data.ipHashTableSize); | |
3113 | ql_write_page2_reg(qdev, | |
3114 | &local_ram->tcpHashTableBase, | |
3115 | (qdev->nvram_data.tcpHashTableBaseHi << 16) | | |
3116 | qdev->nvram_data.tcpHashTableBaseLo); | |
3117 | ql_write_page2_reg(qdev, | |
3118 | &local_ram->tcpHashTableCount, | |
3119 | qdev->nvram_data.tcpHashTableSize); | |
3120 | ql_write_page2_reg(qdev, | |
3121 | &local_ram->ncbBase, | |
3122 | (qdev->nvram_data.ncbTableBaseHi << 16) | | |
3123 | qdev->nvram_data.ncbTableBaseLo); | |
3124 | ql_write_page2_reg(qdev, | |
3125 | &local_ram->maxNcbCount, | |
3126 | qdev->nvram_data.ncbTableSize); | |
3127 | ql_write_page2_reg(qdev, | |
3128 | &local_ram->drbBase, | |
3129 | (qdev->nvram_data.drbTableBaseHi << 16) | | |
3130 | qdev->nvram_data.drbTableBaseLo); | |
3131 | ql_write_page2_reg(qdev, | |
3132 | &local_ram->maxDrbCount, | |
3133 | qdev->nvram_data.drbTableSize); | |
3134 | ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); | |
3135 | return 0; | |
3136 | } | |
3137 | ||
3138 | static int ql_adapter_initialize(struct ql3_adapter *qdev) | |
3139 | { | |
3140 | u32 value; | |
3141 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
3142 | struct ql3xxx_host_memory_registers __iomem *hmem_regs = | |
ee111d11 | 3143 | (void __iomem *)port_regs; |
5a4faa87 RM |
3144 | u32 delay = 10; |
3145 | int status = 0; | |
3146 | ||
3147 | if(ql_mii_setup(qdev)) | |
3148 | return -1; | |
3149 | ||
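| /* | |
|  * Note the (bits << 16) | bits idiom on the register writes below: | |
|  * the upper halfword of these registers appears to act as a | |
|  * write-enable mask for the lower halfword, so only bits that are | |
|  * also set in the mask are actually modified. | |
|  */ | |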
3150 | /* Bring the PHY out of reset */ | |
3151 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | |
3152 | (ISP_SERIAL_PORT_IF_WE | | |
3153 | (ISP_SERIAL_PORT_IF_WE << 16))); | |
3154 | ||
3155 | qdev->port_link_state = LS_DOWN; | |
3156 | netif_carrier_off(qdev->ndev); | |
3157 | ||
3158 | /* V2 chip fix for ARS-39168. */ | |
3159 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | |
3160 | (ISP_SERIAL_PORT_IF_SDE | | |
3161 | (ISP_SERIAL_PORT_IF_SDE << 16))); | |
3162 | ||
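| /* | |
|  * The queue index words below live in the shadow register page | |
|  * allocated in ql_alloc_mem_resources(); the chip DMAs its index | |
|  * updates into that page so the driver can poll plain host memory | |
|  * instead of reading a device register. | |
|  */ | |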
3163 | /* Request Queue Registers */ | |
3164 | *((u32 *) (qdev->preq_consumer_index)) = 0; | |
3165 | atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); | |
3166 | qdev->req_producer_index = 0; | |
3167 | ||
3168 | ql_write_page1_reg(qdev, | |
3169 | &hmem_regs->reqConsumerIndexAddrHigh, | |
3170 | qdev->req_consumer_index_phy_addr_high); | |
3171 | ql_write_page1_reg(qdev, | |
3172 | &hmem_regs->reqConsumerIndexAddrLow, | |
3173 | qdev->req_consumer_index_phy_addr_low); | |
3174 | ||
3175 | ql_write_page1_reg(qdev, | |
3176 | &hmem_regs->reqBaseAddrHigh, | |
3177 | MS_64BITS(qdev->req_q_phy_addr)); | |
3178 | ql_write_page1_reg(qdev, | |
3179 | &hmem_regs->reqBaseAddrLow, | |
3180 | LS_64BITS(qdev->req_q_phy_addr)); | |
3181 | ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); | |
3182 | ||
3183 | /* Response Queue Registers */ | |
804d8541 | 3184 | *((__le16 *) (qdev->prsp_producer_index)) = 0; |
5a4faa87 RM |
3185 | qdev->rsp_consumer_index = 0; |
3186 | qdev->rsp_current = qdev->rsp_q_virt_addr; | |
3187 | ||
3188 | ql_write_page1_reg(qdev, | |
3189 | &hmem_regs->rspProducerIndexAddrHigh, | |
3190 | qdev->rsp_producer_index_phy_addr_high); | |
3191 | ||
3192 | ql_write_page1_reg(qdev, | |
3193 | &hmem_regs->rspProducerIndexAddrLow, | |
3194 | qdev->rsp_producer_index_phy_addr_low); | |
3195 | ||
3196 | ql_write_page1_reg(qdev, | |
3197 | &hmem_regs->rspBaseAddrHigh, | |
3198 | MS_64BITS(qdev->rsp_q_phy_addr)); | |
3199 | ||
3200 | ql_write_page1_reg(qdev, | |
3201 | &hmem_regs->rspBaseAddrLow, | |
3202 | LS_64BITS(qdev->rsp_q_phy_addr)); | |
3203 | ||
3204 | ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); | |
3205 | ||
3206 | /* Large Buffer Queue */ | |
3207 | ql_write_page1_reg(qdev, | |
3208 | &hmem_regs->rxLargeQBaseAddrHigh, | |
3209 | MS_64BITS(qdev->lrg_buf_q_phy_addr)); | |
3210 | ||
3211 | ql_write_page1_reg(qdev, | |
3212 | &hmem_regs->rxLargeQBaseAddrLow, | |
3213 | LS_64BITS(qdev->lrg_buf_q_phy_addr)); | |
3214 | ||
1357bfcf | 3215 | ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); |
5a4faa87 RM |
3216 | |
3217 | ql_write_page1_reg(qdev, | |
3218 | &hmem_regs->rxLargeBufferLength, | |
3219 | qdev->lrg_buffer_len); | |
3220 | ||
3221 | /* Small Buffer Queue */ | |
3222 | ql_write_page1_reg(qdev, | |
3223 | &hmem_regs->rxSmallQBaseAddrHigh, | |
3224 | MS_64BITS(qdev->small_buf_q_phy_addr)); | |
3225 | ||
3226 | ql_write_page1_reg(qdev, | |
3227 | &hmem_regs->rxSmallQBaseAddrLow, | |
3228 | LS_64BITS(qdev->small_buf_q_phy_addr)); | |
3229 | ||
3230 | ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); | |
3231 | ql_write_page1_reg(qdev, | |
3232 | &hmem_regs->rxSmallBufferLength, | |
3233 | QL_SMALL_BUFFER_SIZE); | |
3234 | ||
3235 | qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; | |
3236 | qdev->small_buf_release_cnt = 8; | |
1357bfcf | 3237 | qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; |
5a4faa87 RM |
3238 | qdev->lrg_buf_release_cnt = 8; |
3239 | qdev->lrg_buf_next_free = | |
3240 | (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr; | |
3241 | qdev->small_buf_index = 0; | |
3242 | qdev->lrg_buf_index = 0; | |
3243 | qdev->lrg_buf_free_count = 0; | |
3244 | qdev->lrg_buf_free_head = NULL; | |
3245 | qdev->lrg_buf_free_tail = NULL; | |
3246 | ||
3247 | ql_write_common_reg(qdev, | |
ee111d11 | 3248 | &port_regs->CommonRegs. |
5a4faa87 RM |
3249 | rxSmallQProducerIndex, |
3250 | qdev->small_buf_q_producer_index); | |
3251 | ql_write_common_reg(qdev, | |
ee111d11 | 3252 | &port_regs->CommonRegs. |
5a4faa87 RM |
3253 | rxLargeQProducerIndex, |
3254 | qdev->lrg_buf_q_producer_index); | |
3255 | ||
3256 | /* | |
3257 | * Find out if the chip has already been initialized. If it has, then | |
3258 | * we skip some of the initialization. | |
3259 | */ | |
3260 | clear_bit(QL_LINK_MASTER, &qdev->flags); | |
3261 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | |
3262 | if ((value & PORT_STATUS_IC) == 0) { | |
3263 | ||
3264 | /* Chip has not been configured yet, so let it rip. */ | |
3265 | if(ql_init_misc_registers(qdev)) { | |
3266 | status = -1; | |
3267 | goto out; | |
3268 | } | |
3269 | ||
5a4faa87 RM |
3270 | value = qdev->nvram_data.tcpMaxWindowSize; |
3271 | ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); | |
3272 | ||
3273 | value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; | |
3274 | ||
3275 | if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, | |
3276 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | |
3277 | * 2) << 13)) { | |
3278 | status = -1; | |
3279 | goto out; | |
3280 | } | |
3281 | ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); | |
3282 | ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, | |
3283 | (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << | |
3284 | 16) | (INTERNAL_CHIP_SD | | |
3285 | INTERNAL_CHIP_WE))); | |
3286 | ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); | |
3287 | } | |
3288 | ||
b3b1514c RM |
3289 | if (qdev->mac_index) |
3290 | ql_write_page0_reg(qdev, | |
3291 | &port_regs->mac1MaxFrameLengthReg, | |
3292 | qdev->max_frame_size); | |
3293 | else | |
3294 | ql_write_page0_reg(qdev, | |
3295 | &port_regs->mac0MaxFrameLengthReg, | |
3296 | qdev->max_frame_size); | |
5a4faa87 RM |
3297 | |
3298 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | |
3299 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | |
3300 | 2) << 7)) { | |
3301 | status = -1; | |
3302 | goto out; | |
3303 | } | |
3304 | ||
3efedf2e | 3305 | PHY_Setup(qdev); |
5a4faa87 RM |
3306 | ql_init_scan_mode(qdev); |
3307 | ql_get_phy_owner(qdev); | |
3308 | ||
3309 | /* Load the MAC Configuration */ | |
3310 | ||
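| /* | |
|  * The MAC address registers are accessed indirectly: select a word | |
|  * with macAddrIndirectPtrReg, then write it through macAddrDataReg | |
|  * (offset 0 = low 32 bits, offset 1 = high 16 bits). | |
|  */ | |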
3311 | /* Program lower 32 bits of the MAC address */ | |
3312 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | |
3313 | (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); | |
3314 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | |
3315 | ((qdev->ndev->dev_addr[2] << 24) | |
3316 | | (qdev->ndev->dev_addr[3] << 16) | |
3317 | | (qdev->ndev->dev_addr[4] << 8) | |
3318 | | qdev->ndev->dev_addr[5])); | |
3319 | ||
3320 | /* Program top 16 bits of the MAC address */ | |
3321 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | |
3322 | ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); | |
3323 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | |
3324 | ((qdev->ndev->dev_addr[0] << 8) | |
3325 | | qdev->ndev->dev_addr[1])); | |
3326 | ||
3327 | /* Enable Primary MAC */ | |
3328 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | |
3329 | ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | | |
3330 | MAC_ADDR_INDIRECT_PTR_REG_PE)); | |
3331 | ||
3332 | /* Clear Primary and Secondary IP addresses */ | |
3333 | ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, | |
3334 | ((IP_ADDR_INDEX_REG_MASK << 16) | | |
3335 | (qdev->mac_index << 2))); | |
3336 | ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); | |
3337 | ||
3338 | ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, | |
3339 | ((IP_ADDR_INDEX_REG_MASK << 16) | | |
3340 | ((qdev->mac_index << 2) + 1))); | |
3341 | ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); | |
3342 | ||
3343 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | |
3344 | ||
3345 | /* Indicate Configuration Complete */ | |
3346 | ql_write_page0_reg(qdev, | |
3347 | &port_regs->portControl, | |
3348 | ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); | |
3349 | ||
3350 | do { | |
3351 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | |
3352 | if (value & PORT_STATUS_IC) | |
3353 | break; | |
3354 | msleep(500); | |
3355 | } while (--delay); | |
3356 | ||
3357 | if (delay == 0) { | |
3358 | printk(KERN_ERR PFX | |
3359 | "%s: Hw Initialization timeout.\n", qdev->ndev->name); | |
3360 | status = -1; | |
3361 | goto out; | |
3362 | } | |
3363 | ||
3364 | /* Enable Ethernet Function */ | |
bd36b0ac RM |
3365 | if (qdev->device_id == QL3032_DEVICE_ID) { |
3366 | value = | |
3367 | (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | | |
b3b1514c RM |
3368 | QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | |
3369 | QL3032_PORT_CONTROL_ET); | |
bd36b0ac RM |
3370 | ql_write_page0_reg(qdev, &port_regs->functionControl, |
3371 | ((value << 16) | value)); | |
3372 | } else { | |
3373 | value = | |
3374 | (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | | |
3375 | PORT_CONTROL_HH); | |
3376 | ql_write_page0_reg(qdev, &port_regs->portControl, | |
3377 | ((value << 16) | value)); | |
3378 | } | |
3379 | ||
5a4faa87 RM |
3380 | |
3381 | out: | |
3382 | return status; | |
3383 | } | |
3384 | ||
3385 | /* | |
3386 | * Caller holds hw_lock. | |
3387 | */ | |
3388 | static int ql_adapter_reset(struct ql3_adapter *qdev) | |
3389 | { | |
3390 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
3391 | int status = 0; | |
3392 | u16 value; | |
3393 | int max_wait_time; | |
3394 | ||
3395 | set_bit(QL_RESET_ACTIVE, &qdev->flags); | |
3396 | clear_bit(QL_RESET_DONE, &qdev->flags); | |
3397 | ||
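| /* | |
|  * Soft-reset sequence: set ISP_CONTROL_SR and poll for the firmware | |
|  * to clear it; if that times out, escalate to a Force Soft Reset | |
|  * (ISP_CONTROL_FSR).  The Network Reset Interrupt latch (RI) is | |
|  * cleared once the reset has taken. | |
|  */ | |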
3398 | /* | |
3399 | * Issue soft reset to chip. | |
3400 | */ | |
3401 | printk(KERN_DEBUG PFX | |
3402 | "%s: Issue soft reset to chip.\n", | |
3403 | qdev->ndev->name); | |
3404 | ql_write_common_reg(qdev, | |
ee111d11 | 3405 | &port_regs->CommonRegs.ispControlStatus, |
5a4faa87 RM |
3406 | ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); |
3407 | ||
3408 | /* Wait up to 5 seconds for the reset to complete. */ | |
3409 | printk(KERN_DEBUG PFX | |
3410 | "%s: Waiting up to 5 seconds for reset to complete.\n", | |
3411 | qdev->ndev->name); | |
3412 | ||
3413 | /* Wait until the firmware tells us the Soft Reset is done */ | |
3414 | max_wait_time = 5; | |
3415 | do { | |
3416 | value = | |
3417 | ql_read_common_reg(qdev, | |
3418 | &port_regs->CommonRegs.ispControlStatus); | |
3419 | if ((value & ISP_CONTROL_SR) == 0) | |
3420 | break; | |
3421 | ||
3422 | ssleep(1); | |
3423 | } while ((--max_wait_time)); | |
3424 | ||
3425 | /* | |
3426 | * Also, make sure that the Network Reset Interrupt bit has been | |
3427 | * cleared after the soft reset has taken place. | |
3428 | */ | |
3429 | value = | |
3430 | ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); | |
3431 | if (value & ISP_CONTROL_RI) { | |
3432 | printk(KERN_DEBUG PFX | |
3433 | "ql_adapter_reset: clearing RI after reset.\n"); | |
3434 | ql_write_common_reg(qdev, | |
ee111d11 | 3435 | &port_regs->CommonRegs. |
5a4faa87 RM |
3436 | ispControlStatus, |
3437 | ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); | |
3438 | } | |
3439 | ||
3440 | if (max_wait_time == 0) { | |
3441 | /* Issue Force Soft Reset */ | |
3442 | ql_write_common_reg(qdev, | |
ee111d11 | 3443 | &port_regs->CommonRegs. |
5a4faa87 RM |
3444 | ispControlStatus, |
3445 | ((ISP_CONTROL_FSR << 16) | | |
3446 | ISP_CONTROL_FSR)); | |
3447 | /* | |
3448 | * Wait until the firmware tells us the Force Soft Reset is | |
3449 | * done | |
3450 | */ | |
3451 | max_wait_time = 5; | |
3452 | do { | |
3453 | value = | |
3454 | ql_read_common_reg(qdev, | |
3455 | &port_regs->CommonRegs. | |
3456 | ispControlStatus); | |
3457 | if ((value & ISP_CONTROL_FSR) == 0) { | |
3458 | break; | |
3459 | } | |
3460 | ssleep(1); | |
3461 | } while ((--max_wait_time)); | |
3462 | } | |
3463 | if (max_wait_time == 0) | |
3464 | status = 1; | |
3465 | ||
3466 | clear_bit(QL_RESET_ACTIVE, &qdev->flags); | |
3467 | set_bit(QL_RESET_DONE, &qdev->flags); | |
3468 | return status; | |
3469 | } | |
3470 | ||
3471 | static void ql_set_mac_info(struct ql3_adapter *qdev) | |
3472 | { | |
3473 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
3474 | u32 value, port_status; | |
3475 | u8 func_number; | |
3476 | ||
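| /* | |
|  * The ISP3XXX exposes both NIC and iSCSI PCI functions.  Decode | |
|  * which NIC function this instance is (0 or 1) and record whether | |
|  * the port's media is optical or copper. | |
|  */ | |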
3477 | /* Get the function number */ | |
3478 | value = | |
3479 | ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); | |
3480 | func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); | |
3481 | port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); | |
3482 | switch (value & ISP_CONTROL_FN_MASK) { | |
3483 | case ISP_CONTROL_FN0_NET: | |
3484 | qdev->mac_index = 0; | |
3485 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; | |
5a4faa87 RM |
3486 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; |
3487 | qdev->PHYAddr = PORT0_PHY_ADDRESS; | |
3488 | if (port_status & PORT_STATUS_SM0) | |
3489 | set_bit(QL_LINK_OPTICAL,&qdev->flags); | |
3490 | else | |
3491 | clear_bit(QL_LINK_OPTICAL,&qdev->flags); | |
3492 | break; | |
3493 | ||
3494 | case ISP_CONTROL_FN1_NET: | |
3495 | qdev->mac_index = 1; | |
3496 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; | |
5a4faa87 RM |
3497 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; |
3498 | qdev->PHYAddr = PORT1_PHY_ADDRESS; | |
3499 | if (port_status & PORT_STATUS_SM1) | |
3500 | set_bit(QL_LINK_OPTICAL,&qdev->flags); | |
3501 | else | |
3502 | clear_bit(QL_LINK_OPTICAL,&qdev->flags); | |
3503 | break; | |
3504 | ||
3505 | case ISP_CONTROL_FN0_SCSI: | |
3506 | case ISP_CONTROL_FN1_SCSI: | |
3507 | default: | |
3508 | printk(KERN_DEBUG PFX | |
3509 | "%s: Invalid function number, ispControlStatus = 0x%x\n", | |
3510 | qdev->ndev->name,value); | |
3511 | break; | |
3512 | } | |
804d8541 | 3513 | qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; |
5a4faa87 RM |
3514 | } |
3515 | ||
3516 | static void ql_display_dev_info(struct net_device *ndev) | |
3517 | { | |
3518 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | |
3519 | struct pci_dev *pdev = qdev->pdev; | |
3520 | ||
3521 | printk(KERN_INFO PFX | |
bd36b0ac RM |
3522 | "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", |
3523 | DRV_NAME, qdev->index, qdev->chip_rev_id, | |
3524 | (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", | |
3525 | qdev->pci_slot); | |
5a4faa87 RM |
3526 | printk(KERN_INFO PFX |
3527 | "%s Interface.\n", | |
3528 | test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); | |
3529 | ||
3530 | /* | |
3531 | * Print PCI bus width/type. | |
3532 | */ | |
3533 | printk(KERN_INFO PFX | |
3534 | "Bus interface is %s %s.\n", | |
3535 | ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), | |
3536 | ((qdev->pci_x) ? "PCI-X" : "PCI")); | |
3537 | ||
3538 | printk(KERN_INFO PFX | |
3539 | "mem IO base address adjusted = 0x%p\n", | |
3540 | qdev->mem_map_registers); | |
3541 | printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq); | |
3542 | ||
3543 | if (netif_msg_probe(qdev)) | |
3544 | printk(KERN_INFO PFX | |
e174961c JB |
3545 | "%s: MAC address %pM\n", |
3546 | ndev->name, ndev->dev_addr); | |
5a4faa87 RM |
3547 | } |
3548 | ||
3549 | static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | |
3550 | { | |
3551 | struct net_device *ndev = qdev->ndev; | |
3552 | int retval = 0; | |
3553 | ||
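| /* | |
|  * Teardown order: quiesce the stack, mask chip interrupts, release | |
|  * the IRQ (and MSI), stop the watchdog timer and NAPI, optionally | |
|  * soft-reset the chip under the driver lock, then free DMA memory. | |
|  */ | |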
3554 | netif_stop_queue(ndev); | |
3555 | netif_carrier_off(ndev); | |
3556 | ||
3557 | clear_bit(QL_ADAPTER_UP,&qdev->flags); | |
3558 | clear_bit(QL_LINK_MASTER,&qdev->flags); | |
3559 | ||
3560 | ql_disable_interrupts(qdev); | |
3561 | ||
3562 | free_irq(qdev->pdev->irq, ndev); | |
3563 | ||
3564 | if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { | |
3565 | printk(KERN_INFO PFX | |
3566 | "%s: calling pci_disable_msi().\n", qdev->ndev->name); | |
3567 | clear_bit(QL_MSI_ENABLED,&qdev->flags); | |
3568 | pci_disable_msi(qdev->pdev); | |
3569 | } | |
3570 | ||
3571 | del_timer_sync(&qdev->adapter_timer); | |
3572 | ||
bea3348e | 3573 | napi_disable(&qdev->napi); |
5a4faa87 RM |
3574 | |
3575 | if (do_reset) { | |
3576 | int soft_reset; | |
3577 | unsigned long hw_flags; | |
3578 | ||
3579 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
3580 | if (ql_wait_for_drvr_lock(qdev)) { | |
3581 | if ((soft_reset = ql_adapter_reset(qdev))) { | |
3582 | printk(KERN_ERR PFX | |
3583 | "%s: ql_adapter_reset(%d) FAILED!\n", | |
3584 | ndev->name, qdev->index); | |
3585 | } | |
3586 | printk(KERN_ERR PFX | |
3587 | "%s: Releaseing driver lock via chip reset.\n",ndev->name); | |
3588 | } else { | |
3589 | printk(KERN_ERR PFX | |
3590 | "%s: Could not acquire driver lock to do " | |
3591 | "reset!\n", ndev->name); | |
3592 | retval = -1; | |
3593 | } | |
3594 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
3595 | } | |
3596 | ql_free_mem_resources(qdev); | |
3597 | return retval; | |
3598 | } | |
3599 | ||
3600 | static int ql_adapter_up(struct ql3_adapter *qdev) | |
3601 | { | |
3602 | struct net_device *ndev = qdev->ndev; | |
3603 | int err; | |
38515e90 | 3604 | unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED; |
5a4faa87 RM |
3605 | unsigned long hw_flags; |
3606 | ||
3607 | if (ql_alloc_mem_resources(qdev)) { | |
3608 | printk(KERN_ERR PFX | |
3609 | "%s Unable to allocate buffers.\n", ndev->name); | |
3610 | return -ENOMEM; | |
3611 | } | |
3612 | ||
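| /* | |
|  * MSI is opt-in via the "msi" module parameter.  If pci_enable_msi() | |
|  * fails, fall back to legacy INTx; MSI vectors are never shared, so | |
|  * IRQF_SHARED is dropped when MSI is enabled. | |
|  */ | |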
3613 | if (qdev->msi) { | |
3614 | if (pci_enable_msi(qdev->pdev)) { | |
3615 | printk(KERN_ERR PFX | |
3616 | "%s: User requested MSI, but MSI failed to " | |
3617 | "initialize. Continuing without MSI.\n", | |
3618 | qdev->ndev->name); | |
3619 | qdev->msi = 0; | |
3620 | } else { | |
3621 | printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); | |
3622 | set_bit(QL_MSI_ENABLED,&qdev->flags); | |
38515e90 | 3623 | irq_flags &= ~IRQF_SHARED; |
5a4faa87 RM |
3624 | } |
3625 | } | |
3626 | ||
3627 | if ((err = request_irq(qdev->pdev->irq, | |
3628 | ql3xxx_isr, | |
3629 | irq_flags, ndev->name, ndev))) { | |
3630 | printk(KERN_ERR PFX | |
3631 | "%s: Failed to reserve interrupt %d already in use.\n", | |
3632 | ndev->name, qdev->pdev->irq); | |
3633 | goto err_irq; | |
3634 | } | |
3635 | ||
3636 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
3637 | ||
3638 | if ((err = ql_wait_for_drvr_lock(qdev))) { | |
3639 | if ((err = ql_adapter_initialize(qdev))) { | |
3640 | printk(KERN_ERR PFX | |
3641 | "%s: Unable to initialize adapter.\n", | |
3642 | ndev->name); | |
3643 | goto err_init; | |
3644 | } | |
3645 | printk(KERN_ERR PFX | |
3646 | "%s: Releaseing driver lock.\n",ndev->name); | |
3647 | ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); | |
3648 | } else { | |
3649 | printk(KERN_ERR PFX | |
3650 | "%s: Could not aquire driver lock.\n", | |
3651 | ndev->name); | |
3652 | goto err_lock; | |
3653 | } | |
3654 | ||
3655 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
3656 | ||
3657 | set_bit(QL_ADAPTER_UP,&qdev->flags); | |
3658 | ||
3659 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | |
3660 | ||
bea3348e | 3661 | napi_enable(&qdev->napi); |
5a4faa87 RM |
3662 | ql_enable_interrupts(qdev); |
3663 | return 0; | |
3664 | ||
3665 | err_init: | |
3666 | ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); | |
3667 | err_lock: | |
04f10773 | 3668 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
5a4faa87 RM |
3669 | free_irq(qdev->pdev->irq, ndev); |
3670 | err_irq: | |
3671 | if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { | |
3672 | printk(KERN_INFO PFX | |
3673 | "%s: calling pci_disable_msi().\n", | |
3674 | qdev->ndev->name); | |
3675 | clear_bit(QL_MSI_ENABLED,&qdev->flags); | |
3676 | pci_disable_msi(qdev->pdev); | |
3677 | } | |
3678 | return err; | |
3679 | } | |
3680 | ||
3681 | static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) | |
3682 | { | |
3683 | if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { | |
3684 | printk(KERN_ERR PFX | |
3685 | "%s: Driver up/down cycle failed, " | |
3686 | "closing device\n",qdev->ndev->name); | |
c81ec80b | 3687 | rtnl_lock(); |
5a4faa87 | 3688 | dev_close(qdev->ndev); |
c81ec80b | 3689 | rtnl_unlock(); |
5a4faa87 RM |
3690 | return -1; |
3691 | } | |
3692 | return 0; | |
3693 | } | |
3694 | ||
3695 | static int ql3xxx_close(struct net_device *ndev) | |
3696 | { | |
3697 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
3698 | ||
3699 | /* | |
3700 | * Wait for device to recover from a reset. | |
3701 | * (Rarely happens, but possible.) | |
3702 | */ | |
3703 | while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) | |
3704 | msleep(50); | |
3705 | ||
3706 | ql_adapter_down(qdev,QL_DO_RESET); | |
3707 | return 0; | |
3708 | } | |
3709 | ||
3710 | static int ql3xxx_open(struct net_device *ndev) | |
3711 | { | |
3712 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
3713 | return (ql_adapter_up(qdev)); | |
3714 | } | |
3715 | ||
5a4faa87 RM |
3716 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) |
3717 | { | |
3718 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | |
3719 | struct ql3xxx_port_registers __iomem *port_regs = | |
3720 | qdev->mem_map_registers; | |
3721 | struct sockaddr *addr = p; | |
3722 | unsigned long hw_flags; | |
3723 | ||
3724 | if (netif_running(ndev)) | |
3725 | return -EBUSY; | |
3726 | ||
3727 | if (!is_valid_ether_addr(addr->sa_data)) | |
3728 | return -EADDRNOTAVAIL; | |
3729 | ||
3730 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | |
3731 | ||
3732 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
3733 | /* Program lower 32 bits of the MAC address */ | |
3734 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | |
3735 | (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); | |
3736 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | |
3737 | ((ndev->dev_addr[2] << 24) | (ndev-> | |
3738 | dev_addr[3] << 16) | | |
3739 | (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); | |
3740 | ||
3741 | /* Program top 16 bits of the MAC address */ | |
3742 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | |
3743 | ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); | |
3744 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | |
3745 | ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); | |
3746 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
3747 | ||
3748 | return 0; | |
3749 | } | |
3750 | ||
3751 | static void ql3xxx_tx_timeout(struct net_device *ndev) | |
3752 | { | |
3753 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | |
3754 | ||
3755 | printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); | |
3756 | /* | |
3757 | * Stop the queues, we've got a problem. | |
3758 | */ | |
3759 | netif_stop_queue(ndev); | |
3760 | ||
3761 | /* | |
3762 | * Wake up the worker to process this event. | |
3763 | */ | |
c4028958 | 3764 | queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); |
5a4faa87 RM |
3765 | } |
3766 | ||
c4028958 | 3767 | static void ql_reset_work(struct work_struct *work) |
5a4faa87 | 3768 | { |
c4028958 DH |
3769 | struct ql3_adapter *qdev = |
3770 | container_of(work, struct ql3_adapter, reset_work.work); | |
5a4faa87 RM |
3771 | struct net_device *ndev = qdev->ndev; |
3772 | u32 value; | |
3773 | struct ql_tx_buf_cb *tx_cb; | |
3774 | int max_wait_time, i; | |
3775 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
3776 | unsigned long hw_flags; | |
3777 | ||
| /* test_bit() takes a bit number, not a mask; test each flag. */ | |
3778 | if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || | |
| test_bit(QL_RESET_START, &qdev->flags)) { | |
3779 | clear_bit(QL_LINK_MASTER,&qdev->flags); | |
3780 | ||
3781 | /* | |
3782 | * Loop through the tx buffers and free any skb the hardware still holds. | |
3783 | */ | |
3784 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | |
bd36b0ac | 3785 | int j; |
5a4faa87 RM |
3786 | tx_cb = &qdev->tx_buf[i]; |
3787 | if (tx_cb->skb) { | |
5a4faa87 RM |
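| /* | |
|  * Buffer 0 (the linear skb data) is unmapped with | |
|  * pci_unmap_single() and the page fragments with pci_unmap_page(), | |
|  * matching how ql3xxx_send() mapped them. | |
|  */ | |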
3788 | printk(KERN_DEBUG PFX |
3789 | "%s: Freeing lost SKB.\n", | |
3790 | qdev->ndev->name); | |
3791 | pci_unmap_single(qdev->pdev, | |
bd36b0ac RM |
3792 | pci_unmap_addr(&tx_cb->map[0], mapaddr), |
3793 | pci_unmap_len(&tx_cb->map[0], maplen), | |
3794 | PCI_DMA_TODEVICE); | |
3795 | for(j=1;j<tx_cb->seg_count;j++) { | |
3796 | pci_unmap_page(qdev->pdev, | |
3797 | pci_unmap_addr(&tx_cb->map[j],mapaddr), | |
3798 | pci_unmap_len(&tx_cb->map[j],maplen), | |
3799 | PCI_DMA_TODEVICE); | |
3800 | } | |
5a4faa87 RM |
3801 | dev_kfree_skb(tx_cb->skb); |
3802 | tx_cb->skb = NULL; | |
3803 | } | |
3804 | } | |
3805 | ||
3806 | printk(KERN_ERR PFX | |
3807 | "%s: Clearing NRI after reset.\n", qdev->ndev->name); | |
3808 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
3809 | ql_write_common_reg(qdev, | |
3810 | &port_regs->CommonRegs. | |
3811 | ispControlStatus, | |
3812 | ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); | |
3813 | /* | |
3814 | * Wait for the Soft Reset to complete. | |
3815 | */ | |
3816 | max_wait_time = 10; | |
3817 | do { | |
3818 | value = ql_read_common_reg(qdev, | |
3819 | &port_regs->CommonRegs. | |
3820 | ispControlStatus); | |
3821 | ||
3822 | if ((value & ISP_CONTROL_SR) == 0) { | |
3823 | printk(KERN_DEBUG PFX | |
3824 | "%s: reset completed.\n", | |
3825 | qdev->ndev->name); | |
3826 | break; | |
3827 | } | |
3828 | ||
3829 | if (value & ISP_CONTROL_RI) { | |
3830 | printk(KERN_DEBUG PFX | |
3831 | "%s: clearing NRI after reset.\n", | |
3832 | qdev->ndev->name); | |
3833 | ql_write_common_reg(qdev, | |
ee111d11 | 3834 | &port_regs-> |
5a4faa87 RM |
3835 | CommonRegs. |
3836 | ispControlStatus, | |
3837 | ((ISP_CONTROL_RI << | |
3838 | 16) | ISP_CONTROL_RI)); | |
3839 | } | |
3840 | ||
3841 | ssleep(1); | |
3842 | } while (--max_wait_time); | |
3843 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
3844 | ||
3845 | if (value & ISP_CONTROL_SR) { | |
3846 | ||
3847 | /* | |
3848 | * Set the reset flags and clear the board again. | |
3849 | * Nothing else to do... | |
3850 | */ | |
3851 | printk(KERN_ERR PFX | |
3852 | "%s: Timed out waiting for reset to " | |
3853 | "complete.\n", ndev->name); | |
3854 | printk(KERN_ERR PFX | |
3855 | "%s: Do a reset.\n", ndev->name); | |
3856 | clear_bit(QL_RESET_PER_SCSI,&qdev->flags); | |
3857 | clear_bit(QL_RESET_START,&qdev->flags); | |
3858 | ql_cycle_adapter(qdev,QL_DO_RESET); | |
3859 | return; | |
3860 | } | |
3861 | ||
3862 | clear_bit(QL_RESET_ACTIVE,&qdev->flags); | |
3863 | clear_bit(QL_RESET_PER_SCSI,&qdev->flags); | |
3864 | clear_bit(QL_RESET_START,&qdev->flags); | |
3865 | ql_cycle_adapter(qdev,QL_NO_RESET); | |
3866 | } | |
3867 | } | |
3868 | ||
c4028958 | 3869 | static void ql_tx_timeout_work(struct work_struct *work) |
5a4faa87 | 3870 | { |
c4028958 DH |
3871 | struct ql3_adapter *qdev = |
3872 | container_of(work, struct ql3_adapter, tx_timeout_work.work); | |
3873 | ||
3874 | ql_cycle_adapter(qdev, QL_DO_RESET); | |
5a4faa87 RM |
3875 | } |
3876 | ||
3877 | static void ql_get_board_info(struct ql3_adapter *qdev) | |
3878 | { | |
3879 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | |
3880 | u32 value; | |
3881 | ||
3882 | value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); | |
3883 | ||
3884 | qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); | |
3885 | if (value & PORT_STATUS_64) | |
3886 | qdev->pci_width = 64; | |
3887 | else | |
3888 | qdev->pci_width = 32; | |
3889 | if (value & PORT_STATUS_X) | |
3890 | qdev->pci_x = 1; | |
3891 | else | |
3892 | qdev->pci_x = 0; | |
3893 | qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); | |
3894 | } | |
3895 | ||
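| /* | |
|  * Periodic link check: defer to the workqueue, where the link state | |
|  * machine may sleep while talking to the PHY. | |
|  */ | |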
3896 | static void ql3xxx_timer(unsigned long ptr) | |
3897 | { | |
3898 | struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; | |
3e23b7d3 | 3899 | queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); |
5a4faa87 RM |
3900 | } |
3901 | ||
da1c14a1 SH |
3902 | static const struct net_device_ops ql3xxx_netdev_ops = { |
3903 | .ndo_open = ql3xxx_open, | |
3904 | .ndo_start_xmit = ql3xxx_send, | |
3905 | .ndo_stop = ql3xxx_close, | |
3906 | .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ | |
3907 | .ndo_change_mtu = eth_change_mtu, | |
3908 | .ndo_validate_addr = eth_validate_addr, | |
3909 | .ndo_set_mac_address = ql3xxx_set_mac_address, | |
3910 | .ndo_tx_timeout = ql3xxx_tx_timeout, | |
3911 | }; | |
3912 | ||
5a4faa87 RM |
3913 | static int __devinit ql3xxx_probe(struct pci_dev *pdev, |
3914 | const struct pci_device_id *pci_entry) | |
3915 | { | |
3916 | struct net_device *ndev = NULL; | |
3917 | struct ql3_adapter *qdev = NULL; | |
3918 | static int cards_found = 0; | |
be5a3c62 | 3919 | int uninitialized_var(pci_using_dac), err; |
5a4faa87 RM |
3920 | |
3921 | err = pci_enable_device(pdev); | |
3922 | if (err) { | |
3923 | printk(KERN_ERR PFX "%s cannot enable PCI device\n", | |
3924 | pci_name(pdev)); | |
3925 | goto err_out; | |
3926 | } | |
3927 | ||
3928 | err = pci_request_regions(pdev, DRV_NAME); | |
3929 | if (err) { | |
3930 | printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", | |
3931 | pci_name(pdev)); | |
3932 | goto err_out_disable_pdev; | |
3933 | } | |
3934 | ||
3935 | pci_set_master(pdev); | |
3936 | ||
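| /* | |
|  * Prefer a 64-bit DMA mask and fall back to 32-bit.  pci_using_dac | |
|  * records the outcome so NETIF_F_HIGHDMA is only advertised when | |
|  * the device can address high memory directly. | |
|  */ | |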
6a35528a | 3937 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
5a4faa87 | 3938 | pci_using_dac = 1; |
6a35528a | 3939 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
284901a9 | 3940 | } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { |
5a4faa87 | 3941 | pci_using_dac = 0; |
284901a9 | 3942 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
5a4faa87 RM |
3943 | } |
3944 | ||
3945 | if (err) { | |
3946 | printk(KERN_ERR PFX "%s no usable DMA configuration\n", | |
3947 | pci_name(pdev)); | |
3948 | goto err_out_free_regions; | |
3949 | } | |
3950 | ||
3951 | ndev = alloc_etherdev(sizeof(struct ql3_adapter)); | |
546faf07 BL |
3952 | if (!ndev) { |
3953 | printk(KERN_ERR PFX "%s could not alloc etherdev\n", | |
3954 | pci_name(pdev)); | |
3955 | err = -ENOMEM; | |
5a4faa87 | 3956 | goto err_out_free_regions; |
546faf07 | 3957 | } |
5a4faa87 | 3958 | |
5a4faa87 RM |
3959 | SET_NETDEV_DEV(ndev, &pdev->dev); |
3960 | ||
5a4faa87 RM |
3961 | pci_set_drvdata(pdev, ndev); |
3962 | ||
3963 | qdev = netdev_priv(ndev); | |
3964 | qdev->index = cards_found; | |
3965 | qdev->ndev = ndev; | |
3966 | qdev->pdev = pdev; | |
bd36b0ac | 3967 | qdev->device_id = pci_entry->device; |
5a4faa87 RM |
3968 | qdev->port_link_state = LS_DOWN; |
3969 | if (msi) | |
3970 | qdev->msi = 1; | |
3971 | ||
3972 | qdev->msg_enable = netif_msg_init(debug, default_msg); | |
3973 | ||
bd36b0ac RM |
3974 | if (pci_using_dac) |
3975 | ndev->features |= NETIF_F_HIGHDMA; | |
3976 | if (qdev->device_id == QL3032_DEVICE_ID) | |
e68a8c10 | 3977 | ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
bd36b0ac | 3978 | |
275f165f | 3979 | qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); |
5a4faa87 RM |
3980 | if (!qdev->mem_map_registers) { |
3981 | printk(KERN_ERR PFX "%s: cannot map device registers\n", | |
3982 | pci_name(pdev)); | |
546faf07 | 3983 | err = -EIO; |
5a4faa87 RM |
3984 | goto err_out_free_ndev; |
3985 | } | |
3986 | ||
3987 | spin_lock_init(&qdev->adapter_lock); | |
3988 | spin_lock_init(&qdev->hw_lock); | |
3989 | ||
3990 | /* Set driver entry points */ | |
da1c14a1 | 3991 | ndev->netdev_ops = &ql3xxx_netdev_ops; |
5a4faa87 | 3992 | SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); |
5a4faa87 RM |
3993 | ndev->watchdog_timeo = 5 * HZ; |
3994 | ||
bea3348e | 3995 | netif_napi_add(ndev, &qdev->napi, ql_poll, 64); |
5a4faa87 RM |
3996 | |
3997 | ndev->irq = pdev->irq; | |
3998 | ||
3999 | /* make sure the EEPROM is good */ | |
4000 | if (ql_get_nvram_params(qdev)) { | |
4001 | printk(KERN_ALERT PFX | |
4002 | "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", | |
4003 | qdev->index); | |
546faf07 | 4004 | err = -EIO; |
5a4faa87 RM |
4005 | goto err_out_iounmap; |
4006 | } | |
4007 | ||
4008 | ql_set_mac_info(qdev); | |
4009 | ||
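| /* | |
|  * Per-port defaults (MTU, MAC address) come from NVRAM.  Note the | |
|  * NIC ports map to PCI functions 0 and 2 (funcCfg_fn0/funcCfg_fn2); | |
|  * the odd function numbers presumably belong to the iSCSI side. | |
|  */ | |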
4010 | /* Validate and set parameters */ | |
4011 | if (qdev->mac_index) { | |
cb8bac12 | 4012 | ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; |
804d8541 | 4013 | ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); |
5a4faa87 | 4014 | } else { |
cb8bac12 | 4015 | ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; |
804d8541 | 4016 | ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); |
5a4faa87 RM |
4017 | } |
4018 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | |
4019 | ||
4020 | ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; | |
4021 | ||
5a4faa87 RM |
4022 | /* Record PCI bus information. */ |
4023 | ql_get_board_info(qdev); | |
4024 | ||
4025 | /* | |
4026 | * Bump the PCI-X Maximum Memory Read Byte Count so jumbo-frame DMA | |
4027 | * bursts are not split.  Note that 0x4e is a hardwired offset for | |
4028 | * this device's PCI-X command register, rather than one located via | |
| * pci_find_capability(PCI_CAP_ID_PCIX). | |
| */ | |
4029 | if (qdev->pci_x) { | |
4030 | pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); | |
4031 | } | |
4032 | ||
4033 | err = register_netdev(ndev); | |
4034 | if (err) { | |
4035 | printk(KERN_ERR PFX "%s: cannot register net device\n", | |
4036 | pci_name(pdev)); | |
4037 | goto err_out_iounmap; | |
4038 | } | |
4039 | ||
4040 | /* we're going to reset, so assume we have no link for now */ | |
4041 | ||
4042 | netif_carrier_off(ndev); | |
4043 | netif_stop_queue(ndev); | |
4044 | ||
4045 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | |
c4028958 DH |
4046 | INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); |
4047 | INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); | |
3e23b7d3 | 4048 | INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); |
5a4faa87 RM |
4049 | |
4050 | init_timer(&qdev->adapter_timer); | |
4051 | qdev->adapter_timer.function = ql3xxx_timer; | |
4052 | qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ | |
4053 | qdev->adapter_timer.data = (unsigned long)qdev; | |
4054 | ||
4055 | if(!cards_found) { | |
4056 | printk(KERN_ALERT PFX "%s\n", DRV_STRING); | |
4057 | printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", | |
4058 | DRV_NAME, DRV_VERSION); | |
4059 | } | |
4060 | ql_display_dev_info(ndev); | |
4061 | ||
4062 | cards_found++; | |
4063 | return 0; | |
4064 | ||
4065 | err_out_iounmap: | |
4066 | iounmap(qdev->mem_map_registers); | |
4067 | err_out_free_ndev: | |
4068 | free_netdev(ndev); | |
4069 | err_out_free_regions: | |
4070 | pci_release_regions(pdev); | |
4071 | err_out_disable_pdev: | |
4072 | pci_disable_device(pdev); | |
4073 | pci_set_drvdata(pdev, NULL); | |
4074 | err_out: | |
4075 | return err; | |
4076 | } | |
4077 | ||
4078 | static void __devexit ql3xxx_remove(struct pci_dev *pdev) | |
4079 | { | |
4080 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4081 | struct ql3_adapter *qdev = netdev_priv(ndev); | |
4082 | ||
4083 | unregister_netdev(ndev); | |
4084 | qdev = netdev_priv(ndev); | |
4085 | ||
4086 | ql_disable_interrupts(qdev); | |
4087 | ||
4088 | if (qdev->workqueue) { | |
4089 | cancel_delayed_work(&qdev->reset_work); | |
4090 | cancel_delayed_work(&qdev->tx_timeout_work); | |
4091 | destroy_workqueue(qdev->workqueue); | |
4092 | qdev->workqueue = NULL; | |
4093 | } | |
4094 | ||
855fc73b | 4095 | iounmap(qdev->mem_map_registers); |
5a4faa87 RM |
4096 | pci_release_regions(pdev); |
4097 | pci_set_drvdata(pdev, NULL); | |
4098 | free_netdev(ndev); | |
4099 | } | |
4100 | ||
4101 | static struct pci_driver ql3xxx_driver = { | |
4102 | ||
4103 | .name = DRV_NAME, | |
4104 | .id_table = ql3xxx_pci_tbl, | |
4105 | .probe = ql3xxx_probe, | |
4106 | .remove = __devexit_p(ql3xxx_remove), | |
4107 | }; | |
4108 | ||
4109 | static int __init ql3xxx_init_module(void) | |
4110 | { | |
4111 | return pci_register_driver(&ql3xxx_driver); | |
4112 | } | |
4113 | ||
4114 | static void __exit ql3xxx_exit(void) | |
4115 | { | |
4116 | pci_unregister_driver(&ql3xxx_driver); | |
4117 | } | |
4118 | ||
4119 | module_init(ql3xxx_init_module); | |
4120 | module_exit(ql3xxx_exit); |