Staging: sxg: Use correct queue_id for transmitting non-TCP packets
[deliverable/linux.git] drivers/staging/sxg/sxg.c
1 /**************************************************************************
2 *
3 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * The views and conclusions contained in the software and documentation
30 * are those of the authors and should not be interpreted as representing
31 * official policies, either expressed or implied, of Alacritech, Inc.
32 *
33 * Parts developed by LinSysSoft Sahara team
34 *
35 **************************************************************************/
36
37 /*
38 * FILENAME: sxg.c
39 *
40 * The SXG driver for Alacritech's 10Gbe products.
41 *
42 * NOTE: This is the standard, non-accelerated version of Alacritech's
43 * IS-NIC driver.
44 */
45
46 #include <linux/kernel.h>
47 #include <linux/string.h>
48 #include <linux/errno.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/firmware.h>
52 #include <linux/ioport.h>
53 #include <linux/slab.h>
54 #include <linux/interrupt.h>
55 #include <linux/timer.h>
56 #include <linux/pci.h>
57 #include <linux/spinlock.h>
58 #include <linux/init.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/ethtool.h>
62 #include <linux/skbuff.h>
63 #include <linux/delay.h>
64 #include <linux/types.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/mii.h>
67 #include <linux/ip.h>
68 #include <linux/in.h>
69 #include <linux/tcp.h>
70 #include <linux/ipv6.h>
71
72 #define SLIC_GET_STATS_ENABLED 0
73 #define LINUX_FREES_ADAPTER_RESOURCES 1
74 #define SXG_OFFLOAD_IP_CHECKSUM 0
75 #define SXG_POWER_MANAGEMENT_ENABLED 0
76 #define VPCI 0
77 #define ATK_DEBUG 1
78 #define SXG_UCODE_DEBUG 0
79
80
81 #include "sxg_os.h"
82 #include "sxghw.h"
83 #include "sxghif.h"
84 #include "sxg.h"
85 #include "sxgdbg.h"
86 #include "sxgphycode-1.2.h"
87
88 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
89 enum sxg_buffer_type BufferType);
90 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
91 void *RcvBlock,
92 dma_addr_t PhysicalAddress,
93 u32 Length);
94 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
95 struct sxg_scatter_gather *SxgSgl,
96 dma_addr_t PhysicalAddress,
97 u32 Length);
98
99 static void sxg_mcast_init_crc32(void);
100 static int sxg_entry_open(struct net_device *dev);
101 static int sxg_second_open(struct net_device * dev);
102 static int sxg_entry_halt(struct net_device *dev);
103 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
104 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
105 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
106 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
107 struct sxg_scatter_gather *SxgSgl);
108
109 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
110 int budget);
111 static void sxg_interrupt(struct adapter_t *adapter);
112 static int sxg_poll(struct napi_struct *napi, int budget);
113 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
114 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
115 int *sxg_napi_continue, int *work_done, int budget);
116 static void sxg_complete_slow_send(struct adapter_t *adapter);
117 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
118 struct sxg_event *Event);
119 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
120 static bool sxg_mac_filter(struct adapter_t *adapter,
121 struct ether_header *EtherHdr, ushort length);
122 static struct net_device_stats *sxg_get_stats(struct net_device * dev);
123 void sxg_free_resources(struct adapter_t *adapter);
124 void sxg_free_rcvblocks(struct adapter_t *adapter);
125 void sxg_free_sgl_buffers(struct adapter_t *adapter);
126 void sxg_unmap_resources(struct adapter_t *adapter);
127 void sxg_free_mcast_addrs(struct adapter_t *adapter);
128 void sxg_collect_statistics(struct adapter_t *adapter);
129 static int sxg_register_interrupt(struct adapter_t *adapter);
130 static void sxg_remove_isr(struct adapter_t *adapter);
131 static irqreturn_t sxg_isr(int irq, void *dev_id);
132
133 static void sxg_watchdog(unsigned long data);
134 static void sxg_update_link_status (struct work_struct *work);
135
136 #define XXXTODO 0
137
138 #if XXXTODO
139 static int sxg_mac_set_address(struct net_device *dev, void *ptr);
140 #endif
141 static void sxg_mcast_set_list(struct net_device *dev);
142
143 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);
144
145 static int sxg_initialize_adapter(struct adapter_t *adapter);
146 static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
147 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
148 unsigned char Index);
149 int sxg_change_mtu (struct net_device *netdev, int new_mtu);
150 static int sxg_initialize_link(struct adapter_t *adapter);
151 static int sxg_phy_init(struct adapter_t *adapter);
152 static void sxg_link_event(struct adapter_t *adapter);
153 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
154 static void sxg_link_state(struct adapter_t *adapter,
155 enum SXG_LINK_STATE LinkState);
156 static int sxg_write_mdio_reg(struct adapter_t *adapter,
157 u32 DevAddr, u32 RegAddr, u32 Value);
158 static int sxg_read_mdio_reg(struct adapter_t *adapter,
159 u32 DevAddr, u32 RegAddr, u32 *pValue);
160 static void sxg_set_mcast_addr(struct adapter_t *adapter);
161
162 static unsigned int sxg_first_init = 1;
163 static char *sxg_banner =
164 "Alacritech SLIC Technology(tm) Server and Storage \
165 10Gbe Accelerator (Non-Accelerated)\n";
166
167 static int sxg_debug = 1;
168 static int debug = -1;
169 static struct net_device *head_netdevice = NULL;
170
171 static struct sxgbase_driver sxg_global = {
172 .dynamic_intagg = 1,
173 };
174 static int intagg_delay = 100;
175 static u32 dynamic_intagg = 0;
176
177 char sxg_driver_name[] = "sxg_nic";
178 #define DRV_AUTHOR "Alacritech, Inc. Engineering"
179 #define DRV_DESCRIPTION \
180 "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver"
181 #define DRV_COPYRIGHT \
182 "Copyright 2000-2008 Alacritech, Inc. All rights reserved."
183
184 MODULE_AUTHOR(DRV_AUTHOR);
185 MODULE_DESCRIPTION(DRV_DESCRIPTION);
186 MODULE_LICENSE("GPL");
187
188 module_param(dynamic_intagg, int, 0);
189 MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
190 module_param(intagg_delay, int, 0);
191 MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
192
193 static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
194 {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
195 {0,}
196 };
197
198 MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
199
200 static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
201 {
202 writel(value, reg);
203 if (flush)
204 mb();
205 }
206
207 static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
208 u64 value, u32 cpu)
209 {
210 u32 value_high = (u32) (value >> 32);
211 u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
212 unsigned long flags;
213
214 spin_lock_irqsave(&adapter->Bit64RegLock, flags);
215 writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
216 writel(value_low, reg);
217 spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
218 }
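/*
 * Note on the helper above: the hardware presumably latches the Upper
 * half and commits the full 64-bit value when the low half is written,
 * so Bit64RegLock keeps another CPU from interleaving its own Upper
 * write between the two writel() calls.
 */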
219
220 static void sxg_init_driver(void)
221 {
222 if (sxg_first_init) {
223 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
224 __func__, jiffies);
225 sxg_first_init = 0;
226 spin_lock_init(&sxg_global.driver_lock);
227 }
228 }
229
230 static void sxg_dbg_macaddrs(struct adapter_t *adapter)
231 {
232 DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
233 adapter->netdev->name, adapter->currmacaddr[0],
234 adapter->currmacaddr[1], adapter->currmacaddr[2],
235 adapter->currmacaddr[3], adapter->currmacaddr[4],
236 adapter->currmacaddr[5]);
237 DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
238 adapter->netdev->name, adapter->macaddr[0],
239 adapter->macaddr[1], adapter->macaddr[2],
240 adapter->macaddr[3], adapter->macaddr[4],
241 adapter->macaddr[5]);
242 return;
243 }
244
245 /* SXG Globals */
246 static struct sxg_driver SxgDriver;
247
248 #ifdef ATKDBG
249 static struct sxg_trace_buffer LSxgTraceBuffer;
250 #endif /* ATKDBG */
251 static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
252
253 /*
254 * MSI Related API's
255 */
256 int sxg_register_intr(struct adapter_t *adapter);
257 int sxg_enable_msi_x(struct adapter_t *adapter);
258 int sxg_add_msi_isr(struct adapter_t *adapter);
259 void sxg_remove_msix_isr(struct adapter_t *adapter);
260 int sxg_set_interrupt_capability(struct adapter_t *adapter);
261
262 int sxg_set_interrupt_capability(struct adapter_t *adapter)
263 {
264 int ret;
265
266 ret = sxg_enable_msi_x(adapter);
267 if (ret != STATUS_SUCCESS) {
268 adapter->msi_enabled = FALSE;
269 DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
270 } else {
271 adapter->msi_enabled = TRUE;
272 DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
273 }
274 return ret;
275 }
276
277 int sxg_register_intr(struct adapter_t *adapter)
278 {
279 int ret = 0;
280
281 if (adapter->msi_enabled) {
282 ret = sxg_add_msi_isr(adapter);
283 }
284 else {
285 DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
286 ret = sxg_register_interrupt(adapter);
287 if (ret != STATUS_SUCCESS) {
288 DBG_ERROR("sxg_register_interrupt Failed\n");
289 }
290 }
291 return ret;
292 }
293
294 int sxg_enable_msi_x(struct adapter_t *adapter)
295 {
296 int ret;
297
298 adapter->nr_msix_entries = 1;
299 adapter->msi_entries = kmalloc(adapter->nr_msix_entries *
300 sizeof(struct msix_entry),GFP_KERNEL);
301 if (!adapter->msi_entries) {
302 DBG_ERROR("%s:MSI Entries memory allocation Failed\n",__func__);
303 return -ENOMEM;
304 }
305 memset(adapter->msi_entries, 0, adapter->nr_msix_entries *
306 sizeof(struct msix_entry));
307
308 ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
309 adapter->nr_msix_entries);
310 if (ret) {
311 DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
312 adapter->nr_msix_entries);
313 /* Should retry with the smaller vector count returned. */
314 kfree(adapter->msi_entries);
315 return STATUS_FAILURE; /*MSI-X Enable failed.*/
316 }
317 return (STATUS_SUCCESS);
318 }
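/*
 * Sketch only, not wired into the driver: the TODO in
 * sxg_enable_msi_x() suggests retrying with fewer vectors. In this
 * kernel generation pci_enable_msix() returns a positive count when
 * fewer vectors are available, so the conventional retry loop would
 * look like this (the function name is ours):
 */
#if 0
static int sxg_enable_msi_x_retry(struct adapter_t *adapter)
{
	int nvec = adapter->nr_msix_entries;
	int ret;

	/* A positive return is the number of vectors actually available */
	while ((ret = pci_enable_msix(adapter->pcidev,
				      adapter->msi_entries, nvec)) > 0)
		nvec = ret;

	if (!ret)
		adapter->nr_msix_entries = nvec;
	return ret;	/* 0 on success, negative errno on failure */
}
#endif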
319
320 int sxg_add_msi_isr(struct adapter_t *adapter)
321 {
322 int ret,i;
323
324 if (!adapter->intrregistered) {
325 for (i=0; i<adapter->nr_msix_entries; i++) {
326 ret = request_irq (adapter->msi_entries[i].vector,
327 sxg_isr,
328 IRQF_SHARED,
329 adapter->netdev->name,
330 adapter->netdev);
331 if (ret) {
332 DBG_ERROR("sxg: MSI-X request_irq (%s) "
333 "FAILED [%x]\n", adapter->netdev->name,
334 ret);
335 return (ret);
336 }
337 }
338 }
339 adapter->msi_enabled = TRUE;
340 adapter->intrregistered = 1;
341 adapter->IntRegistered = TRUE;
342 return (STATUS_SUCCESS);
343 }
344
345 void sxg_remove_msix_isr(struct adapter_t *adapter)
346 {
347 int i,vector;
348 struct net_device *netdev = adapter->netdev;
349
350 for(i=0; i< adapter->nr_msix_entries;i++)
351 {
352 vector = adapter->msi_entries[i].vector;
353 DBG_ERROR("%s : Freeing IRQ vector#%d\n",__FUNCTION__,vector);
354 free_irq(vector,netdev);
355 }
356 }
357
358
359 static void sxg_remove_isr(struct adapter_t *adapter)
360 {
361 struct net_device *netdev = adapter->netdev;
362 if (adapter->msi_enabled)
363 sxg_remove_msix_isr(adapter);
364 else
365 free_irq(adapter->netdev->irq, netdev);
366 }
367
368 void sxg_reset_interrupt_capability(struct adapter_t *adapter)
369 {
370 if (adapter->msi_enabled) {
371 pci_disable_msix(adapter->pcidev);
372 kfree(adapter->msi_entries);
373 adapter->msi_entries = NULL;
374 }
375 return;
376 }
377
378 /*
379 * sxg_download_microcode
380 *
381 * Download Microcode to Sahara adapter using the Linux
382 * Firmware module to get the ucode.sys file.
383 *
384 * Arguments -
385 * adapter - A pointer to our adapter structure
386 * UcodeSel - microcode file selection
387 *
388 * Return
389 * int
390 */
391 static bool sxg_download_microcode(struct adapter_t *adapter,
392 enum SXG_UCODE_SEL UcodeSel)
393 {
394 const struct firmware *fw;
395 const char *file = "";
396 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
397 int ret;
398 int ucode_start;
399 u32 Section;
400 u32 ThisSectionSize;
401 u32 instruction = 0;
402 u32 BaseAddress, AddressOffset, Address;
403 /* u32 Failure; */
404 u32 ValueRead;
405 u32 i;
406 u32 index = 0;
407 u32 num_sections = 0;
408 u32 sectionSize[16];
409 u32 sectionStart[16];
410
411 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
412 adapter, 0, 0, 0);
413
414 /*
415 * This routine is only implemented to download the microcode
416 * for the Revision B Sahara chip. Rev A and Diagnostic
417 * microcode is not supported at this time. If Rev A or
418 * diagnostic ucode is required, this routine will obviously
419 * need to change. Also, eventually need to add support for
420 * Rev B checked version of ucode. That's easy enough once
421 * the free version of Rev B works.
422 */
423 ASSERT(UcodeSel == SXG_UCODE_SYSTEM);
424 ASSERT(adapter->asictype == SAHARA_REV_B);
425 #if SXG_UCODE_DEBUG
426 file = "sxg/saharadbgdownloadB.sys";
427 #else
428 file = "sxg/saharadownloadB.sys";
429 #endif
430 ret = request_firmware(&fw, file, &adapter->pcidev->dev);
431 if (ret) {
432 DBG_ERROR("%s SXG_NIC: Failed to load firmware %s\n", __func__,file);
433 return ret;
434 }
435
436 /*
437 * The microcode .sys file starts with a 4 byte word containing
438 * the number of sections. That is followed by "num_sections" 4 byte
439 * words containing each "section" size, then by num_sections 4 byte
440 * words containing each section "start" address.
441 *
442 * Following the above header, the .sys file contains the sections
443 * themselves, each consisting of its specified size of
444 * newline-delineated 12 byte microcode instructions.
445 */
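#if 0
/*
 * Illustrative layout of the .sys header described above. The struct
 * and field names are ours, for documentation only; the parser below
 * reads the same fields with raw pointer arithmetic, and each array
 * really holds num_sections (<= 3) entries.
 */
struct sxg_ucode_file_hdr {
	u32 num_sections;	/* number of sections that follow */
	u32 section_size[3];	/* size of each section, in bytes */
	u32 section_start[3];	/* load address of each section */
	/* ...followed by the 12-byte instructions of each section */
};
#endif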
446 num_sections = *(u32 *)(fw->data + index);
447 index += 4;
448 ASSERT(num_sections <= 3);
449 for (i = 0; i < num_sections; i++) {
450 sectionSize[i] = *(u32 *)(fw->data + index);
451 index += 4;
452 }
453 for (i = 0; i < num_sections; i++) {
454 sectionStart[i] = *(u32 *)(fw->data + index);
455 index += 4;
456 }
457
458 /* First, reset the card */
459 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
460 udelay(50);
461 HwRegs = adapter->HwRegs;
462
463 /*
464 * Download each section of the microcode as specified in
465 * sectionSize[index] to sectionStart[index] address. As
466 * described above, the .sys file contains 12 byte word
467 * microcode instructions. The *download.sys file is generated
468 * using the objtosys.exe utility that was built for Sahara
469 * microcode.
470 */
471 /* See usage of this below when we read back for parity */
472 ucode_start = index;
473 instruction = *(u32 *)(fw->data + index);
474 index += 4;
475
476 for (Section = 0; Section < num_sections; Section++) {
477 BaseAddress = sectionStart[Section];
478 /* Size in instructions */
479 ThisSectionSize = sectionSize[Section] / 12;
480 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
481 AddressOffset++) {
482 u32 first_instr = 0; /* See comment below */
483
484 Address = BaseAddress + AddressOffset;
485 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
486 /* Write instruction bits 31 - 0 (low) */
487 first_instr = instruction;
488 WRITE_REG(HwRegs->UcodeDataLow, instruction, FLUSH);
489 instruction = *(u32 *)(fw->data + index);
490 index += 4; /* Advance to the "next" instruction */
491
492 /* Write instruction bits 63-32 (middle) */
493 WRITE_REG(HwRegs->UcodeDataMiddle, instruction, FLUSH);
494 instruction = *(u32 *)(fw->data + index);
495 index += 4; /* Advance to the "next" instruction */
496
497 /* Write instruction bits 95-64 (high) */
498 WRITE_REG(HwRegs->UcodeDataHigh, instruction, FLUSH);
499 instruction = *(u32 *)(fw->data + index);
500 index += 4; /* Advance to the "next" instruction */
501
502 /* Write instruction address with the WRITE bit set */
503 WRITE_REG(HwRegs->UcodeAddr,
504 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
505 /*
506 * Sahara bug in the ucode download logic - the write to DataLow
507 * for the next instruction could get corrupted. To avoid this,
508 * write to DataLow again for this instruction (which may get
509 * corrupted, but it doesn't matter), then increment the address
510 * and write the data for the next instruction to DataLow. That
511 * write should succeed.
512 */
513 WRITE_REG(HwRegs->UcodeDataLow, first_instr, FLUSH);
514 }
515 }
516 /*
517 * Now repeat the entire operation reading the instruction back and
518 * checking for parity errors
519 */
520 index = ucode_start;
521
522 for (Section = 0; Section < num_sections; Section++) {
523 BaseAddress = sectionStart[Section];
524 /* Size in instructions */
525 ThisSectionSize = sectionSize[Section] / 12;
526 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
527 AddressOffset++) {
528 Address = BaseAddress + AddressOffset;
529 /* Write the address with the READ bit set */
530 WRITE_REG(HwRegs->UcodeAddr,
531 (Address | MICROCODE_ADDRESS_READ), FLUSH);
532 /* Read it back and check parity bit. */
533 READ_REG(HwRegs->UcodeAddr, ValueRead);
534 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
535 DBG_ERROR("sxg: %s PARITY ERROR\n",
536 __func__);
537
538 return FALSE; /* Parity error */
539 }
540 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
541 /* Read the instruction back and compare */
542 /* First instruction */
543 instruction = *(u32 *)(fw->data + index);
544 index += 4;
545 READ_REG(HwRegs->UcodeDataLow, ValueRead);
546 if (ValueRead != instruction) {
547 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
548 __func__);
549 return FALSE; /* Miscompare */
550 }
551 instruction = *(u32 *)(fw->data + index);
552 index += 4;
553 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
554 if (ValueRead != instruction) {
555 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
556 __func__);
557 return FALSE; /* Miscompare */
558 }
559 instruction = *(u32 *)(fw->data + index);
560 index += 4;
561 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
562 if (ValueRead != instruction) {
563 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
564 __func__);
565 return FALSE; /* Miscompare */
566 }
567 }
568 }
569
570 /* download finished */
571 release_firmware(fw);
572 /* Everything OK, Go. */
573 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
574
575 /*
576 * Poll the CardUp register to wait for microcode to initialize
577 * Give up after 10,000 attempts (500ms).
578 */
579 for (i = 0; i < 10000; i++) {
580 udelay(50);
581 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
582 if (ValueRead == 0xCAFE) {
583 break;
584 }
585 }
586 if (i == 10000) {
587 DBG_ERROR("sxg: %s TIMEOUT bringing up card - verify MICROCODE\n", __func__);
588
589 return FALSE; /* Timeout */
590 }
591 /*
592 * Now write the LoadSync register. This is used to
593 * synchronize with the card so it can scribble on the memory
594 * that contained 0xCAFE from the "CardUp" step above
595 */
596 if (UcodeSel == SXG_UCODE_SYSTEM) {
597 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
598 }
599
600 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
601 adapter, 0, 0, 0);
602 return (TRUE);
603 }
604
605 /*
606 * sxg_allocate_resources - Allocate memory and locks
607 *
608 * Arguments -
609 * adapter - A pointer to our adapter structure
610 *
611 * Return - int
612 */
613 static int sxg_allocate_resources(struct adapter_t *adapter)
614 {
615 int status = STATUS_SUCCESS;
616 u32 RssIds, IsrCount;
617 /* struct sxg_xmt_ring *XmtRing; */
618 /* struct sxg_rcv_ring *RcvRing; */
619
620 DBG_ERROR("%s ENTER\n", __func__);
621
622 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
623 adapter, 0, 0, 0);
624
625 /* Windows tells us how many CPUs it plans to use for */
626 /* RSS */
627 RssIds = SXG_RSS_CPU_COUNT(adapter);
628 IsrCount = adapter->msi_enabled ? RssIds : 1;
629
630 DBG_ERROR("%s Setup the spinlocks\n", __func__);
631
632 /* Allocate spinlocks and initialize listheads first. */
633 spin_lock_init(&adapter->RcvQLock);
634 spin_lock_init(&adapter->SglQLock);
635 spin_lock_init(&adapter->XmtZeroLock);
636 spin_lock_init(&adapter->Bit64RegLock);
637 spin_lock_init(&adapter->AdapterLock);
638 atomic_set(&adapter->pending_allocations, 0);
639
640 DBG_ERROR("%s Setup the lists\n", __func__);
641
642 InitializeListHead(&adapter->FreeRcvBuffers);
643 InitializeListHead(&adapter->FreeRcvBlocks);
644 InitializeListHead(&adapter->AllRcvBlocks);
645 InitializeListHead(&adapter->FreeSglBuffers);
646 InitializeListHead(&adapter->AllSglBuffers);
647
648 /*
649 * Mark these basic allocations done. This flag essentially
650 * tells the SxgFreeResources routine that it can grab spinlocks
651 * and reference listheads.
652 */
653 adapter->BasicAllocations = TRUE;
654 /*
655 * Main allocation loop. Start with the maximum supported by
656 * the microcode and back off if memory allocation
657 * fails. If we hit a minimum, fail.
658 */
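/*
 * (As written, the loop below never actually backs anything off - on
 * an allocation failure it frees what it got and retries the same
 * sizes.)
 */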
659
660 for (;;) {
661 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
662 (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
663
664 /*
665 * Start with big items first - receive and transmit rings.
666 * At the moment I'm going to keep the ring size fixed and
667 * adjust the TCBs if we fail. Later we might
668 * consider reducing the ring size as well..
669 */
670 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
671 sizeof(struct sxg_xmt_ring) *
672 1,
673 &adapter->PXmtRings);
674 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
675
676 if (!adapter->XmtRings) {
677 goto per_tcb_allocation_failed;
678 }
679 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
680
681 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
682 (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
683 adapter->RcvRings =
684 pci_alloc_consistent(adapter->pcidev,
685 sizeof(struct sxg_rcv_ring) * 1,
686 &adapter->PRcvRings);
687 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
688 if (!adapter->RcvRings) {
689 goto per_tcb_allocation_failed;
690 }
691 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
692 adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
693 adapter->pucode_stats = pci_map_single(adapter->pcidev,
694 adapter->ucode_stats,
695 sizeof(struct sxg_ucode_stats),
696 PCI_DMA_FROMDEVICE);
697 // memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
698 break;
699
700 per_tcb_allocation_failed:
701 /* an allocation failed. Free any successful allocations. */
702 if (adapter->XmtRings) {
703 pci_free_consistent(adapter->pcidev,
704 sizeof(struct sxg_xmt_ring) * 1,
705 adapter->XmtRings,
706 adapter->PXmtRings);
707 adapter->XmtRings = NULL;
708 }
709 if (adapter->RcvRings) {
710 pci_free_consistent(adapter->pcidev,
711 sizeof(struct sxg_rcv_ring) * 1,
712 adapter->RcvRings,
713 adapter->PRcvRings);
714 adapter->RcvRings = NULL;
715 }
716 if (adapter->ucode_stats) {
717 pci_unmap_single(adapter->pcidev, adapter->pucode_stats,
718 sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE);
719 kfree(adapter->ucode_stats);
720 adapter->ucode_stats = NULL;
721 }
722 /* Loop around and try again.... */
723
724 }
725
726 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
727 /* Initialize rcv zero and xmt zero rings */
728 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
729 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
730
731 /* Sanity check receive data structure format */
732 /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
733 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
734 ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
735 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
736
737 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
738 (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));
739
740 /* Allocate event queues. */
741 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
742 sizeof(struct sxg_event_ring) *
743 RssIds,
744 &adapter->PEventRings);
745
746 if (!adapter->EventRings) {
747 /* Caller will call SxgFreeAdapter to clean up above
748 * allocations */
749 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
750 adapter, SXG_MAX_ENTRIES, 0, 0);
751 status = STATUS_RESOURCES;
752 goto per_tcb_allocation_failed;
753 }
754 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
755
756 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
757 /* Allocate ISR */
758 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
759 IsrCount, &adapter->PIsr);
760 if (!adapter->Isr) {
761 /* Caller will call SxgFreeAdapter to clean up above
762 * allocations */
763 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
764 adapter, SXG_MAX_ENTRIES, 0, 0);
765 status = STATUS_RESOURCES;
766 goto per_tcb_allocation_failed;
767 }
768 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
769
770 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
771 __func__, (unsigned int)sizeof(u32));
772
773 /* Allocate shared XMT ring zero index location */
774 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
775 sizeof(u32),
776 &adapter->
777 PXmtRingZeroIndex);
778 if (!adapter->XmtRingZeroIndex) {
779 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
780 adapter, SXG_MAX_ENTRIES, 0, 0);
781 status = STATUS_RESOURCES;
782 goto per_tcb_allocation_failed;
783 }
784 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
785
786 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
787 adapter, SXG_MAX_ENTRIES, 0, 0);
788
789 return status;
790 }
791
792 /*
793 * sxg_config_pci -
794 *
795 * Set up PCI Configuration space
796 *
797 * Arguments -
798 * pcidev - A pointer to our adapter structure
799 */
800 static void sxg_config_pci(struct pci_dev *pcidev)
801 {
802 u16 pci_command;
803 u16 new_command;
804
805 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
806 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
807 /* Set the command register */
808 new_command = pci_command | (
809 /* Memory Space Enable */
810 PCI_COMMAND_MEMORY |
811 /* Bus master enable */
812 PCI_COMMAND_MASTER |
813 /* Memory write and invalidate */
814 PCI_COMMAND_INVALIDATE |
815 /* Parity error response */
816 PCI_COMMAND_PARITY |
817 /* System ERR */
818 PCI_COMMAND_SERR |
819 /* Fast back-to-back */
820 PCI_COMMAND_FAST_BACK);
821 if (pci_command != new_command) {
822 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
823 __func__, pci_command, new_command);
824 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
825 }
826 }
827
828 /*
829 * sxg_read_config
830 * @adapter : Pointer to the adapter structure for the card
831 * This function will read the configuration data from EEPROM/FLASH
832 */
833 static inline int sxg_read_config(struct adapter_t *adapter)
834 {
835 /* struct sxg_config data; */
836 struct sxg_config *config;
837 struct sw_cfg_data *data;
838 dma_addr_t p_addr;
839 unsigned long status;
840 unsigned long i;
841 config = pci_alloc_consistent(adapter->pcidev,
842 sizeof(struct sxg_config), &p_addr);
843
844 if(!config) {
845 /*
846 * We can't get even this much memory. Raise hell
847 * and get out of here.
848 */
849 printk(KERN_ERR "%s : Could not allocate memory for "
850 "reading EEPROM\n", __func__);
851 return -ENOMEM;
852 }
853
854 data = &config->SwCfg;
855
856 /* Initialize (reflective memory) status register */
857 WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);
858
859 /* Send request to fetch configuration data */
860 WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
861 for(i=0; i<1000; i++) {
862 READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
863 if (status != SXG_CFG_TIMEOUT)
864 break;
865 mdelay(1); /* Do we really need this? */
866 }
867
868 switch(status) {
869 /* Config read from EEPROM succeeded */
870 case SXG_CFG_LOAD_EEPROM:
871 /* Config read from Flash succeeded */
872 case SXG_CFG_LOAD_FLASH:
873 /*
874 * Copy the MAC address to adapter structure
875 * TODO: We are not doing the remaining part : FRU, etc
876 */
877 memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
878 sizeof(struct sxg_config_mac));
879 break;
880 case SXG_CFG_TIMEOUT:
881 case SXG_CFG_LOAD_INVALID:
882 case SXG_CFG_LOAD_ERROR:
883 default: /* Fix default handler later */
884 printk(KERN_WARNING "%s : We could not read the config "
885 "word. Status = %ld\n", __func__, status);
886 break;
887 }
888 pci_free_consistent(adapter->pcidev, sizeof(struct sxg_config), config,
889 p_addr);
890 if (adapter->netdev) {
891 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
892 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
893 }
894 sxg_dbg_macaddrs(adapter);
895
896 return status;
897 }
898
899 static const struct net_device_ops sxg_netdev_ops = {
900 .ndo_open = sxg_entry_open,
901 .ndo_stop = sxg_entry_halt,
902 .ndo_start_xmit = sxg_send_packets,
903 .ndo_do_ioctl = sxg_ioctl,
904 .ndo_change_mtu = sxg_change_mtu,
905 .ndo_get_stats = sxg_get_stats,
906 .ndo_set_multicast_list = sxg_mcast_set_list,
907 .ndo_validate_addr = eth_validate_addr,
908 #if XXXTODO
909 .ndo_set_mac_address = sxg_mac_set_address,
910 #else
911 .ndo_set_mac_address = eth_mac_addr,
912 #endif
913 };
914
915 static int sxg_entry_probe(struct pci_dev *pcidev,
916 const struct pci_device_id *pci_tbl_entry)
917 {
918 static int did_version = 0;
919 int err;
920 struct net_device *netdev;
921 struct adapter_t *adapter;
922 void __iomem *memmapped_ioaddr;
923 u32 status = 0;
924 ulong mmio_start = 0;
925 ulong mmio_len = 0;
926 unsigned char revision_id;
927
928 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
929 __func__, jiffies, smp_processor_id());
930
931 /* Initialize trace buffer */
932 #ifdef ATKDBG
933 SxgTraceBuffer = &LSxgTraceBuffer;
934 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
935 #endif
936
937 sxg_global.dynamic_intagg = dynamic_intagg;
938
939 err = pci_enable_device(pcidev);
940
941 DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
942 if (err) {
943 return err;
944 }
945
946 if (sxg_debug > 0 && did_version++ == 0) {
947 printk(KERN_INFO "%s\n", sxg_banner);
948 printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
949 }
950
951 pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);
952
953 if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
954 DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
955 } else {
956 if ((err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)))) {
957 DBG_ERROR
958 ("No usable DMA configuration, aborting err[%x]\n",
959 err);
960 return err;
961 }
962 DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(32)) successful\n");
963 }
964
965 DBG_ERROR("Call pci_request_regions\n");
966
967 err = pci_request_regions(pcidev, sxg_driver_name);
968 if (err) {
969 DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
970 return err;
971 }
972
973 DBG_ERROR("call pci_set_master\n");
974 pci_set_master(pcidev);
975
976 DBG_ERROR("call alloc_etherdev\n");
977 netdev = alloc_etherdev(sizeof(struct adapter_t));
978 if (!netdev) {
979 err = -ENOMEM;
980 goto err_out_exit_sxg_probe;
981 }
982 DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
983
984 SET_NETDEV_DEV(netdev, &pcidev->dev);
985
986 pci_set_drvdata(pcidev, netdev);
987 adapter = netdev_priv(netdev);
988 if (revision_id == 1) {
989 adapter->asictype = SAHARA_REV_A;
990 } else if (revision_id == 2) {
991 adapter->asictype = SAHARA_REV_B;
992 } else {
993 ASSERT(0);
994 DBG_ERROR("%s Unexpected revision ID %x\n", __FUNCTION__, revision_id);
995 goto err_out_exit_sxg_probe;
996 }
997 adapter->netdev = netdev;
998 adapter->pcidev = pcidev;
999
1000 mmio_start = pci_resource_start(pcidev, 0);
1001 mmio_len = pci_resource_len(pcidev, 0);
1002
1003 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
1004 mmio_start, mmio_len);
1005
1006 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
1007 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
1008 memmapped_ioaddr);
1009 if (!memmapped_ioaddr) {
1010 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
1011 __func__, mmio_len, mmio_start);
1012 goto err_out_free_mmio_region_0;
1013 }
1014
1015 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
1016 len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
1017 mmio_len, pcidev->irq);
1018
1019 adapter->HwRegs = (void *)memmapped_ioaddr;
1020 adapter->base_addr = memmapped_ioaddr;
1021
1022 mmio_start = pci_resource_start(pcidev, 2);
1023 mmio_len = pci_resource_len(pcidev, 2);
1024
1025 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
1026 mmio_start, mmio_len);
1027
1028 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
1029 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
1030 memmapped_ioaddr);
1031 if (!memmapped_ioaddr) {
1032 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
1033 __func__, mmio_len, mmio_start);
1034 goto err_out_free_mmio_region_2;
1035 }
1036
1037 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
1038 "start[%lx] len[%lx], IRQ %d.\n", __func__,
1039 memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
1040
1041 adapter->UcodeRegs = (void *)memmapped_ioaddr;
1042
1043 adapter->State = SXG_STATE_INITIALIZING;
1044 /*
1045 * Maintain a list of all adapters anchored by
1046 * the global SxgDriver structure.
1047 */
1048 adapter->Next = SxgDriver.Adapters;
1049 SxgDriver.Adapters = adapter;
1050 adapter->AdapterID = ++SxgDriver.AdapterID;
1051
1052 /* Initialize CRC table used to determine multicast hash */
1053 sxg_mcast_init_crc32();
1054
1055 adapter->JumboEnabled = FALSE;
1056 adapter->RssEnabled = FALSE;
1057 if (adapter->JumboEnabled) {
1058 adapter->FrameSize = JUMBOMAXFRAME;
1059 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
1060 } else {
1061 adapter->FrameSize = ETHERMAXFRAME;
1062 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
1063 }
1064
1065 /*
1066 * status = SXG_READ_EEPROM(adapter);
1067 * if (!status) {
1068 * goto sxg_init_bad;
1069 * }
1070 */
1071
1072 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
1073 sxg_config_pci(pcidev);
1074 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
1075
1076 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
1077 sxg_init_driver();
1078 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
1079
1080 adapter->vendid = pci_tbl_entry->vendor;
1081 adapter->devid = pci_tbl_entry->device;
1082 adapter->subsysid = pci_tbl_entry->subdevice;
1083 adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
1084 adapter->functionnumber = (pcidev->devfn & 0x7);
1085 adapter->memorylength = pci_resource_len(pcidev, 0);
1086 adapter->irq = pcidev->irq;
1087 adapter->next_netdevice = head_netdevice;
1088 head_netdevice = netdev;
1089 adapter->port = 0; /*adapter->functionnumber; */
1090
1091 /* Allocate memory and other resources */
1092 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
1093 status = sxg_allocate_resources(adapter);
1094 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
1095 __func__, status);
1096 if (status != STATUS_SUCCESS) {
1097 goto err_out_unmap;
1098 }
1099
1100 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
1101 if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
1102 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
1103 __func__);
1104 sxg_read_config(adapter);
1105 status = sxg_adapter_set_hwaddr(adapter);
1106 } else {
1107 adapter->state = ADAPT_FAIL;
1108 adapter->linkstate = LINK_DOWN;
1109 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
1110 }
1111
1112 netdev->base_addr = (unsigned long)adapter->base_addr;
1113 netdev->irq = adapter->irq;
1114 netdev->netdev_ops = &sxg_netdev_ops;
1115 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
1116 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1117 err = sxg_set_interrupt_capability(adapter);
1118 if (err != STATUS_SUCCESS)
1119 DBG_ERROR("Cannot enable MSI-X capability\n");
1120
1121 strcpy(netdev->name, "eth%d");
1122 /* strcpy(netdev->name, pci_name(pcidev)); */
1123 if ((err = register_netdev(netdev))) {
1124 DBG_ERROR("Cannot register net device, aborting. %s\n",
1125 netdev->name);
1126 goto err_out_unmap;
1127 }
1128
1129 netif_napi_add(netdev, &adapter->napi,
1130 sxg_poll, SXG_NETDEV_WEIGHT);
1131 netdev->watchdog_timeo = 2 * HZ;
1132 init_timer(&adapter->watchdog_timer);
1133 adapter->watchdog_timer.function = &sxg_watchdog;
1134 adapter->watchdog_timer.data = (unsigned long) adapter;
1135 INIT_WORK(&adapter->update_link_status, sxg_update_link_status);
1136
1137 DBG_ERROR
1138 ("sxg: %s addr 0x%lx, irq %d, MAC addr \
1139 %02X:%02X:%02X:%02X:%02X:%02X\n",
1140 netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
1141 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
1142 netdev->dev_addr[4], netdev->dev_addr[5]);
1143
1144 /* sxg_init_bad: */
1145 ASSERT(status == FALSE);
1146 /* sxg_free_adapter(adapter); */
1147
1148 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
1149 status, jiffies, smp_processor_id());
1150 return status;
1151
1152 err_out_unmap:
1153 sxg_free_resources(adapter);
1154
1155 err_out_free_mmio_region_2:
1156
1157 mmio_start = pci_resource_start(pcidev, 2);
1158 mmio_len = pci_resource_len(pcidev, 2);
1159 release_mem_region(mmio_start, mmio_len);
1160
1161 err_out_free_mmio_region_0:
1162
1163 mmio_start = pci_resource_start(pcidev, 0);
1164 mmio_len = pci_resource_len(pcidev, 0);
1165
1166 release_mem_region(mmio_start, mmio_len);
1167
1168 err_out_exit_sxg_probe:
1169
1170 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
1171 smp_processor_id());
1172
1173 pci_disable_device(pcidev);
1174 DBG_ERROR("sxg: %s deallocate device\n", __func__);
1175 if (netdev)
1176 free_netdev(netdev); /* alloc_etherdev() memory requires free_netdev() */
1177 printk("Exit %s, Sxg driver loading failed..\n", __func__);
1178 return -ENODEV;
1179 }
1180
1181 /*
1182 * LINE BASE Interrupt routines..
1183 *
1184 * sxg_disable_interrupt
1185 *
1186 * DisableInterrupt Handler
1187 *
1188 * Arguments:
1189 *
1190 * adapter: Our adapter structure
1191 *
1192 * Return Value:
1193 * None.
1194 */
1195 static void sxg_disable_interrupt(struct adapter_t *adapter)
1196 {
1197 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
1198 adapter, adapter->InterruptsEnabled, 0, 0);
1199 /* For now, RSS is disabled with line based interrupts */
1200 ASSERT(adapter->RssEnabled == FALSE);
1201 /* Turn off interrupts by writing to the icr register. */
1202 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
1203
1204 adapter->InterruptsEnabled = 0;
1205
1206 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
1207 adapter, adapter->InterruptsEnabled, 0, 0);
1208 }
1209
1210 /*
1211 * sxg_enable_interrupt
1212 *
1213 * EnableInterrupt Handler
1214 *
1215 * Arguments:
1216 *
1217 * adapter: Our adapter structure
1218 *
1219 * Return Value:
1220 * None.
1221 */
1222 static void sxg_enable_interrupt(struct adapter_t *adapter)
1223 {
1224 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
1225 adapter, adapter->InterruptsEnabled, 0, 0);
1226 /* For now, RSS is disabled with line based interrupts */
1227 ASSERT(adapter->RssEnabled == FALSE);
1228 /* Turn on interrupts by writing to the icr register. */
1229 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
1230
1231 adapter->InterruptsEnabled = 1;
1232
1233 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
1234 adapter, 0, 0, 0);
1235 }
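/*
 * Both helpers above write SXG_ICR(0, ...) - MSI message id 0 -
 * because RSS fan-out is disabled with line based interrupts;
 * everything runs through ISR/event queue zero.
 */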
1236
1237 /*
1238 * sxg_isr - Process an line-based interrupt
1239 *
1240 * Arguments:
1241 * Context - Our adapter structure
1242 * QueueDefault - Output parameter to queue to default CPU
1243 * TargetCpus - Output bitmap to schedule DPC's
1244 *
1245 * Return Value: TRUE if our interrupt
1246 */
1247 static irqreturn_t sxg_isr(int irq, void *dev_id)
1248 {
1249 struct net_device *dev = (struct net_device *) dev_id;
1250 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1251
1252 if(adapter->state != ADAPT_UP)
1253 return IRQ_NONE;
1254 adapter->Stats.NumInts++;
1255 if (adapter->Isr[0] == 0) {
1256 /*
1257 * The SLIC driver used to experience a number of spurious
1258 * interrupts due to the delay associated with the masking of
1259 * the interrupt (we'd bounce back in here). If we see that
1260 * again with Sahara,add a READ_REG of the Icr register after
1261 * the WRITE_REG below.
1262 */
1263 adapter->Stats.FalseInts++;
1264 return IRQ_NONE;
1265 }
1266 /*
1267 * Move the Isr contents and clear the value in
1268 * shared memory, and mask interrupts
1269 */
1270 /* ASSERT(adapter->IsrDpcsPending == 0); */
1271 #if XXXTODO /* RSS Stuff */
1272 /*
1273 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1274 * schedule DPC's based on event queues.
1275 */
1276 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1277 for (i = 0;
1278 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1279 i++) {
1280 struct sxg_event_ring *EventRing =
1281 &adapter->EventRings[i];
1282 struct sxg_event *Event =
1283 &EventRing->Ring[adapter->NextEvent[i]];
1284 unsigned char Cpu =
1285 adapter->RssSystemInfo->RssIdToCpu[i];
1286 if (Event->Status & EVENT_STATUS_VALID) {
1287 adapter->IsrDpcsPending++;
1288 CpuMask |= (1 << Cpu);
1289 }
1290 }
1291 }
1292 /*
1293 * Now, either schedule the CPUs specified by the CpuMask,
1294 * or queue default
1295 */
1296 if (CpuMask) {
1297 *QueueDefault = FALSE;
1298 } else {
1299 adapter->IsrDpcsPending = 1;
1300 *QueueDefault = TRUE;
1301 }
1302 *TargetCpus = CpuMask;
1303 #endif
1304 sxg_interrupt(adapter);
1305
1306 return IRQ_HANDLED;
1307 }
1308
1309 static void sxg_interrupt(struct adapter_t *adapter)
1310 {
1311 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
1312
1313 if (napi_schedule_prep(&adapter->napi)) {
1314 __napi_schedule(&adapter->napi);
1315 }
1316 }
1317
1318 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
1319 int budget)
1320 {
1321 /* unsigned char RssId = 0; */
1322 u32 NewIsr;
1323 int sxg_napi_continue = 1;
1324 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1325 adapter, adapter->IsrCopy[0], 0, 0);
1326 /* For now, RSS is disabled with line based interrupts */
1327 ASSERT(adapter->RssEnabled == FALSE);
1328
1329 adapter->IsrCopy[0] = adapter->Isr[0];
1330 adapter->Isr[0] = 0;
1331
1332 /* Always process the event queue. */
1333 while (sxg_napi_continue)
1334 {
1335 sxg_process_event_queue(adapter,
1336 (adapter->RssEnabled ? /*RssId */ 0 : 0),
1337 &sxg_napi_continue, work_done, budget);
1338 }
1339
1340 #if XXXTODO /* RSS stuff */
1341 if (--adapter->IsrDpcsPending) {
1342 /* We're done. */
1343 ASSERT(adapter->RssEnabled);
1344 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1345 adapter, 0, 0, 0);
1346 return;
1347 }
1348 #endif
1349 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1350 NewIsr = sxg_process_isr(adapter, 0);
1351 /* Reenable interrupts */
1352 adapter->IsrCopy[0] = 0;
1353 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1354 adapter, NewIsr, 0, 0);
1355
1356 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1357 adapter, 0, 0, 0);
1358 }
1359 static int sxg_poll(struct napi_struct *napi, int budget)
1360 {
1361 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1362 int work_done = 0;
1363
1364 sxg_handle_interrupt(adapter, &work_done, budget);
1365
1366 if (work_done < budget) {
1367 napi_complete(napi);
1368 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1369 }
1370 return work_done;
1371 }
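/*
 * Note: the WRITE_REG of 0 to the shared Isr location above appears to
 * be what re-arms the card once polling is complete, pairing with the
 * SXG_ICR_MASK write done in sxg_interrupt() when the poll was
 * scheduled.
 */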
1372
1373 /*
1374 * sxg_process_isr - Process an interrupt. Called from the line-based and
1375 * message based interrupt DPC routines
1376 *
1377 * Arguments:
1378 * adapter - Our adapter structure
1379 * Queue - The ISR that needs processing
1380 *
1381 * Return Value:
1382 * None
1383 */
1384 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1385 {
1386 u32 Isr = adapter->IsrCopy[MessageId];
1387 u32 NewIsr = 0;
1388
1389 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1390 adapter, Isr, 0, 0);
1391
1392 /* Error */
1393 if (Isr & SXG_ISR_ERR) {
1394 if (Isr & SXG_ISR_PDQF) {
1395 adapter->Stats.PdqFull++;
1396 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1397 }
1398 /* No host buffer */
1399 if (Isr & SXG_ISR_RMISS) {
1400 /*
1401 * There is a bunch of code in the SLIC driver which
1402 * attempts to process more receive events per DPC
1403 * if we start to fall behind. We'll probably
1404 * need to do something similar here, but hold
1405 * off for now. I don't want to make the code more
1406 * complicated than strictly needed.
1407 */
1408 adapter->stats.rx_missed_errors++;
1409 if (adapter->stats.rx_missed_errors < 5) {
1410 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1411 __func__);
1412 }
1413 }
1414 /* Card crash */
1415 if (Isr & SXG_ISR_DEAD) {
1416 /*
1417 * Set aside the crash info and set the adapter state
1418 * to RESET
1419 */
1420 adapter->CrashCpu = (unsigned char)
1421 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
1422 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1423 adapter->Dead = TRUE;
1424 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1425 adapter->CrashLocation, adapter->CrashCpu);
1426 }
1427 /* Event ring full */
1428 if (Isr & SXG_ISR_ERFULL) {
1429 /*
1430 * Same issue as RMISS, really. This means the
1431 * host is falling behind the card. Need to increase
1432 * event ring size, process more events per interrupt,
1433 * and/or reduce/remove interrupt aggregation.
1434 */
1435 adapter->Stats.EventRingFull++;
1436 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1437 __func__);
1438 }
1439 /* Transmit drop - no DRAM buffers or XMT error */
1440 if (Isr & SXG_ISR_XDROP) {
1441 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1442 }
1443 }
1444 /* Slowpath send completions */
1445 if (Isr & SXG_ISR_SPSEND) {
1446 sxg_complete_slow_send(adapter);
1447 }
1448 /* Dump */
1449 if (Isr & SXG_ISR_UPC) {
1450 /* Maybe change when debug is added.. */
1451 // ASSERT(adapter->DumpCmdRunning);
1452 adapter->DumpCmdRunning = FALSE;
1453 }
1454 /* Link event */
1455 if (Isr & SXG_ISR_LINK) {
1456 if (adapter->state != ADAPT_DOWN) {
1457 adapter->link_status_changed = 1;
1458 schedule_work(&adapter->update_link_status);
1459 }
1460 }
1461 /* Debug - breakpoint hit */
1462 if (Isr & SXG_ISR_BREAK) {
1463 /*
1464 * At the moment AGDB isn't written to support interactive
1465 * debug sessions. When it is, this interrupt will be used to
1466 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
1467 */
1468 ASSERT(0);
1469 }
1470 /* Heartbeat response */
1471 if (Isr & SXG_ISR_PING) {
1472 adapter->PingOutstanding = FALSE;
1473 }
1474 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1475 adapter, Isr, NewIsr, 0);
1476
1477 return (NewIsr);
1478 }
1479
1480 /*
1481 * sxg_rcv_checksum - Set the checksum for received packet
1482 *
1483 * Arguments:
1484 * @adapter - Adapter structure on which packet is received
1485 * @skb - Packet which is received
1486 * @Event - Event read from hardware
1487 */
1488
1489 void sxg_rcv_checksum(struct adapter_t *adapter, struct sk_buff *skb,
1490 struct sxg_event *Event)
1491 {
1492 skb->ip_summed = CHECKSUM_NONE;
1493 if (likely(adapter->flags & SXG_RCV_IP_CSUM_ENABLED)) {
1494 if (likely(adapter->flags & SXG_RCV_TCP_CSUM_ENABLED)
1495 && (Event->Status & EVENT_STATUS_TCPIP)) {
1496 if(!(Event->Status & EVENT_STATUS_TCPBAD))
1497 skb->ip_summed = CHECKSUM_UNNECESSARY;
1498 if(!(Event->Status & EVENT_STATUS_IPBAD))
1499 skb->ip_summed = CHECKSUM_UNNECESSARY;
1500 } else if(Event->Status & EVENT_STATUS_IPONLY) {
1501 if(!(Event->Status & EVENT_STATUS_IPBAD))
1502 skb->ip_summed = CHECKSUM_UNNECESSARY;
1503 }
1504 }
1505 }
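/*
 * FIXME: in the TCP/IP branch above, a frame with a bad TCP checksum
 * but a good IP header checksum is still marked CHECKSUM_UNNECESSARY,
 * because the IPBAD test overwrites the result of the TCPBAD test.
 */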
1506
1507 /*
1508 * sxg_process_event_queue - Process our event queue
1509 *
1510 * Arguments:
1511 * - adapter - Adapter structure
1512 * - RssId - The event queue requiring processing
1513 *
1514 * Return Value:
1515 * None.
1516 */
1517 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1518 int *sxg_napi_continue, int *work_done, int budget)
1519 {
1520 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1521 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1522 u32 EventsProcessed = 0, Batches = 0;
1523 struct sk_buff *skb;
1524 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1525 struct sk_buff *prev_skb = NULL;
1526 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1527 u32 Index;
1528 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1529 #endif
1530 u32 ReturnStatus = 0;
1531 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
1532
1533 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1534 (adapter->State == SXG_STATE_PAUSING) ||
1535 (adapter->State == SXG_STATE_PAUSED) ||
1536 (adapter->State == SXG_STATE_HALTING));
1537 /*
1538 * We may still have unprocessed events on the queue if
1539 * the card crashed. Don't process them.
1540 */
1541 if (adapter->Dead) {
1542 return (0);
1543 }
1544 /*
1545 * In theory there should only be a single processor that
1546 * accesses this queue, and only at interrupt-DPC time. So
1547 * we shouldn't need a lock for any of this.
1548 */
1549 while (Event->Status & EVENT_STATUS_VALID) {
1550 (*sxg_napi_continue) = 1;
1551 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1552 Event, Event->Code, Event->Status,
1553 adapter->NextEvent);
1554 switch (Event->Code) {
1555 case EVENT_CODE_BUFFERS:
1556 /* struct sxg_ring_info Head & Tail == unsigned char */
1557 ASSERT(!(Event->CommandIndex & 0xFF00));
1558 sxg_complete_descriptor_blocks(adapter,
1559 Event->CommandIndex);
1560 break;
1561 case EVENT_CODE_SLOWRCV:
1562 (*work_done)++;
1563 --adapter->RcvBuffersOnCard;
1564 if ((skb = sxg_slow_receive(adapter, Event))) {
1565 u32 rx_bytes;
1566 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1567 /* Add it to our indication list */
1568 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1569 IndicationList, num_skbs);
1570 /*
1571 * Linux, we just pass up each skb to the
1572 * protocol above at this point, there is no
1573 * capability of an indication list.
1574 */
1575 #else
1576 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1577 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1578 rx_bytes = Event->Length;
1579 adapter->stats.rx_packets++;
1580 adapter->stats.rx_bytes += rx_bytes;
1581 sxg_rcv_checksum(adapter, skb, Event);
1582 skb->dev = adapter->netdev;
1583 netif_receive_skb(skb);
1584 #endif
1585 }
1586 break;
1587 default:
1588 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1589 __func__, Event->Code);
1590 /* ASSERT(0); */
1591 }
1592 /*
1593 * See if we need to restock card receive buffers.
1594 * There are two things to note here:
1595 * First - This test is not SMP safe. The
1596 * adapter->BuffersOnCard field is protected via atomic
1597 * interlocked calls, but we do not protect it with respect
1598 * to these tests. The only way to do that is with a lock,
1599 * and I don't want to grab a lock every time we adjust the
1600 * BuffersOnCard count. Instead, we allow the buffer
1601 * replenishment to be off once in a while. The worst that
1602 * can happen is the card is given one more-or-less descriptor
1603 * block than the arbitrary value we've chosen. No big deal.
1604 * In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1605 * is adjusted.
1606 * Second - We expect this test to rarely
1607 * evaluate to true. We attempt to refill descriptor blocks
1608 * as they are returned to us (sxg_complete_descriptor_blocks),
1609 * so the only time this should evaluate to true is when
1610 * sxg_complete_descriptor_blocks failed to allocate
1611 * receive buffers.
1612 */
1613 if (adapter->JumboEnabled)
1614 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1615
1616 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
1617 sxg_stock_rcv_buffers(adapter);
1618 }
1619 /*
1620 * It's more efficient to just set this to zero.
1621 * But clearing the top bit saves potential debug info...
1622 */
1623 Event->Status &= ~EVENT_STATUS_VALID;
1624 /* Advance to the next event */
1625 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1626 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1627 EventsProcessed++;
1628 if (EventsProcessed == EVENT_RING_BATCH) {
1629 /* Release a batch of events back to the card */
1630 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1631 EVENT_RING_BATCH, FALSE);
1632 EventsProcessed = 0;
1633 /*
1634 * If we've processed our batch limit, break out of the
1635 * loop and return SXG_ISR_EVENT to arrange for us to
1636 * be called again
1637 */
1638 if (Batches++ == EVENT_BATCH_LIMIT) {
1639 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1640 TRACE_NOISY, "EvtLimit", Batches,
1641 adapter->NextEvent, 0, 0);
1642 ReturnStatus = SXG_ISR_EVENT;
1643 break;
1644 }
1645 }
1646 if (*work_done >= budget) {
1647 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1648 EventsProcessed, FALSE);
1649 EventsProcessed = 0;
1650 (*sxg_napi_continue) = 0;
1651 break;
1652 }
1653 }
1654 if (!(Event->Status & EVENT_STATUS_VALID))
1655 (*sxg_napi_continue) = 0;
1656
1657 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1658 /* Indicate any received dumb-nic frames */
1659 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1660 #endif
1661 /* Release events back to the card. */
1662 if (EventsProcessed) {
1663 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1664 EventsProcessed, FALSE);
1665 }
1666 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1667 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1668
1669 return (ReturnStatus);
1670 }
1671
1672 /*
1673 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1674 *
1675 * Arguments -
1676 * adapter - A pointer to our adapter structure
1677 * Return
1678 * None
1679 */
1680 static void sxg_complete_slow_send(struct adapter_t *adapter)
1681 {
1682 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1683 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
1684 u32 *ContextType;
1685 struct sxg_cmd *XmtCmd;
1686 unsigned long flags = 0;
1687 unsigned long sgl_flags = 0;
1688 unsigned int processed_count = 0;
1689
1690 /*
1691 * NOTE - This lock is dropped and regrabbed in this loop.
1692 * This means two different processors can both be running
1693 * through this loop. Be *very* careful.
1694 */
1695 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1696
1697 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1698 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1699
1700 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1701 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
1702 /*
1703 * Locate the current Cmd (ring descriptor entry), and
1704 * associated SGL, and advance the tail
1705 */
1706 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1707 ASSERT(ContextType);
1708 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1709 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1710 /* Clear the SGL field. */
1711 XmtCmd->Sgl = 0;
1712
1713 switch (*ContextType) {
1714 case SXG_SGL_DUMB:
1715 {
1716 struct sk_buff *skb;
1717 struct sxg_scatter_gather *SxgSgl =
1718 (struct sxg_scatter_gather *)ContextType;
1719 dma64_addr_t FirstSgeAddress;
1720 u32 FirstSgeLength;
1721
			/* Dumb-nic send. Command context is the dumb-nic SGL */
			skb = SxgSgl->DumbPacket;
1725 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1726 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
1727 /* Complete the send */
1728 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1729 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1730 0, 0);
1731 ASSERT(adapter->Stats.XmtQLen);
				/*
				 * Now drop the lock and complete the send
				 * back to the OS. We need to drop the lock
				 * because the OS can come back with a
				 * chimney send, which results in a double
				 * trip in SxgTcpOutput
				 */
1739 spin_unlock_irqrestore(
1740 &adapter->XmtZeroLock, flags);
1741
1742 SxgSgl->DumbPacket = NULL;
1743 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1744 FirstSgeAddress,
1745 FirstSgeLength);
1746 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
1747 /* and reacquire.. */
1748 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1749 }
1750 break;
1751 default:
1752 ASSERT(0);
1753 }
1754 }
1755 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1756 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1757 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1758 }
1759
1760 /*
1761 * sxg_slow_receive
1762 *
1763 * Arguments -
1764 * adapter - A pointer to our adapter structure
1765 * Event - Receive event
1766 *
1767 * Return - skb
1768 */
1769 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1770 struct sxg_event *Event)
1771 {
1772 u32 BufferSize = adapter->ReceiveBufferSize;
1773 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1774 struct sk_buff *Packet;
1775 static int read_counter = 0;
1776
1777 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
	/*
	 * Refresh the hardware statistics periodically (roughly every
	 * 257th receive, when the counter's 0x100 bit becomes set).
	 */
	if (read_counter++ & 0x100) {
		sxg_collect_statistics(adapter);
		read_counter = 0;
	}
1783 ASSERT(RcvDataBufferHdr);
1784 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
1785 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1786 RcvDataBufferHdr, RcvDataBufferHdr->State,
1787 /*RcvDataBufferHdr->VirtualAddress*/ 0);
1788 /* Drop rcv frames in non-running state */
1789 switch (adapter->State) {
1790 case SXG_STATE_RUNNING:
1791 break;
1792 case SXG_STATE_PAUSING:
1793 case SXG_STATE_PAUSED:
1794 case SXG_STATE_HALTING:
1795 goto drop;
1796 default:
1797 ASSERT(0);
1798 goto drop;
1799 }
1800
1801 /*
1802 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1803 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1804 */
1805
1806 /* Change buffer state to UPSTREAM */
1807 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1808 if (Event->Status & EVENT_STATUS_RCVERR) {
1809 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1810 Event, Event->Status, Event->HostHandle, 0);
1811 sxg_process_rcv_error(adapter, *(u32 *)
1812 SXG_RECEIVE_DATA_LOCATION
1813 (RcvDataBufferHdr));
1814 goto drop;
1815 }
1816 #if XXXTODO /* VLAN stuff */
1817 /* If there's a VLAN tag, extract it and validate it */
1818 if (((struct ether_header *)
1819 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1820 == ETHERTYPE_VLAN) {
1821 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1822 STATUS_SUCCESS) {
1823 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1824 "BadVlan", Event,
1825 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1826 Event->Length, 0);
1827 goto drop;
1828 }
1829 }
1830 #endif
1831 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1832
1833 if (!sxg_mac_filter(adapter,
1834 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1835 Event->Length)) {
1836 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1837 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1838 Event->Length, 0);
1839 goto drop;
1840 }
1841
1842 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1843 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1844 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
1845
1846 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1847 RcvDataBufferHdr, Packet, Event->Length, 0);
	/* Detach the skb from the buffer header and allocate a replacement */
1849 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
1850 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
1851 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1852 if (RcvDataBufferHdr->skb)
1853 {
1854 spin_lock(&adapter->RcvQLock);
1855 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1856 // adapter->RcvBuffersOnCard ++;
1857 spin_unlock(&adapter->RcvQLock);
1858 }
1859 return (Packet);
1860
1861 drop:
1862 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1863 RcvDataBufferHdr, Event->Length, 0, 0);
1864 adapter->stats.rx_dropped++;
1865 // adapter->Stats.RcvDiscards++;
1866 spin_lock(&adapter->RcvQLock);
1867 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1868 spin_unlock(&adapter->RcvQLock);
1869 return (NULL);
1870 }
1871
1872 /*
1873 * sxg_process_rcv_error - process receive error and update
1874 * stats
1875 *
1876 * Arguments:
1877 * adapter - Adapter structure
1878 * ErrorStatus - 4-byte receive error status
1879 *
1880 * Return Value : None
1881 */
1882 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
1883 {
1884 u32 Error;
1885
1886 adapter->stats.rx_errors++;
1887
1888 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1889 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1890 switch (Error) {
1891 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1892 adapter->Stats.TransportCsum++;
1893 break;
1894 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1895 adapter->Stats.TransportUflow++;
1896 break;
1897 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1898 adapter->Stats.TransportHdrLen++;
1899 break;
1900 }
1901 }
1902 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1903 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1904 switch (Error) {
1905 case SXG_RCV_STATUS_NETWORK_CSUM:
1906 adapter->Stats.NetworkCsum++;
1907 break;
1908 case SXG_RCV_STATUS_NETWORK_UFLOW:
1909 adapter->Stats.NetworkUflow++;
1910 break;
1911 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1912 adapter->Stats.NetworkHdrLen++;
1913 break;
1914 }
1915 }
1916 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1917 adapter->Stats.Parity++;
1918 }
1919 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1920 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1921 switch (Error) {
1922 case SXG_RCV_STATUS_LINK_PARITY:
1923 adapter->Stats.LinkParity++;
1924 break;
1925 case SXG_RCV_STATUS_LINK_EARLY:
1926 adapter->Stats.LinkEarly++;
1927 break;
1928 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1929 adapter->Stats.LinkBufOflow++;
1930 break;
1931 case SXG_RCV_STATUS_LINK_CODE:
1932 adapter->Stats.LinkCode++;
1933 break;
1934 case SXG_RCV_STATUS_LINK_DRIBBLE:
1935 adapter->Stats.LinkDribble++;
1936 break;
1937 case SXG_RCV_STATUS_LINK_CRC:
1938 adapter->Stats.LinkCrc++;
1939 break;
1940 case SXG_RCV_STATUS_LINK_OFLOW:
1941 adapter->Stats.LinkOflow++;
1942 break;
1943 case SXG_RCV_STATUS_LINK_UFLOW:
1944 adapter->Stats.LinkUflow++;
1945 break;
1946 }
1947 }
1948 }
1949
1950 /*
1951 * sxg_mac_filter
1952 *
1953 * Arguments:
1954 * adapter - Adapter structure
1955 * pether - Ethernet header
1956 * length - Frame length
1957 *
1958 * Return Value : TRUE if the frame is to be allowed
1959 */
1960 static bool sxg_mac_filter(struct adapter_t *adapter,
1961 struct ether_header *EtherHdr, ushort length)
1962 {
1963 bool EqualAddr;
1964 struct net_device *dev = adapter->netdev;
1965
1966 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1967 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1968 /* broadcast */
1969 if (adapter->MacFilter & MAC_BCAST) {
1970 adapter->Stats.DumbRcvBcastPkts++;
1971 adapter->Stats.DumbRcvBcastBytes += length;
1972 return (TRUE);
1973 }
1974 } else {
1975 /* multicast */
1976 if (adapter->MacFilter & MAC_ALLMCAST) {
1977 adapter->Stats.DumbRcvMcastPkts++;
1978 adapter->Stats.DumbRcvMcastBytes += length;
1979 return (TRUE);
1980 }
1981 if (adapter->MacFilter & MAC_MCAST) {
1982 struct dev_mc_list *mclist = dev->mc_list;
1983 while (mclist) {
1984 ETHER_EQ_ADDR(mclist->da_addr,
1985 EtherHdr->ether_dhost,
1986 EqualAddr);
1987 if (EqualAddr) {
1988 adapter->Stats.
1989 DumbRcvMcastPkts++;
1990 adapter->Stats.
1991 DumbRcvMcastBytes += length;
1992 return (TRUE);
1993 }
1994 mclist = mclist->next;
1995 }
1996 }
1997 }
1998 } else if (adapter->MacFilter & MAC_DIRECTED) {
1999 /*
2000 * Not broadcast or multicast. Must be directed at us or
2001 * the card is in promiscuous mode. Either way, consider it
2002 * ours if MAC_DIRECTED is set
2003 */
2004 adapter->Stats.DumbRcvUcastPkts++;
2005 adapter->Stats.DumbRcvUcastBytes += length;
2006 return (TRUE);
2007 }
2008 if (adapter->MacFilter & MAC_PROMISC) {
2009 /* Whatever it is, keep it. */
2010 return (TRUE);
2011 }
2012 return (FALSE);
2013 }
2014
2015 static int sxg_register_interrupt(struct adapter_t *adapter)
2016 {
2017 if (!adapter->intrregistered) {
2018 int retval;
2019
2020 DBG_ERROR
2021 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
2022 __func__, adapter, adapter->netdev->irq, NR_IRQS);
2023
2024 spin_unlock_irqrestore(&sxg_global.driver_lock,
2025 sxg_global.flags);
2026
2027 retval = request_irq(adapter->netdev->irq,
2028 &sxg_isr,
2029 IRQF_SHARED,
2030 adapter->netdev->name, adapter->netdev);
2031
2032 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2033
2034 if (retval) {
2035 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
2036 adapter->netdev->name, retval);
2037 return (retval);
2038 }
2039 adapter->intrregistered = 1;
2040 adapter->IntRegistered = TRUE;
2041 /* Disable RSS with line-based interrupts */
2042 adapter->RssEnabled = FALSE;
2043 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
2044 __func__, adapter, adapter->netdev->irq);
2045 }
2046 return (STATUS_SUCCESS);
2047 }
2048
2049 static void sxg_deregister_interrupt(struct adapter_t *adapter)
2050 {
2051 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
2052 #if XXXTODO
2053 slic_init_cleanup(adapter);
2054 #endif
2055 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
2056 adapter->error_interrupts = 0;
2057 adapter->rcv_interrupts = 0;
2058 adapter->xmit_interrupts = 0;
2059 adapter->linkevent_interrupts = 0;
2060 adapter->upr_interrupts = 0;
2061 adapter->num_isrs = 0;
2062 adapter->xmit_completes = 0;
2063 adapter->rcv_broadcasts = 0;
2064 adapter->rcv_multicasts = 0;
2065 adapter->rcv_unicasts = 0;
2066 DBG_ERROR("sxg: %s EXIT\n", __func__);
2067 }
2068
2069 /*
2070 * sxg_if_init
2071 *
2072 * Perform initialization of our slic interface.
2073 *
2074 */
2075 static int sxg_if_init(struct adapter_t *adapter)
2076 {
2077 struct net_device *dev = adapter->netdev;
2078 int status = 0;
2079
2080 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
2081 __func__, adapter->netdev->name,
2082 adapter->state,
2083 adapter->linkstate, dev->flags);
2084
2085 /* adapter should be down at this point */
2086 if (adapter->state != ADAPT_DOWN) {
2087 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
2088 return (-EIO);
2089 }
2090 ASSERT(adapter->linkstate == LINK_DOWN);
2091
2092 adapter->devflags_prev = dev->flags;
2093 adapter->MacFilter = MAC_DIRECTED;
2094 if (dev->flags) {
2095 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
2096 adapter->netdev->name);
2097 if (dev->flags & IFF_BROADCAST) {
2098 adapter->MacFilter |= MAC_BCAST;
2099 DBG_ERROR("BCAST ");
2100 }
2101 if (dev->flags & IFF_PROMISC) {
2102 adapter->MacFilter |= MAC_PROMISC;
2103 DBG_ERROR("PROMISC ");
2104 }
2105 if (dev->flags & IFF_ALLMULTI) {
2106 adapter->MacFilter |= MAC_ALLMCAST;
2107 DBG_ERROR("ALL_MCAST ");
2108 }
2109 if (dev->flags & IFF_MULTICAST) {
2110 adapter->MacFilter |= MAC_MCAST;
2111 DBG_ERROR("MCAST ");
2112 }
2113 DBG_ERROR("\n");
2114 }
2115 status = sxg_register_intr(adapter);
2116 if (status != STATUS_SUCCESS) {
2117 DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
2118 status);
2119 sxg_deregister_interrupt(adapter);
2120 return (status);
2121 }
2122
2123 adapter->state = ADAPT_UP;
2124
2125 /* clear any pending events, then enable interrupts */
2126 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
2127
2128 return (STATUS_SUCCESS);
2129 }
2130
2131 void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
2132 {
2133 /*
2134 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
2135 * Make sure Max is less than 0x8000.
2136 */
2137 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
2138 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
2139 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
2140 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
2141 adapter->min_aggregation),
2142 TRUE);
2143 }
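/*
 * Illustration of the register layout above (values hypothetical): with
 * SXG_MAX_AGG_SHIFT of 16, a max aggregation of 0x0200 and a min of 0x0010
 * combine to 0x02000010 in the Aggregation register. Keeping max below
 * 0x8000 leaves the top (SXG_AGG_XMT_DISABLE) bit of the upper half clear.
 */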
2144
2145 static int sxg_entry_open(struct net_device *dev)
2146 {
2147 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2148 int status;
2149 static int turn;
2150 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
2151 int i;
2152
2153 if (adapter->JumboEnabled == TRUE) {
2154 sxg_initial_rcv_data_buffers =
2155 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
2156 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
2157 SXG_JUMBO_RCV_RING_SIZE);
2158 }
2159
2160 /*
2161 * Allocate receive data buffers. We allocate a block of buffers and
2162 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
2163 */
2164
2165 for (i = 0; i < sxg_initial_rcv_data_buffers;
2166 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
2167 {
2168 status = sxg_allocate_buffer_memory(adapter,
2169 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
2170 SXG_BUFFER_TYPE_RCV);
2171 if (status != STATUS_SUCCESS)
2172 return status;
2173 }
2174 /*
2175 * NBL resource allocation can fail in the 'AllocateComplete' routine,
2176 * which doesn't return status. Make sure we got the number of buffers
2177 * we requested
2178 */
2179
2180 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
2181 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
2182 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
2183 0);
2184 return (STATUS_RESOURCES);
2185 }
2186 /*
	 * The microcode expects to be downloaded on every open.
2188 */
	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
		DBG_ERROR("sxg: %s ENTER sxg_read_config\n",
			  __func__);
2193 sxg_read_config(adapter);
2194 } else {
2195 adapter->state = ADAPT_FAIL;
2196 adapter->linkstate = LINK_DOWN;
2197 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
2198 status);
2199 }
2200 msleep(5);
2201
2202 if (turn) {
2203 sxg_second_open(adapter->netdev);
2204
2205 return STATUS_SUCCESS;
2206 }
2207
2208 turn++;
2209
2210 ASSERT(adapter);
2211 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
2212 adapter->activated);
2213 DBG_ERROR
2214 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
2215 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
2216 adapter->netdev, adapter, adapter->port);
2217
2218 netif_stop_queue(adapter->netdev);
2219
2220 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2221 if (!adapter->activated) {
2222 sxg_global.num_sxg_ports_active++;
2223 adapter->activated = 1;
2224 }
2225 /* Initialize the adapter */
2226 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
2227 status = sxg_initialize_adapter(adapter);
2228 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
2229 __func__, status);
2230
2231 if (status == STATUS_SUCCESS) {
2232 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
2233 status = sxg_if_init(adapter);
2234 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
2235 status);
2236 }
2237
2238 if (status != STATUS_SUCCESS) {
2239 if (adapter->activated) {
2240 sxg_global.num_sxg_ports_active--;
2241 adapter->activated = 0;
2242 }
2243 spin_unlock_irqrestore(&sxg_global.driver_lock,
2244 sxg_global.flags);
2245 return (status);
2246 }
2247 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
2248 sxg_set_interrupt_aggregation(adapter);
2249 napi_enable(&adapter->napi);
2250
2251 /* Enable interrupts */
2252 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2253
2254 DBG_ERROR("sxg: %s EXIT\n", __func__);
2255
2256 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2257 mod_timer(&adapter->watchdog_timer, jiffies);
2258
2259 return STATUS_SUCCESS;
2260 }
2261
2262 int sxg_second_open(struct net_device * dev)
2263 {
2264 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
2265 int status = 0;
2266
2267 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2268 netif_start_queue(adapter->netdev);
2269 adapter->state = ADAPT_UP;
2270 adapter->linkstate = LINK_UP;
2271
2272 status = sxg_initialize_adapter(adapter);
2273 sxg_set_interrupt_aggregation(adapter);
2274 napi_enable(&adapter->napi);
2275 /* Re-enable interrupts */
2276 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2277
2278 sxg_register_intr(adapter);
2279 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2280 mod_timer(&adapter->watchdog_timer, jiffies);
2281 return (STATUS_SUCCESS);
2282
2283 }
2284
2285 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2286 {
2287 u32 mmio_start = 0;
2288 u32 mmio_len = 0;
2289
2290 struct net_device *dev = pci_get_drvdata(pcidev);
2291 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2292
2293 flush_scheduled_work();
2294
2295 /* Deallocate Resources */
2296 unregister_netdev(dev);
2297 sxg_reset_interrupt_capability(adapter);
2298 sxg_free_resources(adapter);
2299
2300 ASSERT(adapter);
2301
2302 mmio_start = pci_resource_start(pcidev, 0);
2303 mmio_len = pci_resource_len(pcidev, 0);
2304
	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
		  mmio_start, mmio_len);
2307 release_mem_region(mmio_start, mmio_len);
2308
2309 mmio_start = pci_resource_start(pcidev, 2);
2310 mmio_len = pci_resource_len(pcidev, 2);
2311
	DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
		  mmio_start, mmio_len);
2314 release_mem_region(mmio_start, mmio_len);
2315
2316 pci_disable_device(pcidev);
2317
2318 DBG_ERROR("sxg: %s deallocate device\n", __func__);
2319 kfree(dev);
2320 DBG_ERROR("sxg: %s EXIT\n", __func__);
2321 }
2322
2323 static int sxg_entry_halt(struct net_device *dev)
2324 {
2325 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2326 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2327 int i;
2328 u32 RssIds, IsrCount;
2329 unsigned long flags;
2330
2331 RssIds = SXG_RSS_CPU_COUNT(adapter);
2332 IsrCount = adapter->msi_enabled ? RssIds : 1;
2333 /* Disable interrupts */
2334 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2335 SXG_DISABLE_ALL_INTERRUPTS(adapter);
2336 adapter->state = ADAPT_DOWN;
2337 adapter->linkstate = LINK_DOWN;
2338
2339 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2340 sxg_deregister_interrupt(adapter);
2341 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2342 mdelay(5000);
2343
2344 del_timer_sync(&adapter->watchdog_timer);
2345 netif_stop_queue(dev);
2346 netif_carrier_off(dev);
2347
2348 napi_disable(&adapter->napi);
2349
	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, TRUE);
2351 adapter->devflags_prev = 0;
2352 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
2353 __func__, dev->name, adapter, adapter->state);
2354
2355 spin_lock(&adapter->RcvQLock);
2356 /* Free all the blocks and the buffers, moved from remove() routine */
2357 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2358 sxg_free_rcvblocks(adapter);
2359 }
2360
2361
2362 InitializeListHead(&adapter->FreeRcvBuffers);
2363 InitializeListHead(&adapter->FreeRcvBlocks);
2364 InitializeListHead(&adapter->AllRcvBlocks);
2365 InitializeListHead(&adapter->FreeSglBuffers);
2366 InitializeListHead(&adapter->AllSglBuffers);
2367
2368 adapter->FreeRcvBufferCount = 0;
2369 adapter->FreeRcvBlockCount = 0;
2370 adapter->AllRcvBlockCount = 0;
2371 adapter->RcvBuffersOnCard = 0;
2372 adapter->PendingRcvCount = 0;
2373
2374 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2375 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2376 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2377 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2378 adapter->RcvRingZeroInfo.Context[i] = NULL;
2379 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2380 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2381
2382 spin_unlock(&adapter->RcvQLock);
2383
2384 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2385 adapter->AllSglBufferCount = 0;
2386 adapter->FreeSglBufferCount = 0;
2387 adapter->PendingXmtCount = 0;
2388 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2389 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2390 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2391
2392 for (i = 0; i < SXG_MAX_RSS; i++) {
2393 adapter->NextEvent[i] = 0;
2394 }
2395 atomic_set(&adapter->pending_allocations, 0);
2396 adapter->intrregistered = 0;
2397 sxg_remove_isr(adapter);
	DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
2399 return (STATUS_SUCCESS);
2400 }
2401
2402 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2403 {
2404 ASSERT(rq);
2405 /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
2406 switch (cmd) {
2407 case SIOCSLICSETINTAGG:
2408 {
2409 /* struct adapter_t *adapter = (struct adapter_t *)
2410 * netdev_priv(dev);
2411 */
2412 u32 data[7];
2413 u32 intagg;
2414
			if (copy_from_user(data, rq->ifr_data, sizeof(data))) {
				DBG_ERROR("copy_from_user FAILED getting "
					  "initial params\n");
				return -EFAULT;
			}
2420 intagg = data[0];
2421 printk(KERN_EMERG
2422 "%s: set interrupt aggregation to %d\n",
2423 __func__, intagg);
2424 return 0;
2425 }
2426
2427 default:
2428 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
2429 return -EOPNOTSUPP;
2430 }
2431 return 0;
2432 }
2433
2434 #define NORMAL_ETHFRAME 0
2435
2436 /*
2437 * sxg_send_packets - Send a skb packet
2438 *
2439 * Arguments:
2440 * skb - The packet to send
2441 * dev - Our linux net device that refs our adapter
2442 *
2443 * Return:
2444 * 0 regardless of outcome XXXTODO refer to e1000 driver
2445 */
2446 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2447 {
2448 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2449 u32 status = STATUS_SUCCESS;
2450
2451 /*
2452 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2453 * skb);
2454 */
2455
2456 /* Check the adapter state */
2457 switch (adapter->State) {
2458 case SXG_STATE_INITIALIZING:
2459 case SXG_STATE_HALTED:
2460 case SXG_STATE_SHUTDOWN:
2461 ASSERT(0); /* unexpected */
2462 /* fall through */
2463 case SXG_STATE_RESETTING:
2464 case SXG_STATE_SLEEP:
2465 case SXG_STATE_BOOTDIAG:
2466 case SXG_STATE_DIAG:
2467 case SXG_STATE_HALTING:
2468 status = STATUS_FAILURE;
2469 break;
2470 case SXG_STATE_RUNNING:
2471 if (adapter->LinkState != SXG_LINK_UP) {
2472 status = STATUS_FAILURE;
2473 }
2474 break;
2475 default:
2476 ASSERT(0);
2477 status = STATUS_FAILURE;
2478 }
2479 if (status != STATUS_SUCCESS) {
2480 goto xmit_fail;
2481 }
2482 /* send a packet */
2483 status = sxg_transmit_packet(adapter, skb);
2484 if (status == STATUS_SUCCESS) {
2485 goto xmit_done;
2486 }
2487
 xmit_fail:
	/* reject & complete all the packets if they can't be sent */
2490 if (status != STATUS_SUCCESS) {
2491 #if XXXTODO
2492 /* sxg_send_packets_fail(adapter, skb, status); */
2493 #else
2494 SXG_DROP_DUMB_SEND(adapter, skb);
2495 adapter->stats.tx_dropped++;
2496 return NETDEV_TX_BUSY;
2497 #endif
2498 }
2499 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2500 status);
2501
2502 xmit_done:
2503 return NETDEV_TX_OK;
2504 }
2505
2506 /*
2507 * sxg_transmit_packet
2508 *
2509 * This function transmits a single packet.
2510 *
2511 * Arguments -
2512 * adapter - Pointer to our adapter structure
2513 * skb - The packet to be sent
2514 *
2515 * Return - STATUS of send
2516 */
2517 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2518 {
2519 struct sxg_x64_sgl *pSgl;
2520 struct sxg_scatter_gather *SxgSgl;
2521 unsigned long sgl_flags;
2522 /* void *SglBuffer; */
2523 /* u32 SglBufferLength; */
2524
2525 /*
2526 * The vast majority of work is done in the shared
2527 * sxg_dumb_sgl routine.
2528 */
2529 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2530 adapter, skb, 0, 0);
2531
2532 /* Allocate a SGL buffer */
2533 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2534 if (!SxgSgl) {
2535 adapter->Stats.NoSglBuf++;
2536 adapter->stats.tx_errors++;
2537 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2538 adapter, skb, 0, 0);
2539 return (STATUS_RESOURCES);
2540 }
2541 ASSERT(SxgSgl->adapter == adapter);
2542 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2543 SglBufferLength = SXG_SGL_BUF_SIZE; */
2544 SxgSgl->VlanTag.VlanTci = 0;
2545 SxgSgl->VlanTag.VlanTpid = 0;
2546 SxgSgl->Type = SXG_SGL_DUMB;
2547 SxgSgl->DumbPacket = skb;
2548 pSgl = NULL;
2549
2550 /* Call the common sxg_dumb_sgl routine to complete the send. */
2551 return (sxg_dumb_sgl(pSgl, SxgSgl));
2552 }
2553
2554 /*
2555 * sxg_dumb_sgl
2556 *
2557 * Arguments:
2558 * pSgl -
2559 * SxgSgl - struct sxg_scatter_gather
2560 *
2561 * Return Value:
2562 * Status of send operation.
2563 */
2564 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2565 struct sxg_scatter_gather *SxgSgl)
2566 {
2567 struct adapter_t *adapter = SxgSgl->adapter;
2568 struct sk_buff *skb = SxgSgl->DumbPacket;
2569 /* For now, all dumb-nic sends go on RSS queue zero */
2570 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2571 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2572 struct sxg_cmd *XmtCmd = NULL;
2573 /* u32 Index = 0; */
2574 u32 DataLength = skb->len;
2575 /* unsigned int BufLen; */
2576 /* u32 SglOffset; */
2577 u64 phys_addr;
2578 unsigned long flags;
	unsigned long queue_id = 0;
2580
2581 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2582 pSgl, SxgSgl, 0, 0);
2583
2584 /* Set aside a pointer to the sgl */
2585 SxgSgl->pSgl = pSgl;
2586
	/*
	 * Sanity check that our SGL format is as we expect.
	 * XXX: this check is vacuous - it compares the size of
	 * struct sxg_x64_sge with itself, so it can never fire.
	 */
	ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
2589 /* Shouldn't be a vlan tag on this frame */
2590 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2591 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2592
2593 /*
2594 * From here below we work with the SGL placed in our
2595 * buffer.
2596 */
2597
2598 SxgSgl->Sgl.NumberOfElements = 1;
2599 /*
2600 * Set ucode Queue ID based on bottom bits of destination TCP port.
2601 * This Queue ID splits slowpath/dumb-nic packet processing across
2602 * multiple threads on the card to improve performance. It is split
2603 * using the TCP port to avoid out-of-order packets that can result
2604 * from multithreaded processing. We use the destination port because
2605 * we expect to be run on a server, so in nearly all cases the local
2606 * port is likely to be constant (well-known server port) and the
2607 * remote port is likely to be random. The exception to this is iSCSI,
	 * in which case we use the source port instead. Note that an
	 * original attempt at XOR'ing the source and dest ports resulted in
2610 * poor balance on NTTTCP/iometer applications since they tend to
2611 * line up (even-even, odd-odd..).
2612 */
2613
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;

		ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP && tcp_hdr(skb) &&
		    DataLength >= sizeof(struct tcphdr)) {
			queue_id = (ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
				(ntohs(tcp_hdr(skb)->source) &
				 SXG_LARGE_SEND_QUEUE_MASK) :
				(ntohs(tcp_hdr(skb)->dest) &
				 SXG_LARGE_SEND_QUEUE_MASK);
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP && tcp_hdr(skb) &&
		    DataLength >= sizeof(struct tcphdr)) {
			queue_id = (ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
				(ntohs(tcp_hdr(skb)->source) &
				 SXG_LARGE_SEND_QUEUE_MASK) :
				(ntohs(tcp_hdr(skb)->dest) &
				 SXG_LARGE_SEND_QUEUE_MASK);
		}
	}
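	/*
	 * Illustration of the hash above (mask value hypothetical): with a
	 * four-queue mask of 0x3, a flow to dest port 5001 maps to queue
	 * 5001 & 0x3 = 1, while an iSCSI flow (dest port ISCSI_PORT) is
	 * spread by its random source port instead, e.g. 49154 & 0x3 = 2.
	 */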
2640
2641 /* Grab the spinlock and acquire a command */
2642 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2643 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2644 if (XmtCmd == NULL) {
2645 /*
2646 * Call sxg_complete_slow_send to see if we can
2647 * free up any XmtRingZero entries and then try again
2648 */
2649
2650 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2651 sxg_complete_slow_send(adapter);
2652 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2653 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2654 if (XmtCmd == NULL) {
2655 adapter->Stats.XmtZeroFull++;
2656 goto abortcmd;
2657 }
2658 }
2659 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2660 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2661 /* Update stats */
2662 adapter->stats.tx_packets++;
2663 adapter->stats.tx_bytes += DataLength;
2664 #if XXXTODO /* Stats stuff */
2665 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2666 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2667 adapter->Stats.DumbXmtBcastPkts++;
2668 adapter->Stats.DumbXmtBcastBytes += DataLength;
2669 } else {
2670 adapter->Stats.DumbXmtMcastPkts++;
2671 adapter->Stats.DumbXmtMcastBytes += DataLength;
2672 }
2673 } else {
2674 adapter->Stats.DumbXmtUcastPkts++;
2675 adapter->Stats.DumbXmtUcastBytes += DataLength;
2676 }
2677 #endif
2678 /*
2679 * Fill in the command
2680 * Copy out the first SGE to the command and adjust for offset
2681 */
2682 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
2683 PCI_DMA_TODEVICE);
2684
2685 /*
2686 * SAHARA SGL WORKAROUND
2687 * See if the SGL straddles a 64k boundary. If so, skip to
2688 * the start of the next 64k boundary and continue
2689 */
2690
	if ((adapter->asictype == SAHARA_REV_A) &&
	    (SXG_INVALID_SGL(phys_addr, skb->data_len))) {
2694 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
		/* Drop this packet */
		printk(KERN_EMERG "Dropped a packet for 64k boundary problem\n");
2697 return STATUS_SUCCESS;
2698 }
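	/*
	 * Illustration of the workaround above (addresses hypothetical): a
	 * buffer mapped at physical address 0xFC00 with a length of 0x1000
	 * ends at 0x10C00 and so straddles the 64k boundary at 0x10000,
	 * which Rev A silicon cannot DMA as a single SGE.
	 */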
2699 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2700 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
2701 XmtCmd->Buffer.FirstSgeLength = DataLength;
2702 XmtCmd->Buffer.SgeOffset = 0;
2703 XmtCmd->Buffer.TotalLength = DataLength;
2704 XmtCmd->SgEntries = 1;
2705 XmtCmd->Flags = 0;
2706
2707 if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/*
		 * We need to set the checksum in the IP header to 0. This
		 * is required by the hardware.
		 */
2712 ip_hdr(skb)->check = 0x0;
2713 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
2714 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
		/* Don't know if length will require a change in case of VLAN */
2716 XmtCmd->CsumFlags.MacLen = ETH_HLEN;
2717 XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
2718 SXG_NW_HDR_LEN_SHIFT;
2719 }
	/*
	 * Advance the transmit cmd descriptor by 1.
	 * NOTE - See comments in SxgTcpOutput where we write
	 * to the XmtCmd register regarding CPU ID values and/or
	 * multiple commands.
	 * Top 16 bits specify queue_id. See comments about queue_id above
	 */
2727 /* Four queues at the moment */
2728 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2729 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
2730 adapter->Stats.XmtQLen++; /* Stats within lock */
2731 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2732 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2733 XmtCmd, pSgl, SxgSgl, 0);
2734 return STATUS_SUCCESS;
2735
2736 abortcmd:
2737 /*
2738 * NOTE - Only jump to this label AFTER grabbing the
2739 * XmtZeroLock, and DO NOT DROP IT between the
2740 * command allocation and the following abort.
2741 */
2742 if (XmtCmd) {
2743 SXG_ABORT_CMD(XmtRingInfo);
2744 }
2745 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2746
2747 /*
2748 * failsgl:
2749 * Jump to this label if failure occurs before the
2750 * XmtZeroLock is grabbed
2751 */
2752 adapter->stats.tx_errors++;
2753 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2754 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2755 /* SxgSgl->DumbPacket is the skb */
2756 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2757
2758 return STATUS_FAILURE;
2759 }
2760
2761 /*
2762 * Link management functions
2763 *
2764 * sxg_initialize_link - Initialize the link stuff
2765 *
2766 * Arguments -
2767 * adapter - A pointer to our adapter structure
2768 *
2769 * Return
2770 * status
2771 */
2772 static int sxg_initialize_link(struct adapter_t *adapter)
2773 {
2774 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2775 u32 Value;
2776 u32 ConfigData;
2777 u32 MaxFrame;
2778 u32 AxgMacReg1;
2779 int status;
2780
2781 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2782 adapter, 0, 0, 0);
2783
2784 /* Reset PHY and XGXS module */
2785 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2786
2787 /* Reset transmit configuration register */
2788 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2789
2790 /* Reset receive configuration register */
2791 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2792
2793 /* Reset all MAC modules */
2794 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2795
2796 /*
2797 * Link address 0
2798 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2799 * is stored with the first nibble (0a) in the byte 0
2800 * of the Mac address. Possibly reverse?
2801 */
2802 Value = *(u32 *) adapter->macaddr;
2803 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2804 /* also write the MAC address to the MAC. Endian is reversed. */
2805 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
	Value = (*(u16 *)&adapter->macaddr[4] & 0x0000FFFF);
2807 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2808 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2809 Value = ntohl(Value);
2810 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2811 /* Link address 1 */
2812 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2813 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2814 /* Link address 2 */
2815 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2816 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2817 /* Link address 3 */
2818 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2819 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2820
2821 /* Enable MAC modules */
2822 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2823
2824 /* Configure MAC */
2825 AxgMacReg1 = ( /* Enable XMT */
2826 AXGMAC_CFG1_XMT_EN |
2827 /* Enable receive */
2828 AXGMAC_CFG1_RCV_EN |
2829 /* short frame detection */
2830 AXGMAC_CFG1_SHORT_ASSERT |
2831 /* Verify frame length */
2832 AXGMAC_CFG1_CHECK_LEN |
2833 /* Generate FCS */
2834 AXGMAC_CFG1_GEN_FCS |
2835 /* Pad frames to 64 bytes */
2836 AXGMAC_CFG1_PAD_64);
2837
2838 if (adapter->XmtFcEnabled) {
2839 AxgMacReg1 |= AXGMAC_CFG1_XMT_PAUSE; /* Allow sending of pause */
2840 }
2841 if (adapter->RcvFcEnabled) {
2842 AxgMacReg1 |= AXGMAC_CFG1_RCV_PAUSE; /* Enable detection of pause */
2843 }
2844
2845 WRITE_REG(HwRegs->MacConfig1, AxgMacReg1, TRUE);
2846
2847 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2848 if (adapter->JumboEnabled) {
2849 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2850 }
2851 /*
2852 * AMIIM Configuration Register -
2853 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2854 * (bottom bits) of this register is used to determine the MDC frequency
2855 * as specified in the A-XGMAC Design Document. This value must not be
2856 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2857 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2858 * frequency of 2.5 MHz (see the PHY spec), we get:
2859 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2860 * This value happens to be the default value for this register, so we
2861 * really don't have to do this.
2862 */
2863 if (adapter->asictype == SAHARA_REV_B) {
2864 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000001F, TRUE);
2865 } else {
2866 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2867 }
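	/*
	 * Checking the arithmetic above: with X = 62, MDC = 312.5 MHz /
	 * (2 * (62 + 1)) ~= 2.48 MHz, just under the 2.5 MHz maximum,
	 * whereas X = 61 would give 312.5 / 124 ~= 2.52 MHz and violate
	 * the PHY spec.
	 */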
2868
2869 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2870 WRITE_REG(HwRegs->LinkStatus,
2871 (LS_PHY_CLR_RESET |
2872 LS_XGXS_ENABLE |
2873 LS_XGXS_CTL |
2874 LS_PHY_CLK_EN |
2875 LS_ATTN_ALARM),
2876 TRUE);
2877 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2878
2879 /*
2880 * Per information given by Aeluros, wait 100 ms after removing reset.
2881 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2882 * clear.
2883 */
2884 mdelay(100);
2885
2886 /* Verify the PHY has come up by checking that the Reset bit has
2887 * cleared.
2888 */
2889 status = sxg_read_mdio_reg(adapter,
2890 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2891 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2892 &Value);
2893 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2894 (Value & PMA_CONTROL1_RESET));
2895 if (status != STATUS_SUCCESS)
2896 return (STATUS_FAILURE);
2897 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2898 return (STATUS_FAILURE);
2899
2900 /* The SERDES should be initialized by now - confirm */
2901 READ_REG(HwRegs->LinkStatus, Value);
2902 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2903 return (STATUS_FAILURE);
2904
2905 /* The XAUI link should also be up - confirm */
2906 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2907 return (STATUS_FAILURE);
2908
2909 /* Initialize the PHY */
2910 status = sxg_phy_init(adapter);
2911 if (status != STATUS_SUCCESS)
2912 return (STATUS_FAILURE);
2913
2914 /* Enable the Link Alarm */
2915
2916 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2917 * LASI_CONTROL - LASI control register
2918 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2919 */
2920 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2921 LASI_CONTROL,
2922 LASI_CTL_LS_ALARM_ENABLE);
2923 if (status != STATUS_SUCCESS)
2924 return (STATUS_FAILURE);
2925
2926 /* XXXTODO - temporary - verify bit is set */
2927
2928 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2929 * LASI_CONTROL - LASI control register
2930 */
2931 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2932 LASI_CONTROL,
2933 &Value);
2934
2935 if (status != STATUS_SUCCESS)
2936 return (STATUS_FAILURE);
2937 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2938 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2939 }
2940 /* Enable receive */
2941 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2942 ConfigData = (RCV_CONFIG_ENABLE |
2943 RCV_CONFIG_ENPARSE |
2944 RCV_CONFIG_RCVBAD |
2945 RCV_CONFIG_RCVPAUSE |
2946 RCV_CONFIG_TZIPV6 |
2947 RCV_CONFIG_TZIPV4 |
2948 RCV_CONFIG_HASH_16 |
2949 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2950
2951 if (adapter->asictype == SAHARA_REV_B) {
2952 ConfigData |= (RCV_CONFIG_HIPRICTL |
2953 RCV_CONFIG_NEWSTATUSFMT);
2954 }
2955 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2956
2957 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2958
2959 /* Mark the link as down. We'll get a link event when it comes up. */
2960 sxg_link_state(adapter, SXG_LINK_DOWN);
2961
2962 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2963 adapter, 0, 0, 0);
2964 return (STATUS_SUCCESS);
2965 }
2966
2967 /*
2968 * sxg_phy_init - Initialize the PHY
2969 *
2970 * Arguments -
2971 * adapter - A pointer to our adapter structure
2972 *
2973 * Return
2974 * status
2975 */
2976 static int sxg_phy_init(struct adapter_t *adapter)
2977 {
2978 u32 Value;
2979 struct phy_ucode *p;
2980 int status;
2981
2982 DBG_ERROR("ENTER %s\n", __func__);
2983
2984 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2985 * 0xC205 - PHY ID register (?)
2986 * &Value - XXXTODO - add def
2987 */
2988 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2989 0xC205,
2990 &Value);
2991 if (status != STATUS_SUCCESS)
2992 return (STATUS_FAILURE);
2993
2994 if (Value == 0x0012) {
2995 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2996 DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
2997 microcode.\n");
2998
2999 /* Initialize AEL2005C PHY and download PHY microcode */
3000 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
3001 if (p->Addr == 0) {
3002 /* if address == 0, data == sleep time in ms */
3003 mdelay(p->Data);
3004 } else {
3005 /* write the given data to the specified address */
3006 status = sxg_write_mdio_reg(adapter,
3007 MIIM_DEV_PHY_PMA,
3008 /* PHY address */
3009 p->Addr,
3010 /* PHY data */
3011 p->Data);
3012 if (status != STATUS_SUCCESS)
3013 return (STATUS_FAILURE);
3014 }
3015 }
3016 }
3017 DBG_ERROR("EXIT %s\n", __func__);
3018
3019 return (STATUS_SUCCESS);
3020 }
3021
3022 /*
3023 * sxg_link_event - Process a link event notification from the card
3024 *
3025 * Arguments -
3026 * adapter - A pointer to our adapter structure
3027 *
3028 * Return
3029 * None
3030 */
3031 static void sxg_link_event(struct adapter_t *adapter)
3032 {
3033 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3034 struct net_device *netdev = adapter->netdev;
3035 enum SXG_LINK_STATE LinkState;
3036 int status;
3037 u32 Value;
3038
3039 if (adapter->state == ADAPT_DOWN)
3040 return;
3041 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
3042 adapter, 0, 0, 0);
3043 DBG_ERROR("ENTER %s\n", __func__);
3044
3045 /* Check the Link Status register. We should have a Link Alarm. */
3046 READ_REG(HwRegs->LinkStatus, Value);
3047 if (Value & LS_LINK_ALARM) {
3048 /*
3049 * We got a Link Status alarm. First, pause to let the
3050 * link state settle (it can bounce a number of times)
3051 */
3052 mdelay(10);
3053
3054 /* Now clear the alarm by reading the LASI status register. */
3055 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3056 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3057 /* LASI status register */
3058 LASI_STATUS,
3059 &Value);
3060 if (status != STATUS_SUCCESS) {
3061 DBG_ERROR("Error reading LASI Status MDIO register!\n");
3062 sxg_link_state(adapter, SXG_LINK_DOWN);
3063 /* ASSERT(0); */
3064 }
		/*
		 * We used to assert that the LASI_LS_ALARM bit was set, as
		 * it should be. But there appear to be cases during
		 * initialization (when the PHY is reset and re-initialized)
		 * when we get a link alarm, but the status bit is 0 when we
		 * read it. Rather than trying to assure this never happens
		 * (and never being certain), just ignore it.
		 *
		 * ASSERT(Value & LASI_STATUS_LS_ALARM);
		 */
3075
3076 /* Now get and set the link state */
3077 LinkState = sxg_get_link_state(adapter);
3078 sxg_link_state(adapter, LinkState);
3079 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
3080 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
3081 if (LinkState == SXG_LINK_UP) {
3082 netif_carrier_on(netdev);
3083 netif_tx_start_all_queues(netdev);
3084 } else {
3085 netif_tx_stop_all_queues(netdev);
3086 netif_carrier_off(netdev);
3087 }
3088 } else {
		/*
		 * XXXTODO - Assuming Link Attention is only being generated
		 * for the Link Alarm pin (and not for a XAUI Link Status
		 * change), then it's impossible to get here. Yet we've gotten
		 * here twice (under extreme conditions - bouncing the link up
		 * and down many times a second). Needs further investigation.
		 */
3096 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
3097 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
3098 /* ASSERT(0); */
3099 }
3100 DBG_ERROR("EXIT %s\n", __func__);
3101
3102 }
3103
3104 /*
3105 * sxg_get_link_state - Determine if the link is up or down
3106 *
3107 * Arguments -
3108 * adapter - A pointer to our adapter structure
3109 *
3110 * Return
3111 * Link State
3112 */
3113 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
3114 {
3115 int status;
3116 u32 Value;
3117
3118 DBG_ERROR("ENTER %s\n", __func__);
3119
3120 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
3121 adapter, 0, 0, 0);
3122
3123 /*
3124 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
3125 * the following 3 bits (from 3 different MDIO registers) are all true.
3126 */
3127
3128 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3129 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3130 /* PMA/PMD Receive Signal Detect register */
3131 PHY_PMA_RCV_DET,
3132 &Value);
3133 if (status != STATUS_SUCCESS)
3134 goto bad;
3135
3136 /* If PMA/PMD receive signal detect is 0, then the link is down */
3137 if (!(Value & PMA_RCV_DETECT))
3138 return (SXG_LINK_DOWN);
3139
3140 /* MIIM_DEV_PHY_PCS - PHY PCS module */
3141 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
3142 /* PCS 10GBASE-R Status 1 register */
3143 PHY_PCS_10G_STATUS1,
3144 &Value);
3145 if (status != STATUS_SUCCESS)
3146 goto bad;
3147
3148 /* If PCS is not locked to receive blocks, then the link is down */
3149 if (!(Value & PCS_10B_BLOCK_LOCK))
3150 return (SXG_LINK_DOWN);
3151
3152 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
3153 /* XS Lane Status register */
3154 PHY_XS_LANE_STATUS,
3155 &Value);
3156 if (status != STATUS_SUCCESS)
3157 goto bad;
3158
3159 /* If XS transmit lanes are not aligned, then the link is down */
3160 if (!(Value & XS_LANE_ALIGN))
3161 return (SXG_LINK_DOWN);
3162
3163 /* All 3 bits are true, so the link is up */
3164 DBG_ERROR("EXIT %s\n", __func__);
3165
3166 return (SXG_LINK_UP);
3167
3168 bad:
3169 /* An error occurred reading an MDIO register. This shouldn't happen. */
3170 DBG_ERROR("Error reading an MDIO register!\n");
3171 ASSERT(0);
3172 return (SXG_LINK_DOWN);
3173 }
3174
3175 static void sxg_indicate_link_state(struct adapter_t *adapter,
3176 enum SXG_LINK_STATE LinkState)
3177 {
3178 if (adapter->LinkState == SXG_LINK_UP) {
3179 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
3180 __func__);
3181 netif_start_queue(adapter->netdev);
3182 } else {
3183 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
3184 __func__);
3185 netif_stop_queue(adapter->netdev);
3186 }
3187 }
3188
/*
 * sxg_change_mtu - Change the Maximum Transfer Unit
 *
 * @returns 0 on success, negative on failure
 */
int sxg_change_mtu(struct net_device *netdev, int new_mtu)
3194 {
3195 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
3196
3197 if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
3198 return -EINVAL;
3199
	if (new_mtu == netdev->mtu)
3201 return 0;
3202
3203 netdev->mtu = new_mtu;
3204
3205 if (new_mtu == SXG_JUMBO_MTU) {
3206 adapter->JumboEnabled = TRUE;
3207 adapter->FrameSize = JUMBOMAXFRAME;
3208 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
3209 } else {
3210 adapter->JumboEnabled = FALSE;
3211 adapter->FrameSize = ETHERMAXFRAME;
3212 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
3213 }
3214
3215 sxg_entry_halt(netdev);
3216 sxg_entry_open(netdev);
3217 return 0;
3218 }
3219
3220 /*
3221 * sxg_link_state - Set the link state and if necessary, indicate.
 * This routine is the central point of processing for all link state changes.
3223 * Nothing else in the driver should alter the link state or perform
3224 * link state indications
3225 *
3226 * Arguments -
3227 * adapter - A pointer to our adapter structure
3228 * LinkState - The link state
3229 *
3230 * Return
3231 * None
3232 */
3233 static void sxg_link_state(struct adapter_t *adapter,
3234 enum SXG_LINK_STATE LinkState)
3235 {
3236 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
3237 adapter, LinkState, adapter->LinkState, adapter->State);
3238
3239 DBG_ERROR("ENTER %s\n", __func__);
3240
3241 /*
3242 * Hold the adapter lock during this routine. Maybe move
3243 * the lock to the caller.
3244 */
3245 /* IMP TODO : Check if we can survive without taking this lock */
3246 // spin_lock(&adapter->AdapterLock);
3247 if (LinkState == adapter->LinkState) {
3248 /* Nothing changed.. */
3249 // spin_unlock(&adapter->AdapterLock);
3250 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
3251 __func__, LinkState);
3252 return;
3253 }
3254 /* Save the adapter state */
3255 adapter->LinkState = LinkState;
3256
3257 /* Drop the lock and indicate link state */
3258 // spin_unlock(&adapter->AdapterLock);
3259 DBG_ERROR("EXIT #1 %s\n", __func__);
3260
3261 sxg_indicate_link_state(adapter, LinkState);
3262 }
3263
3264 /*
3265 * sxg_write_mdio_reg - Write to a register on the MDIO bus
3266 *
3267 * Arguments -
3268 * adapter - A pointer to our adapter structure
3269 * DevAddr - MDIO device number being addressed
3270 * RegAddr - register address for the specified MDIO device
3271 * Value - value to write to the MDIO register
3272 *
3273 * Return
3274 * status
3275 */
3276 static int sxg_write_mdio_reg(struct adapter_t *adapter,
3277 u32 DevAddr, u32 RegAddr, u32 Value)
3278 {
3279 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3280 /* Address operation (written to MIIM field reg) */
3281 u32 AddrOp;
3282 /* Write operation (written to MIIM field reg) */
3283 u32 WriteOp;
3284 u32 Cmd;/* Command (written to MIIM command reg) */
3285 u32 ValueRead;
3286 u32 Timeout;
3287
3288 /* DBG_ERROR("ENTER %s\n", __func__); */
3289
3290 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3291 adapter, 0, 0, 0);
3292
3293 /* Ensure values don't exceed field width */
3294 DevAddr &= 0x001F; /* 5-bit field */
3295 RegAddr &= 0xFFFF; /* 16-bit field */
3296 Value &= 0xFFFF; /* 16-bit field */
3297
3298 /* Set MIIM field register bits for an MIIM address operation */
3299 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3300 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3301 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3302 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3303
3304 /* Set MIIM field register bits for an MIIM write operation */
3305 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3306 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3307 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3308 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
3309
3310 /* Set MIIM command register bits to execute an MIIM command */
3311 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3312
3313 /* Reset the command register command bit (in case it's not 0) */
3314 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3315
3316 /* MIIM write to set the address of the specified MDIO register */
3317 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3318
3319 /* Write to MIIM Command Register to execute to address operation */
3320 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3321
3322 /* Poll AMIIM Indicator register to wait for completion */
3323 Timeout = SXG_LINK_TIMEOUT;
3324 do {
3325 udelay(100); /* Timeout in 100us units */
3326 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3327 if (--Timeout == 0) {
3328 return (STATUS_FAILURE);
3329 }
3330 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3331
3332 /* Reset the command register command bit */
3333 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3334
3335 /* MIIM write to set up an MDIO write operation */
3336 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
3337
3338 /* Write to MIIM Command Register to execute the write operation */
3339 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3340
3341 /* Poll AMIIM Indicator register to wait for completion */
3342 Timeout = SXG_LINK_TIMEOUT;
3343 do {
3344 udelay(100); /* Timeout in 100us units */
3345 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3346 if (--Timeout == 0) {
3347 return (STATUS_FAILURE);
3348 }
3349 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3350
3351 /* DBG_ERROR("EXIT %s\n", __func__); */
3352
3353 return (STATUS_SUCCESS);
3354 }
3355
3356 /*
3357 * sxg_read_mdio_reg - Read a register on the MDIO bus
3358 *
3359 * Arguments -
3360 * adapter - A pointer to our adapter structure
3361 * DevAddr - MDIO device number being addressed
3362 * RegAddr - register address for the specified MDIO device
3363 * pValue - pointer to where to put data read from the MDIO register
3364 *
3365 * Return
3366 * status
3367 */
3368 static int sxg_read_mdio_reg(struct adapter_t *adapter,
3369 u32 DevAddr, u32 RegAddr, u32 *pValue)
3370 {
3371 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3372 u32 AddrOp; /* Address operation (written to MIIM field reg) */
3373 u32 ReadOp; /* Read operation (written to MIIM field reg) */
3374 u32 Cmd; /* Command (written to MIIM command reg) */
3375 u32 ValueRead;
3376 u32 Timeout;
3377
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
		  adapter, 0, 0, 0);
	DBG_ERROR("ENTER %s\n", __func__);
3381
3382 /* Ensure values don't exceed field width */
3383 DevAddr &= 0x001F; /* 5-bit field */
3384 RegAddr &= 0xFFFF; /* 16-bit field */
3385
3386 /* Set MIIM field register bits for an MIIM address operation */
3387 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3388 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3389 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3390 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3391
3392 /* Set MIIM field register bits for an MIIM read operation */
3393 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3394 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3395 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3396 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3397
3398 /* Set MIIM command register bits to execute an MIIM command */
3399 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3400
3401 /* Reset the command register command bit (in case it's not 0) */
3402 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3403
3404 /* MIIM write to set the address of the specified MDIO register */
3405 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3406
3407 /* Write to MIIM Command Register to execute to address operation */
3408 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3409
3410 /* Poll AMIIM Indicator register to wait for completion */
3411 Timeout = SXG_LINK_TIMEOUT;
3412 do {
3413 udelay(100); /* Timeout in 100us units */
3414 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3415 if (--Timeout == 0) {
3416 DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __FUNCTION__);
3417
3418 return (STATUS_FAILURE);
3419 }
3420 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3421
3422 /* Reset the command register command bit */
3423 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3424
3425 /* MIIM write to set up an MDIO register read operation */
3426 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3427
3428 /* Write to MIIM Command Register to execute the read operation */
3429 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3430
3431 /* Poll AMIIM Indicator register to wait for completion */
3432 Timeout = SXG_LINK_TIMEOUT;
3433 do {
3434 udelay(100); /* Timeout in 100us units */
3435 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3436 if (--Timeout == 0) {
3437 DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __FUNCTION__);
3438
3439 return (STATUS_FAILURE);
3440 }
3441 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3442
3443 /* Read the MDIO register data back from the field register */
3444 READ_REG(HwRegs->MacAmiimField, *pValue);
3445 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
3446
3447 DBG_ERROR("EXIT %s\n", __FUNCTION__);
3448
3449 return (STATUS_SUCCESS);
3450 }
3451
3452 /*
3453 * Functions to obtain the CRC corresponding to the destination mac address.
3454 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3455 * the polynomial:
 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
 * + x^4 + x^2 + x + 1.
3458 *
3459 * After the CRC for the 6 bytes is generated (but before the value is
3460 * complemented), we must then transpose the value and return bits 30-23.
3461 */
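/*
 * For reference: reflecting the exponents above into the right-shifting
 * register used below yields the familiar CRC-32 constant 0xEDB88320,
 * which is the value the table-construction loop computes in 'e'.
 */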
3462 static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
3463 static u32 sxg_crc_init; /* Is table initialized */
3464
/* Construct the CRC32 table */
static void sxg_mcast_init_crc32(void)
{
	u32 c;			/* CRC shift reg */
	u32 e = 0;		/* Poly X-or pattern */
	int i;			/* counter */
	int k;			/* byte being shifted into crc */
3472
3473 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3474
3475 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3476 e |= 1L << (31 - p[i]);
3477 }
3478
3479 for (i = 1; i < 256; i++) {
3480 c = i;
3481 for (k = 8; k; k--) {
3482 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3483 }
3484 sxg_crc_table[i] = c;
3485 }
3486 }
3487
3488 /*
 * Return the MAC hash as described above.
3490 */
3491 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3492 {
3493 u32 crc;
3494 char *p;
3495 int i;
3496 unsigned char machash = 0;
3497
3498 if (!sxg_crc_init) {
3499 sxg_mcast_init_crc32();
3500 sxg_crc_init = 1;
3501 }
3502
3503 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3504 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3505 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3506 }
3507
3508 /* Return bits 1-8, transposed */
3509 for (i = 1; i < 9; i++) {
3510 machash |= (((crc >> i) & 1) << (8 - i));
3511 }
3512
3513 return (machash);
3514 }
3515
3516 static void sxg_mcast_set_mask(struct adapter_t *adapter)
3517 {
3518 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
3519
DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
3521 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3522 adapter->MulticastMask);
3523
3524 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
3525 /*
3526 * Turn on all multicast addresses. We have to do this for
3527 * promiscuous mode as well as ALLMCAST mode. It saves the
* microcode from having to keep state about the MAC configuration.
3529 */
3533 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3534 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
3538
3539 } else {
3540 /*
* Commit our multicast mask to the SLIC by writing to the
3542 * multicast address mask registers
3543 */
3544 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3545 __func__, adapter->netdev->name,
3546 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3547 ((ulong)
3548 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3549
3550 WRITE_REG(sxg_regs->McastLow,
3551 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3552 WRITE_REG(sxg_regs->McastHigh,
3553 (u32) ((adapter->
3554 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3555 }
3556 }
3557
3558 static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
3559 {
3560 unsigned char crcpoly;
3561
/* Get the CRC-based hash for the mac address */
3563 crcpoly = sxg_mcast_get_mac_hash(address);
3564
3565 /*
3566 * We only have space on the SLIC for 64 entries. Lop
3567 * off the top two bits. (2^6 = 64)
3568 */
3569 crcpoly &= 0x3F;
3570
/* OR the new bit into our 64 bit mask. */
3572 adapter->MulticastMask |= (u64) 1 << crcpoly;
3573 }
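
/*
 * Illustrative sketch (not driver code): the 6-bit value computed above
 * picks one bit out of the 64-bit MulticastMask, which
 * sxg_mcast_set_mask() later splits across the two 32-bit hardware
 * registers:
 *
 *	low  = (u32)(mask & 0xFFFFFFFF);		/. -> McastLow  ./
 *	high = (u32)((mask >> 32) & 0xFFFFFFFF);	/. -> McastHigh ./
 */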
3574
3575 /*
3576 * Function takes MAC addresses from dev_mc_list and generates the Mask
3577 */
3578
3579 static void sxg_set_mcast_addr(struct adapter_t *adapter)
3580 {
3581 struct dev_mc_list *mclist;
3582 struct net_device *dev = adapter->netdev;
3583 int i;
3584
3585 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3586 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3587 i++, mclist = mclist->next) {
sxg_mcast_set_bit(adapter, mclist->da_addr);
3589 }
3590 }
3591 sxg_mcast_set_mask(adapter);
3592 }
3593
3594 static void sxg_mcast_set_list(struct net_device *dev)
3595 {
3596 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3597
3598 ASSERT(adapter);
3599 if (dev->flags & IFF_PROMISC)
3600 adapter->MacFilter |= MAC_PROMISC;
3601 if (dev->flags & IFF_MULTICAST)
3602 adapter->MacFilter |= MAC_MCAST;
3603 if (dev->flags & IFF_ALLMULTI)
3604 adapter->MacFilter |= MAC_ALLMCAST;
3605
/* XXX: handle other flags as well */
3607 sxg_set_mcast_addr(adapter);
3608 }
3609
3610 void sxg_free_sgl_buffers(struct adapter_t *adapter)
3611 {
3612 struct list_entry *ple;
3613 struct sxg_scatter_gather *Sgl;
3614
while (!IsListEmpty(&adapter->AllSglBuffers)) {
3616 ple = RemoveHeadList(&adapter->AllSglBuffers);
3617 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3618 kfree(Sgl);
3619 adapter->AllSglBufferCount--;
3620 }
3621 }
3622
3623 void sxg_free_rcvblocks(struct adapter_t *adapter)
3624 {
3625 u32 i;
3626 void *temp_RcvBlock;
3627 struct list_entry *ple;
3628 struct sxg_rcv_block_hdr *RcvBlockHdr;
3629 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3630 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3631 (adapter->state == SXG_STATE_HALTING));
while (!IsListEmpty(&adapter->AllRcvBlocks)) {
3633
3634 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3635 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3636
if (RcvBlockHdr->VirtualAddress) {
3638 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3639
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3642 RcvDataBufferHdr =
3643 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3644 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3645 }
3646 }
3647
3648 pci_free_consistent(adapter->pcidev,
3649 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3650 RcvBlockHdr->VirtualAddress,
3651 RcvBlockHdr->PhysicalAddress);
3652 adapter->AllRcvBlockCount--;
3653 }
3654 ASSERT(adapter->AllRcvBlockCount == 0);
3655 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3656 adapter, 0, 0, 0);
3657 }

void sxg_free_mcast_addrs(struct adapter_t *adapter)
3659 {
3660 struct sxg_multicast_address *address;
while (adapter->MulticastAddrs) {
3662 address = adapter->MulticastAddrs;
3663 adapter->MulticastAddrs = address->Next;
3664 kfree(address);
3665 }
3666
adapter->MulticastMask = 0;
3668 }
3669
3670 void sxg_unmap_resources(struct adapter_t *adapter)
3671 {
if (adapter->HwRegs) {
iounmap((void *)adapter->HwRegs);
}
if (adapter->UcodeRegs) {
iounmap((void *)adapter->UcodeRegs);
}
3678
3682 }
3683
3684
3685
3686 /*
3687 * sxg_free_resources - Free everything allocated in SxgAllocateResources
3688 *
3689 * Arguments -
3690 * adapter - A pointer to our adapter structure
3691 *
3692 * Return
3693 * none
3694 */
3695 void sxg_free_resources(struct adapter_t *adapter)
3696 {
3697 u32 RssIds, IsrCount;
3698 RssIds = SXG_RSS_CPU_COUNT(adapter);
3699 IsrCount = adapter->msi_enabled ? RssIds : 1;
3700
3701 if (adapter->BasicAllocations == FALSE) {
3702 /*
3703 * No allocations have been made, including spinlocks,
3704 * or listhead initializations. Return.
3705 */
3706 return;
3707 }
3708
3709 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3710 sxg_free_rcvblocks(adapter);
3711 }
3712 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3713 sxg_free_sgl_buffers(adapter);
3714 }
3715
3716 if (adapter->XmtRingZeroIndex) {
3717 pci_free_consistent(adapter->pcidev,
3718 sizeof(u32),
3719 adapter->XmtRingZeroIndex,
3720 adapter->PXmtRingZeroIndex);
3721 }
3722 if (adapter->Isr) {
3723 pci_free_consistent(adapter->pcidev,
3724 sizeof(u32) * IsrCount,
3725 adapter->Isr, adapter->PIsr);
3726 }
3727
3728 if (adapter->EventRings) {
3729 pci_free_consistent(adapter->pcidev,
3730 sizeof(struct sxg_event_ring) * RssIds,
3731 adapter->EventRings, adapter->PEventRings);
3732 }
3733 if (adapter->RcvRings) {
3734 pci_free_consistent(adapter->pcidev,
3735 sizeof(struct sxg_rcv_ring) * 1,
3736 adapter->RcvRings,
3737 adapter->PRcvRings);
3738 adapter->RcvRings = NULL;
3739 }
3740
if (adapter->XmtRings) {
3742 pci_free_consistent(adapter->pcidev,
3743 sizeof(struct sxg_xmt_ring) * 1,
3744 adapter->XmtRings,
3745 adapter->PXmtRings);
3746 adapter->XmtRings = NULL;
3747 }
3748
3749 if (adapter->ucode_stats) {
/* note: the dma_addr argument comes before the size in pci_unmap_single() */
pci_unmap_single(adapter->pcidev, adapter->pucode_stats,
sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE);
3753 adapter->ucode_stats = NULL;
3754 }
3755
3756
3757 /* Unmap register spaces */
3758 sxg_unmap_resources(adapter);
3759
3760 sxg_free_mcast_addrs(adapter);
3761
3762 adapter->BasicAllocations = FALSE;
3763
3764 }
3765
3766 /*
3767 * sxg_allocate_complete -
3768 *
3769 * This routine is called when a memory allocation has completed.
3770 *
3771 * Arguments -
3772 * struct adapter_t * - Our adapter structure
3773 * VirtualAddress - Memory virtual address
3774 * PhysicalAddress - Memory physical address
3775 * Length - Length of memory allocated (or 0)
3776 * Context - The type of buffer allocated
3777 *
3778 * Return
* int status
3780 */
3781 static int sxg_allocate_complete(struct adapter_t *adapter,
3782 void *VirtualAddress,
3783 dma_addr_t PhysicalAddress,
3784 u32 Length, enum sxg_buffer_type Context)
3785 {
3786 int status = 0;
3787 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3788 adapter, VirtualAddress, Length, Context);
3789 ASSERT(atomic_read(&adapter->pending_allocations));
3790 atomic_dec(&adapter->pending_allocations);
3791
3792 switch (Context) {
3793
3794 case SXG_BUFFER_TYPE_RCV:
3795 status = sxg_allocate_rcvblock_complete(adapter,
3796 VirtualAddress,
3797 PhysicalAddress, Length);
3798 break;
3799 case SXG_BUFFER_TYPE_SGL:
3800 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
3801 VirtualAddress,
3802 PhysicalAddress, Length);
3803 break;
3804 }
3805 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3806 adapter, VirtualAddress, Length, Context);
3807
3808 return status;
3809 }
3810
3811 /*
3812 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3813 * synchronous and asynchronous buffer allocations
3814 *
3815 * Arguments -
3816 * adapter - A pointer to our adapter structure
3817 * Size - block size to allocate
3818 * BufferType - Type of buffer to allocate
3819 *
3820 * Return
3821 * int
3822 */
3823 static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3824 u32 Size, enum sxg_buffer_type BufferType)
3825 {
3826 int status;
3827 void *Buffer;
3828 dma_addr_t pBuffer;
3829
3830 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3831 adapter, Size, BufferType, 0);
/*
 * Count this allocation as pending. Allocations are only expected
 * while the driver is in the INITIALIZING or RUNNING state; note
 * that no adapter lock is actually taken here.
 */
3837
3838 atomic_inc(&adapter->pending_allocations);
3839
if (BufferType != SXG_BUFFER_TYPE_SGL)
3841 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3842 else {
3843 Buffer = kzalloc(Size, GFP_ATOMIC);
pBuffer = 0;
3845 }
3846 if (Buffer == NULL) {
3847 /*
3848 * Decrement the AllocationsPending count while holding
3849 * the lock. Pause processing relies on this
3850 */
3851 atomic_dec(&adapter->pending_allocations);
3852 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3853 adapter, Size, BufferType, 0);
3854 return (STATUS_RESOURCES);
3855 }
3856 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3857
3858 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3859 adapter, Size, BufferType, status);
3860 return status;
3861 }
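
/*
 * Why two allocators above: receive blocks are handed to the card for
 * DMA, so they come from pci_alloc_consistent() and carry a bus address.
 * SGL buffers are host-side bookkeeping at this point, so a plain
 * kzalloc() suffices and no bus address is recorded for them here.
 */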
3862
3863 /*
3864 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3865 * block allocation
3866 *
3867 * Arguments -
3868 * adapter - A pointer to our adapter structure
3869 * RcvBlock - receive block virtual address
3870 * PhysicalAddress - Physical address
3871 * Length - Memory length
3872 *
* Return
* int status
*/
3875 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
3876 void *RcvBlock,
3877 dma_addr_t PhysicalAddress,
3878 u32 Length)
3879 {
3880 u32 i;
3881 u32 BufferSize = adapter->ReceiveBufferSize;
3882 u64 Paddr;
3883 void *temp_RcvBlock;
3884 struct sxg_rcv_block_hdr *RcvBlockHdr;
3885 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3886 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3887 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3888
3889 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3890 adapter, RcvBlock, Length, 0);
3891 if (RcvBlock == NULL) {
3892 goto fail;
3893 }
3894 memset(RcvBlock, 0, Length);
3895 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3896 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3897 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
3898 /*
3899 * First, initialize the contained pool of receive data buffers.
3900 * This initialization requires NBL/NB/MDL allocations, if any of them
3901 * fail, free the block and return without queueing the shared memory
3902 */
3904 temp_RcvBlock = RcvBlock;
3905 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3906 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3907 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3908 temp_RcvBlock;
3909 /* For FREE macro assertion */
3910 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3911 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3912 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3913 goto fail;
3914
3915 }
3916
3917 /*
3918 * Place this entire block of memory on the AllRcvBlocks queue so it
* can be freed later.
3920 */
3921
3922 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3923 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
3924 RcvBlockHdr->VirtualAddress = RcvBlock;
3925 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3926 spin_lock(&adapter->RcvQLock);
3927 adapter->AllRcvBlockCount++;
3928 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3929 spin_unlock(&adapter->RcvQLock);
3930
3931 /* Now free the contained receive data buffers that we
3932 * initialized above */
3933 temp_RcvBlock = RcvBlock;
3934 for (i = 0, Paddr = PhysicalAddress;
3935 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3936 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3937 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3938 RcvDataBufferHdr =
3939 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3940 spin_lock(&adapter->RcvQLock);
3941 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3942 spin_unlock(&adapter->RcvQLock);
3943 }
3944
3945 /* Locate the descriptor block and put it on a separate free queue */
3946 RcvDescriptorBlock =
3947 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
3948 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
3949 (SXG_RCV_DATA_HDR_SIZE));
3950 RcvDescriptorBlockHdr =
3951 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
3952 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
3953 (SXG_RCV_DATA_HDR_SIZE));
3954 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3955 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3956 spin_lock(&adapter->RcvQLock);
3957 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3958 spin_unlock(&adapter->RcvQLock);
3959 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3960 adapter, RcvBlock, Length, 0);
3961 return STATUS_SUCCESS;
3962 fail:
3963 /* Free any allocated resources */
3964 if (RcvBlock) {
3965 temp_RcvBlock = RcvBlock;
3966 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3967 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3968 RcvDataBufferHdr =
3969 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3970 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3971 }
3972 pci_free_consistent(adapter->pcidev,
3973 Length, RcvBlock, PhysicalAddress);
3974 }
3975 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3976 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3977 adapter, adapter->FreeRcvBufferCount,
3978 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3979 adapter->Stats.NoMem++;
3980 /* As allocation failed, free all previously allocated blocks..*/
3981 //sxg_free_rcvblocks(adapter);
3982
3983 return STATUS_RESOURCES;
3984 }
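
/*
 * Rough layout of one receive block, as implied by the offset macros
 * used above (all offsets derived from SXG_RCV_DATA_HDR_SIZE):
 *
 *	+---------------------------------------------+
 *	| SXG_RCV_DESCRIPTORS_PER_BLOCK buffer headers |
 *	+---------------------------------------------+
 *	| receive descriptor block (card-visible)      |
 *	+---------------------------------------------+
 *	| descriptor block header / block header       |
 *	+---------------------------------------------+
 *
 * A single pci_alloc_consistent() region therefore backs the data
 * buffers, the descriptor block the card reads, and the host-side
 * headers used to track it all.
 */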
3985
3986 /*
3987 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3988 *
3989 * Arguments -
3990 * adapter - A pointer to our adapter structure
3991 * SxgSgl - struct sxg_scatter_gather buffer
3992 * PhysicalAddress - Physical address
3993 * Length - Memory length
3994 *
3995 * Return
3996 */
3997 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
3998 struct sxg_scatter_gather *SxgSgl,
3999 dma_addr_t PhysicalAddress,
4000 u32 Length)
4001 {
4002 unsigned long sgl_flags;
4003 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
4004 adapter, SxgSgl, Length, 0);
4005 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
4006 adapter->AllSglBufferCount++;
4008 SxgSgl->PhysicalAddress = PhysicalAddress;
4009 /* Initialize backpointer once */
4010 SxgSgl->adapter = adapter;
4011 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
4012 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
4013 SxgSgl->State = SXG_BUFFER_BUSY;
4014 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
4015 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
4016 adapter, SxgSgl, Length, 0);
4017 }
4018
4019
4020 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
4021 {
4034
struct net_device *dev = adapter->netdev;

/* Bail out rather than crash in netif_running() below */
if (!dev) {
printk(KERN_ERR "sxg: Dev is Null\n");
return -ENODEV;
}

DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);

if (netif_running(dev)) {
return -EBUSY;
}
4049
4050 if (!(adapter->currmacaddr[0] ||
4051 adapter->currmacaddr[1] ||
4052 adapter->currmacaddr[2] ||
4053 adapter->currmacaddr[3] ||
4054 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
4055 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
4056 }
4057 if (adapter->netdev) {
4058 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
4059 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
4060 }
4061 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
4062 sxg_dbg_macaddrs(adapter);
4063
4064 return 0;
4065 }
4066
4067 #if XXXTODO
4068 static int sxg_mac_set_address(struct net_device *dev, void *ptr)
4069 {
4070 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
4071 struct sockaddr *addr = ptr;
4072
4073 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
4074
4075 if (netif_running(dev)) {
4076 return -EBUSY;
4077 }
4078 if (!adapter) {
4079 return -EBUSY;
4080 }
4081 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
4082 __func__, adapter->netdev->name, adapter->currmacaddr[0],
4083 adapter->currmacaddr[1], adapter->currmacaddr[2],
4084 adapter->currmacaddr[3], adapter->currmacaddr[4],
4085 adapter->currmacaddr[5]);
4086 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4087 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
4088 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
4089 __func__, adapter->netdev->name, adapter->currmacaddr[0],
4090 adapter->currmacaddr[1], adapter->currmacaddr[2],
4091 adapter->currmacaddr[3], adapter->currmacaddr[4],
4092 adapter->currmacaddr[5]);
4093
4094 sxg_config_set(adapter, TRUE);
4095 return 0;
4096 }
4097 #endif
4098
4099 /*
4100 * SXG DRIVER FUNCTIONS (below)
4101 *
4102 * sxg_initialize_adapter - Initialize adapter
4103 *
4104 * Arguments -
4105 * adapter - A pointer to our adapter structure
4106 *
4107 * Return - int
4108 */
4109 static int sxg_initialize_adapter(struct adapter_t *adapter)
4110 {
4111 u32 RssIds, IsrCount;
4112 u32 i;
4113 int status;
4114 int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;
4115
4116 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
4117 adapter, 0, 0, 0);
4118
4119 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
4120 IsrCount = adapter->msi_enabled ? RssIds : 1;
4121
4122 /*
4123 * Sanity check SXG_UCODE_REGS structure definition to
4124 * make sure the length is correct
4125 */
4126 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
4127
4128 /* Disable interrupts */
4129 SXG_DISABLE_ALL_INTERRUPTS(adapter);
4130
4131 /* Set MTU */
4132 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
4133 (adapter->FrameSize == JUMBOMAXFRAME));
4134 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
4135
4136 /* Set event ring base address and size */
4137 WRITE_REG64(adapter,
4138 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
4139 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
4140
4141 /* Per-ISR initialization */
4142 for (i = 0; i < IsrCount; i++) {
4143 u64 Addr;
4144 /* Set interrupt status pointer */
4145 Addr = adapter->PIsr + (i * sizeof(u32));
4146 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
4147 }
4148
4149 /* XMT ring zero index */
4150 WRITE_REG64(adapter,
4151 adapter->UcodeRegs[0].SPSendIndex,
4152 adapter->PXmtRingZeroIndex, 0);
4153
4154 /* Per-RSS initialization */
4155 for (i = 0; i < RssIds; i++) {
4156 /* Release all event ring entries to the Microcode */
4157 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
4158 TRUE);
4159 }
4160
4161 /* Transmit ring base and size */
4162 WRITE_REG64(adapter,
4163 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
4164 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
4165
4166 /* Receive ring base and size */
4167 WRITE_REG64(adapter,
4168 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
4169 if (adapter->JumboEnabled == TRUE)
4170 sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
4171 WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);
4172
4173 /* Populate the card with receive buffers */
4174 sxg_stock_rcv_buffers(adapter);
4175
4176 /*
4177 * Initialize checksum offload capabilities. At the moment we always
4178 * enable IP and TCP receive checksums on the card. Depending on the
4179 * checksum configuration specified by the user, we can choose to
4180 * report or ignore the checksum information provided by the card.
4181 */
4182 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
4183 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
4184
4185 adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED );
4186
4187 /* Initialize the MAC, XAUI */
4188 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
4189 status = sxg_initialize_link(adapter);
4190 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
4191 status);
4192 if (status != STATUS_SUCCESS) {
4193 return (status);
4194 }
4195 /*
4196 * Initialize Dead to FALSE.
4197 * SlicCheckForHang or SlicDumpThread will take it from here.
4198 */
4199 adapter->Dead = FALSE;
4200 adapter->PingOutstanding = FALSE;
4201 adapter->XmtFcEnabled = TRUE;
4202 adapter->RcvFcEnabled = TRUE;
4203
4204 adapter->State = SXG_STATE_RUNNING;
4205
4206 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
4207 adapter, 0, 0, 0);
4208 return (STATUS_SUCCESS);
4209 }
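
/*
 * Bring-up order used above, for quick reference: mask interrupts, set
 * the MTU, program the event ring, point each ISR at its status word,
 * publish the transmit-index location, release event ring entries, set
 * the transmit/receive ring bases and sizes, stock receive buffers,
 * configure receive checksums, and finally bring up the link (MAC/XAUI).
 */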
4210
4211 /*
4212 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
4213 * the card. The caller should hold the RcvQLock
4214 *
4215 * Arguments -
4216 * adapter - A pointer to our adapter structure
4217 * RcvDescriptorBlockHdr - Descriptor block to fill
4218 *
4219 * Return
4220 * status
4221 */
4222 static int sxg_fill_descriptor_block(struct adapter_t *adapter,
4223 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
4224 {
int i; /* signed: the no_memory unwind counts down through zero */
4226 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4227 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
4228 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
4229 struct sxg_cmd *RingDescriptorCmd;
4230 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4231
4232 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
4233 adapter, adapter->RcvBuffersOnCard,
4234 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4235
4236 ASSERT(RcvDescriptorBlockHdr);
4237
4238 /*
4239 * If we don't have the resources to fill the descriptor block,
4240 * return failure
4241 */
4242 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
4243 SXG_RING_FULL(RcvRingInfo)) {
4244 adapter->Stats.NoMem++;
4245 return (STATUS_FAILURE);
4246 }
4247 /* Get a ring descriptor command */
4248 SXG_GET_CMD(RingZero,
4249 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
4250 ASSERT(RingDescriptorCmd);
4251 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
4252 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
4253 RcvDescriptorBlockHdr->VirtualAddress;
4254
4255 /* Fill in the descriptor block */
4256 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
4257 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4258 ASSERT(RcvDataBufferHdr);
4260 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
4261 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
4262 adapter->ReceiveBufferSize);
if (RcvDataBufferHdr->skb)
4264 RcvDataBufferHdr->SxgDumbRcvPacket =
4265 RcvDataBufferHdr->skb;
4266 else
4267 goto no_memory;
4268 }
4269 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
4270 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
4271 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
4272 (void *)RcvDataBufferHdr;
4273
4274 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4275 RcvDataBufferHdr->PhysicalAddress;
4276 }
4277 /* Add the descriptor block to receive descriptor ring 0 */
4278 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
4279
4280 /*
4281 * RcvBuffersOnCard is not protected via the receive lock (see
* sxg_process_event_queue). We don't want to grab a lock every time a
4283 * buffer is returned to us, so we use atomic interlocked functions
4284 * instead.
4285 */
4286 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
4287
4288 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
4289 RcvDescriptorBlockHdr,
4290 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
4291
WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, TRUE);
4293 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
4294 adapter, adapter->RcvBuffersOnCard,
4295 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4296 return (STATUS_SUCCESS);
4297 no_memory:
for (; i >= 0; i--) {
4299 if (RcvDescriptorBlock->Descriptors[i].VirtualAddress) {
4300 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
4301 RcvDescriptorBlock->Descriptors[i].
4302 VirtualAddress;
4303 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4304 (dma_addr_t)NULL;
4305 RcvDescriptorBlock->Descriptors[i].VirtualAddress=NULL;
4306 }
4307 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4308 }
4309 RcvDescriptorBlockHdr->State = SXG_BUFFER_FREE;
4310 SXG_RETURN_CMD(RingZero, RcvRingInfo, RingDescriptorCmd,
4311 RcvDescriptorBlockHdr);
4312
4313 return (-ENOMEM);
4314 }
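
/*
 * Note on the no_memory unwind above: on entry to that loop 'i' is the
 * index that failed and RcvDataBufferHdr still points at the buffer just
 * obtained for it, so the unconditional SXG_FREE_RCV_DATA_BUFFER returns
 * that buffer on the first pass; later passes return the buffers already
 * recorded in the descriptor entries. This is also why 'i' must be a
 * signed int: the "i >= 0" test would never fail for a u32.
 */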
4315
4316 /*
4317 * sxg_stock_rcv_buffers - Stock the card with receive buffers
4318 *
4319 * Arguments -
4320 * adapter - A pointer to our adapter structure
4321 *
4322 * Return
4323 * None
4324 */
4325 static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
4326 {
4327 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4328 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
4329 int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;
4330
4331 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
4332 adapter, adapter->RcvBuffersOnCard,
4333 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4334 /*
4335 * First, see if we've got less than our minimum threshold of
4336 * receive buffers, there isn't an allocation in progress, and
4337 * we haven't exceeded our maximum.. get another block of buffers
4338 * None of this needs to be SMP safe. It's round numbers.
4339 */
4340 if (adapter->JumboEnabled == TRUE)
4341 sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
4342 if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
4343 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
4344 (atomic_read(&adapter->pending_allocations) == 0)) {
4345 sxg_allocate_buffer_memory(adapter,
4346 SXG_RCV_BLOCK_SIZE
4347 (SXG_RCV_DATA_HDR_SIZE),
4348 SXG_BUFFER_TYPE_RCV);
4349 }
4350 /* Now grab the RcvQLock lock and proceed */
4351 spin_lock(&adapter->RcvQLock);
4352 if (adapter->JumboEnabled)
4353 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
4354 while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
4355 struct list_entry *_ple;
4356
4357 /* Get a descriptor block */
4358 RcvDescriptorBlockHdr = NULL;
4359 if (adapter->FreeRcvBlockCount) {
4360 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
4361 RcvDescriptorBlockHdr =
4362 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
4363 FreeList);
4364 adapter->FreeRcvBlockCount--;
4365 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
4366 }
4367
4368 if (RcvDescriptorBlockHdr == NULL) {
4369 /* Bail out.. */
4370 adapter->Stats.NoMem++;
4371 break;
4372 }
4373 /* Fill in the descriptor block and give it to the card */
4374 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
4375 STATUS_FAILURE) {
4376 /* Free the descriptor block */
4377 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4378 RcvDescriptorBlockHdr);
4379 break;
4380 }
4381 }
4382 spin_unlock(&adapter->RcvQLock);
4383 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4384 adapter, adapter->RcvBuffersOnCard,
4385 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4386 }
4387
4388 /*
4389 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4390 * completed by the microcode
4391 *
4392 * Arguments -
4393 * adapter - A pointer to our adapter structure
4394 * Index - Where the microcode is up to
4395 *
4396 * Return
4397 * None
4398 */
4399 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
4400 unsigned char Index)
4401 {
4402 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4403 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4404 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4405 struct sxg_cmd *RingDescriptorCmd;
4406
4407 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4408 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4409
4410 /* Now grab the RcvQLock lock and proceed */
4411 spin_lock(&adapter->RcvQLock);
4412 ASSERT(Index != RcvRingInfo->Tail);
4413 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4414 RcvRingInfo->Tail) > 3) {
4415 /*
4416 * Locate the current Cmd (ring descriptor entry), and
4417 * associated receive descriptor block, and advance
4418 * the tail
4419 */
4420 SXG_RETURN_CMD(RingZero,
4421 RcvRingInfo,
4422 RingDescriptorCmd, RcvDescriptorBlockHdr);
4423 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4424 RcvRingInfo->Head, RcvRingInfo->Tail,
4425 RingDescriptorCmd, RcvDescriptorBlockHdr);
4426
4427 /* Clear the SGL field */
4428 RingDescriptorCmd->Sgl = 0;
4429 /*
4430 * Attempt to refill it and hand it right back to the
4431 * card. If we fail to refill it, free the descriptor block
4432 * header. The card will be restocked later via the
4433 * RcvBuffersOnCard test
4434 */
4435 if (sxg_fill_descriptor_block(adapter,
4436 RcvDescriptorBlockHdr) == STATUS_FAILURE)
4437 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4438 RcvDescriptorBlockHdr);
4439 }
4440 spin_unlock(&adapter->RcvQLock);
4441 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4442 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4443 }
4444
4445 /*
4446 * Read the statistics which the card has been maintaining.
4447 */
4448 void sxg_collect_statistics(struct adapter_t *adapter)
4449 {
/* Nothing to collect if the stats buffer was never set up */
if (!adapter->ucode_stats)
return;
WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
adapter->pucode_stats, 0);
adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
4456 }
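
/*
 * The statistics handshake above is one-way: writing the DMA address of
 * the host buffer to the GetUcodeStats register asks the microcode to
 * DMA a fresh struct sxg_ucode_stats into it, and the host then reads
 * the (possibly slightly stale) counters out of that buffer.
 */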
4457
4458 static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4459 {
4460 struct adapter_t *adapter = netdev_priv(dev);
4461
4462 sxg_collect_statistics(adapter);
4463 return (&adapter->stats);
4464 }
4465
4466 static void sxg_watchdog(unsigned long data)
4467 {
4468 struct adapter_t *adapter = (struct adapter_t *) data;
4469
4470 if (adapter->state != ADAPT_DOWN) {
4471 sxg_link_event(adapter);
4472 /* Reset the timer */
4473 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4474 }
4475 }
4476
static void sxg_update_link_status(struct work_struct *work)
4478 {
struct adapter_t *adapter = container_of(work, struct adapter_t,
update_link_status);
4481 if (likely(adapter->link_status_changed)) {
4482 sxg_link_event(adapter);
4483 adapter->link_status_changed = 0;
4484 }
4485 }
4486
4487 static struct pci_driver sxg_driver = {
4488 .name = sxg_driver_name,
4489 .id_table = sxg_pci_tbl,
4490 .probe = sxg_entry_probe,
4491 .remove = sxg_entry_remove,
4492 #if SXG_POWER_MANAGEMENT_ENABLED
4493 .suspend = sxgpm_suspend,
4494 .resume = sxgpm_resume,
4495 #endif
4496 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
4497 };
4498
4499 static int __init sxg_module_init(void)
4500 {
4501 sxg_init_driver();
4502
4503 if (debug >= 0)
4504 sxg_debug = debug;
4505
4506 return pci_register_driver(&sxg_driver);
4507 }
4508
4509 static void __exit sxg_module_cleanup(void)
4510 {
4511 pci_unregister_driver(&sxg_driver);
4512 }
4513
4514 module_init(sxg_module_init);
4515 module_exit(sxg_module_cleanup);