2 * Copyright (C) 2003 - 2009 NetXen, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
23 * Contact Information:
27 * Cupertino, CA 95014-0701
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
41 #include <linux/ipv6.h>
43 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID
);
47 char netxen_nic_driver_name
[] = "netxen_nic";
48 static char netxen_nic_driver_string
[] = "NetXen Network Driver version "
49 NETXEN_NIC_LINUX_VERSIONID
;
51 static int port_mode
= NETXEN_PORT_MODE_AUTO_NEG
;
53 /* Default to restricted 1G auto-neg mode */
54 static int wol_port_mode
= 5;
56 static int use_msi
= 1;
58 static int use_msi_x
= 1;
60 /* Local functions to NetXen NIC driver */
61 static int __devinit
netxen_nic_probe(struct pci_dev
*pdev
,
62 const struct pci_device_id
*ent
);
63 static void __devexit
netxen_nic_remove(struct pci_dev
*pdev
);
64 static int netxen_nic_open(struct net_device
*netdev
);
65 static int netxen_nic_close(struct net_device
*netdev
);
66 static int netxen_nic_xmit_frame(struct sk_buff
*, struct net_device
*);
67 static void netxen_tx_timeout(struct net_device
*netdev
);
68 static void netxen_tx_timeout_task(struct work_struct
*work
);
69 static void netxen_watchdog(unsigned long);
70 static int netxen_nic_poll(struct napi_struct
*napi
, int budget
);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void netxen_nic_poll_controller(struct net_device
*netdev
);
74 static irqreturn_t
netxen_intr(int irq
, void *data
);
75 static irqreturn_t
netxen_msi_intr(int irq
, void *data
);
76 static irqreturn_t
netxen_msix_intr(int irq
, void *data
);
78 /* PCI Device ID Table */
79 #define ENTRY(device) \
80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83 static struct pci_device_id netxen_pci_tbl
[] __devinitdata
= {
84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR
),
85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4
),
86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU
),
87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ
),
88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ
),
89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT
),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2
),
91 ENTRY(PCI_DEVICE_ID_NX3031
),
95 MODULE_DEVICE_TABLE(pci
, netxen_pci_tbl
);
97 static struct workqueue_struct
*netxen_workq
;
98 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
99 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
101 static void netxen_watchdog(unsigned long);
103 static uint32_t crb_cmd_producer
[4] = {
104 CRB_CMD_PRODUCER_OFFSET
, CRB_CMD_PRODUCER_OFFSET_1
,
105 CRB_CMD_PRODUCER_OFFSET_2
, CRB_CMD_PRODUCER_OFFSET_3
109 netxen_nic_update_cmd_producer(struct netxen_adapter
*adapter
,
110 struct nx_host_tx_ring
*tx_ring
, u32 producer
)
112 adapter
->pci_write_normalize(adapter
,
113 tx_ring
->crb_cmd_producer
, producer
);
116 static uint32_t crb_cmd_consumer
[4] = {
117 CRB_CMD_CONSUMER_OFFSET
, CRB_CMD_CONSUMER_OFFSET_1
,
118 CRB_CMD_CONSUMER_OFFSET_2
, CRB_CMD_CONSUMER_OFFSET_3
122 netxen_nic_update_cmd_consumer(struct netxen_adapter
*adapter
,
123 struct nx_host_tx_ring
*tx_ring
, u32 consumer
)
125 adapter
->pci_write_normalize(adapter
,
126 tx_ring
->crb_cmd_consumer
, consumer
);
129 static uint32_t msi_tgt_status
[8] = {
130 ISR_INT_TARGET_STATUS
, ISR_INT_TARGET_STATUS_F1
,
131 ISR_INT_TARGET_STATUS_F2
, ISR_INT_TARGET_STATUS_F3
,
132 ISR_INT_TARGET_STATUS_F4
, ISR_INT_TARGET_STATUS_F5
,
133 ISR_INT_TARGET_STATUS_F6
, ISR_INT_TARGET_STATUS_F7
136 static struct netxen_legacy_intr_set legacy_intr
[] = NX_LEGACY_INTR_CONFIG
;
138 static inline void netxen_nic_disable_int(struct nx_host_sds_ring
*sds_ring
)
140 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
142 adapter
->pci_write_normalize(adapter
, sds_ring
->crb_intr_mask
, 0);
145 static inline void netxen_nic_enable_int(struct nx_host_sds_ring
*sds_ring
)
147 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
149 adapter
->pci_write_normalize(adapter
, sds_ring
->crb_intr_mask
, 0x1);
151 if (!NETXEN_IS_MSI_FAMILY(adapter
))
152 adapter
->pci_write_immediate(adapter
,
153 adapter
->legacy_intr
.tgt_mask_reg
, 0xfbff);
157 netxen_alloc_sds_rings(struct netxen_recv_context
*recv_ctx
, int count
)
159 int size
= sizeof(struct nx_host_sds_ring
) * count
;
161 recv_ctx
->sds_rings
= kzalloc(size
, GFP_KERNEL
);
163 return (recv_ctx
->sds_rings
== NULL
);
167 netxen_free_sds_rings(struct netxen_recv_context
*recv_ctx
)
169 if (recv_ctx
->sds_rings
!= NULL
)
170 kfree(recv_ctx
->sds_rings
);
174 netxen_napi_add(struct netxen_adapter
*adapter
, struct net_device
*netdev
)
177 struct nx_host_sds_ring
*sds_ring
;
178 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
180 if (adapter
->flags
& NETXEN_NIC_MSIX_ENABLED
)
181 adapter
->max_sds_rings
= (num_online_cpus() >= 4) ? 4 : 2;
183 adapter
->max_sds_rings
= 1;
185 if (netxen_alloc_sds_rings(recv_ctx
, adapter
->max_sds_rings
))
188 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
189 sds_ring
= &recv_ctx
->sds_rings
[ring
];
190 netif_napi_add(netdev
, &sds_ring
->napi
,
191 netxen_nic_poll
, NETXEN_NETDEV_WEIGHT
);
198 netxen_napi_enable(struct netxen_adapter
*adapter
)
201 struct nx_host_sds_ring
*sds_ring
;
202 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
204 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
205 sds_ring
= &recv_ctx
->sds_rings
[ring
];
206 napi_enable(&sds_ring
->napi
);
207 netxen_nic_enable_int(sds_ring
);
212 netxen_napi_disable(struct netxen_adapter
*adapter
)
215 struct nx_host_sds_ring
*sds_ring
;
216 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
218 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
219 sds_ring
= &recv_ctx
->sds_rings
[ring
];
220 netxen_nic_disable_int(sds_ring
);
221 napi_disable(&sds_ring
->napi
);
225 static int nx_set_dma_mask(struct netxen_adapter
*adapter
, uint8_t revision_id
)
227 struct pci_dev
*pdev
= adapter
->pdev
;
228 uint64_t mask
, cmask
;
230 adapter
->pci_using_dac
= 0;
232 mask
= DMA_BIT_MASK(32);
234 * Consistent DMA mask is set to 32 bit because it cannot be set to
235 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
236 * come off this pool.
238 cmask
= DMA_BIT_MASK(32);
241 if (revision_id
>= NX_P3_B0
)
242 mask
= DMA_BIT_MASK(39);
243 else if (revision_id
== NX_P2_C1
)
244 mask
= DMA_BIT_MASK(35);
246 if (pci_set_dma_mask(pdev
, mask
) == 0 &&
247 pci_set_consistent_dma_mask(pdev
, cmask
) == 0) {
248 adapter
->pci_using_dac
= 1;
255 /* Update addressable range if firmware supports it */
257 nx_update_dma_mask(struct netxen_adapter
*adapter
)
259 int change
, shift
, err
;
260 uint64_t mask
, old_mask
;
261 struct pci_dev
*pdev
= adapter
->pdev
;
265 shift
= netxen_nic_reg_read(adapter
, CRB_DMA_SHIFT
);
269 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
) && (shift
> 9))
271 else if ((adapter
->ahw
.revision_id
== NX_P2_C1
) && (shift
<= 4))
275 old_mask
= pdev
->dma_mask
;
276 mask
= (1ULL<<(32+shift
)) - 1;
278 err
= pci_set_dma_mask(pdev
, mask
);
280 return pci_set_dma_mask(pdev
, old_mask
);
286 static void netxen_check_options(struct netxen_adapter
*adapter
)
288 if (adapter
->ahw
.port_type
== NETXEN_NIC_XGBE
)
289 adapter
->num_rxd
= MAX_RCV_DESCRIPTORS_10G
;
290 else if (adapter
->ahw
.port_type
== NETXEN_NIC_GBE
)
291 adapter
->num_rxd
= MAX_RCV_DESCRIPTORS_1G
;
293 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
294 adapter
->msix_supported
= !!use_msi_x
;
296 adapter
->msix_supported
= 0;
298 adapter
->num_txd
= MAX_CMD_DESCRIPTORS_HOST
;
299 adapter
->num_jumbo_rxd
= MAX_JUMBO_RCV_DESCRIPTORS
;
300 adapter
->num_lro_rxd
= MAX_LRO_RCV_DESCRIPTORS
;
306 netxen_check_hw_init(struct netxen_adapter
*adapter
, int first_boot
)
310 if (first_boot
== 0x55555555) {
311 /* This is the first boot after power up */
312 adapter
->pci_write_normalize(adapter
,
313 NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC
);
315 if (!NX_IS_REVISION_P2(adapter
->ahw
.revision_id
))
318 /* PCI bus master workaround */
319 adapter
->hw_read_wx(adapter
,
320 NETXEN_PCIE_REG(0x4), &first_boot
, 4);
321 if (!(first_boot
& 0x4)) {
323 adapter
->hw_write_wx(adapter
,
324 NETXEN_PCIE_REG(0x4), &first_boot
, 4);
325 adapter
->hw_read_wx(adapter
,
326 NETXEN_PCIE_REG(0x4), &first_boot
, 4);
329 /* This is the first boot after power up */
330 adapter
->hw_read_wx(adapter
,
331 NETXEN_ROMUSB_GLB_SW_RESET
, &first_boot
, 4);
332 if (first_boot
!= 0x80000f) {
333 /* clear the register for future unloads/loads */
334 adapter
->pci_write_normalize(adapter
,
335 NETXEN_CAM_RAM(0x1fc), 0);
339 /* Start P2 boot loader */
340 val
= adapter
->pci_read_normalize(adapter
,
341 NETXEN_ROMUSB_GLB_PEGTUNE_DONE
);
342 adapter
->pci_write_normalize(adapter
,
343 NETXEN_ROMUSB_GLB_PEGTUNE_DONE
, val
| 0x1);
347 val
= adapter
->pci_read_normalize(adapter
,
348 NETXEN_CAM_RAM(0x1fc));
350 if (++timeout
> 5000)
353 } while (val
== NETXEN_BDINFO_MAGIC
);
358 static void netxen_set_port_mode(struct netxen_adapter
*adapter
)
362 val
= adapter
->ahw
.board_type
;
363 if ((val
== NETXEN_BRDTYPE_P3_HMEZ
) ||
364 (val
== NETXEN_BRDTYPE_P3_XG_LOM
)) {
365 if (port_mode
== NETXEN_PORT_MODE_802_3_AP
) {
366 data
= NETXEN_PORT_MODE_802_3_AP
;
367 adapter
->hw_write_wx(adapter
,
368 NETXEN_PORT_MODE_ADDR
, &data
, 4);
369 } else if (port_mode
== NETXEN_PORT_MODE_XG
) {
370 data
= NETXEN_PORT_MODE_XG
;
371 adapter
->hw_write_wx(adapter
,
372 NETXEN_PORT_MODE_ADDR
, &data
, 4);
373 } else if (port_mode
== NETXEN_PORT_MODE_AUTO_NEG_1G
) {
374 data
= NETXEN_PORT_MODE_AUTO_NEG_1G
;
375 adapter
->hw_write_wx(adapter
,
376 NETXEN_PORT_MODE_ADDR
, &data
, 4);
377 } else if (port_mode
== NETXEN_PORT_MODE_AUTO_NEG_XG
) {
378 data
= NETXEN_PORT_MODE_AUTO_NEG_XG
;
379 adapter
->hw_write_wx(adapter
,
380 NETXEN_PORT_MODE_ADDR
, &data
, 4);
382 data
= NETXEN_PORT_MODE_AUTO_NEG
;
383 adapter
->hw_write_wx(adapter
,
384 NETXEN_PORT_MODE_ADDR
, &data
, 4);
387 if ((wol_port_mode
!= NETXEN_PORT_MODE_802_3_AP
) &&
388 (wol_port_mode
!= NETXEN_PORT_MODE_XG
) &&
389 (wol_port_mode
!= NETXEN_PORT_MODE_AUTO_NEG_1G
) &&
390 (wol_port_mode
!= NETXEN_PORT_MODE_AUTO_NEG_XG
)) {
391 wol_port_mode
= NETXEN_PORT_MODE_AUTO_NEG
;
393 adapter
->hw_write_wx(adapter
, NETXEN_WOL_PORT_MODE
,
398 static void netxen_set_msix_bit(struct pci_dev
*pdev
, int enable
)
403 pos
= pci_find_capability(pdev
, PCI_CAP_ID_MSIX
);
405 pci_read_config_dword(pdev
, pos
, &control
);
407 control
|= PCI_MSIX_FLAGS_ENABLE
;
410 pci_write_config_dword(pdev
, pos
, control
);
414 static void netxen_init_msix_entries(struct netxen_adapter
*adapter
)
418 for (i
= 0; i
< MSIX_ENTRIES_PER_ADAPTER
; i
++)
419 adapter
->msix_entries
[i
].entry
= i
;
423 netxen_read_mac_addr(struct netxen_adapter
*adapter
)
428 struct net_device
*netdev
= adapter
->netdev
;
429 struct pci_dev
*pdev
= adapter
->pdev
;
431 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
)) {
432 if (netxen_p3_get_mac_addr(adapter
, &mac_addr
) != 0)
435 if (netxen_get_flash_mac_addr(adapter
, &mac_addr
) != 0)
439 p
= (unsigned char *)&mac_addr
;
440 for (i
= 0; i
< 6; i
++)
441 netdev
->dev_addr
[i
] = *(p
+ 5 - i
);
443 memcpy(netdev
->perm_addr
, netdev
->dev_addr
, netdev
->addr_len
);
445 /* set station address */
447 if (!is_valid_ether_addr(netdev
->perm_addr
))
448 dev_warn(&pdev
->dev
, "Bad MAC address %pM.\n", netdev
->dev_addr
);
450 adapter
->macaddr_set(adapter
, netdev
->dev_addr
);
455 static void netxen_set_multicast_list(struct net_device
*dev
)
457 struct netxen_adapter
*adapter
= netdev_priv(dev
);
459 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
460 netxen_p3_nic_set_multi(dev
);
462 netxen_p2_nic_set_multi(dev
);
465 static const struct net_device_ops netxen_netdev_ops
= {
466 .ndo_open
= netxen_nic_open
,
467 .ndo_stop
= netxen_nic_close
,
468 .ndo_start_xmit
= netxen_nic_xmit_frame
,
469 .ndo_get_stats
= netxen_nic_get_stats
,
470 .ndo_validate_addr
= eth_validate_addr
,
471 .ndo_set_multicast_list
= netxen_set_multicast_list
,
472 .ndo_set_mac_address
= netxen_nic_set_mac
,
473 .ndo_change_mtu
= netxen_nic_change_mtu
,
474 .ndo_tx_timeout
= netxen_tx_timeout
,
475 #ifdef CONFIG_NET_POLL_CONTROLLER
476 .ndo_poll_controller
= netxen_nic_poll_controller
,
481 netxen_setup_intr(struct netxen_adapter
*adapter
)
483 struct netxen_legacy_intr_set
*legacy_intrp
;
484 struct pci_dev
*pdev
= adapter
->pdev
;
486 adapter
->flags
&= ~(NETXEN_NIC_MSI_ENABLED
| NETXEN_NIC_MSIX_ENABLED
);
488 if (adapter
->ahw
.revision_id
>= NX_P3_B0
)
489 legacy_intrp
= &legacy_intr
[adapter
->ahw
.pci_func
];
491 legacy_intrp
= &legacy_intr
[0];
492 adapter
->legacy_intr
.int_vec_bit
= legacy_intrp
->int_vec_bit
;
493 adapter
->legacy_intr
.tgt_status_reg
= legacy_intrp
->tgt_status_reg
;
494 adapter
->legacy_intr
.tgt_mask_reg
= legacy_intrp
->tgt_mask_reg
;
495 adapter
->legacy_intr
.pci_int_reg
= legacy_intrp
->pci_int_reg
;
497 netxen_set_msix_bit(pdev
, 0);
499 if (adapter
->msix_supported
) {
501 netxen_init_msix_entries(adapter
);
502 if (pci_enable_msix(pdev
, adapter
->msix_entries
,
503 MSIX_ENTRIES_PER_ADAPTER
))
506 adapter
->flags
|= NETXEN_NIC_MSIX_ENABLED
;
507 netxen_set_msix_bit(pdev
, 1);
508 dev_info(&pdev
->dev
, "using msi-x interrupts\n");
512 if (use_msi
&& !pci_enable_msi(pdev
)) {
513 adapter
->flags
|= NETXEN_NIC_MSI_ENABLED
;
514 dev_info(&pdev
->dev
, "using msi interrupts\n");
516 dev_info(&pdev
->dev
, "using legacy interrupts\n");
517 adapter
->msix_entries
[0].vector
= pdev
->irq
;
522 netxen_teardown_intr(struct netxen_adapter
*adapter
)
524 if (adapter
->flags
& NETXEN_NIC_MSIX_ENABLED
)
525 pci_disable_msix(adapter
->pdev
);
526 if (adapter
->flags
& NETXEN_NIC_MSI_ENABLED
)
527 pci_disable_msi(adapter
->pdev
);
531 netxen_cleanup_pci_map(struct netxen_adapter
*adapter
)
533 if (adapter
->ahw
.db_base
!= NULL
)
534 iounmap(adapter
->ahw
.db_base
);
535 if (adapter
->ahw
.pci_base0
!= NULL
)
536 iounmap(adapter
->ahw
.pci_base0
);
537 if (adapter
->ahw
.pci_base1
!= NULL
)
538 iounmap(adapter
->ahw
.pci_base1
);
539 if (adapter
->ahw
.pci_base2
!= NULL
)
540 iounmap(adapter
->ahw
.pci_base2
);
544 netxen_setup_pci_map(struct netxen_adapter
*adapter
)
546 void __iomem
*mem_ptr0
= NULL
;
547 void __iomem
*mem_ptr1
= NULL
;
548 void __iomem
*mem_ptr2
= NULL
;
549 void __iomem
*db_ptr
= NULL
;
551 unsigned long mem_base
, mem_len
, db_base
, db_len
= 0, pci_len0
= 0;
553 struct pci_dev
*pdev
= adapter
->pdev
;
554 int pci_func
= adapter
->ahw
.pci_func
;
559 * Set the CRB window to invalid. If any register in window 0 is
560 * accessed it should set the window to 0 and then reset it to 1.
562 adapter
->curr_window
= 255;
563 adapter
->ahw
.qdr_sn_window
= -1;
564 adapter
->ahw
.ddr_mn_window
= -1;
566 /* remap phys address */
567 mem_base
= pci_resource_start(pdev
, 0); /* 0 is for BAR 0 */
568 mem_len
= pci_resource_len(pdev
, 0);
571 adapter
->hw_write_wx
= netxen_nic_hw_write_wx_128M
;
572 adapter
->hw_read_wx
= netxen_nic_hw_read_wx_128M
;
573 adapter
->pci_read_immediate
= netxen_nic_pci_read_immediate_128M
;
574 adapter
->pci_write_immediate
= netxen_nic_pci_write_immediate_128M
;
575 adapter
->pci_read_normalize
= netxen_nic_pci_read_normalize_128M
;
576 adapter
->pci_write_normalize
= netxen_nic_pci_write_normalize_128M
;
577 adapter
->pci_set_window
= netxen_nic_pci_set_window_128M
;
578 adapter
->pci_mem_read
= netxen_nic_pci_mem_read_128M
;
579 adapter
->pci_mem_write
= netxen_nic_pci_mem_write_128M
;
581 /* 128 Meg of memory */
582 if (mem_len
== NETXEN_PCI_128MB_SIZE
) {
583 mem_ptr0
= ioremap(mem_base
, FIRST_PAGE_GROUP_SIZE
);
584 mem_ptr1
= ioremap(mem_base
+ SECOND_PAGE_GROUP_START
,
585 SECOND_PAGE_GROUP_SIZE
);
586 mem_ptr2
= ioremap(mem_base
+ THIRD_PAGE_GROUP_START
,
587 THIRD_PAGE_GROUP_SIZE
);
588 } else if (mem_len
== NETXEN_PCI_32MB_SIZE
) {
589 mem_ptr1
= ioremap(mem_base
, SECOND_PAGE_GROUP_SIZE
);
590 mem_ptr2
= ioremap(mem_base
+ THIRD_PAGE_GROUP_START
-
591 SECOND_PAGE_GROUP_START
, THIRD_PAGE_GROUP_SIZE
);
592 } else if (mem_len
== NETXEN_PCI_2MB_SIZE
) {
593 adapter
->hw_write_wx
= netxen_nic_hw_write_wx_2M
;
594 adapter
->hw_read_wx
= netxen_nic_hw_read_wx_2M
;
595 adapter
->pci_read_immediate
= netxen_nic_pci_read_immediate_2M
;
596 adapter
->pci_write_immediate
=
597 netxen_nic_pci_write_immediate_2M
;
598 adapter
->pci_read_normalize
= netxen_nic_pci_read_normalize_2M
;
599 adapter
->pci_write_normalize
=
600 netxen_nic_pci_write_normalize_2M
;
601 adapter
->pci_set_window
= netxen_nic_pci_set_window_2M
;
602 adapter
->pci_mem_read
= netxen_nic_pci_mem_read_2M
;
603 adapter
->pci_mem_write
= netxen_nic_pci_mem_write_2M
;
605 mem_ptr0
= pci_ioremap_bar(pdev
, 0);
606 if (mem_ptr0
== NULL
) {
607 dev_err(&pdev
->dev
, "failed to map PCI bar 0\n");
612 adapter
->ahw
.ddr_mn_window
= 0;
613 adapter
->ahw
.qdr_sn_window
= 0;
615 adapter
->ahw
.mn_win_crb
= 0x100000 + PCIX_MN_WINDOW
+
617 adapter
->ahw
.ms_win_crb
= 0x100000 + PCIX_SN_WINDOW
;
619 adapter
->ahw
.ms_win_crb
+= (pci_func
* 0x20);
621 adapter
->ahw
.ms_win_crb
+=
622 0xA0 + ((pci_func
- 4) * 0x10);
627 dev_info(&pdev
->dev
, "%dMB memory map\n", (int)(mem_len
>>20));
629 adapter
->ahw
.pci_base0
= mem_ptr0
;
630 adapter
->ahw
.pci_len0
= pci_len0
;
631 adapter
->ahw
.pci_base1
= mem_ptr1
;
632 adapter
->ahw
.pci_base2
= mem_ptr2
;
634 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
637 db_base
= pci_resource_start(pdev
, 4); /* doorbell is on bar 4 */
638 db_len
= pci_resource_len(pdev
, 4);
641 printk(KERN_ERR
"%s: doorbell is disabled\n",
642 netxen_nic_driver_name
);
647 db_ptr
= ioremap(db_base
, NETXEN_DB_MAPSIZE_BYTES
);
649 printk(KERN_ERR
"%s: Failed to allocate doorbell map.",
650 netxen_nic_driver_name
);
656 adapter
->ahw
.db_base
= db_ptr
;
657 adapter
->ahw
.db_len
= db_len
;
661 netxen_cleanup_pci_map(adapter
);
666 netxen_start_firmware(struct netxen_adapter
*adapter
)
668 int val
, err
, first_boot
;
669 struct pci_dev
*pdev
= adapter
->pdev
;
671 int first_driver
= 0;
672 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
)) {
673 if (adapter
->ahw
.pci_func
== 0)
676 if (adapter
->portnum
== 0)
683 first_boot
= adapter
->pci_read_normalize(adapter
,
684 NETXEN_CAM_RAM(0x1fc));
686 err
= netxen_check_hw_init(adapter
, first_boot
);
688 dev_err(&pdev
->dev
, "error in init HW init sequence\n");
692 if (first_boot
!= 0x55555555) {
693 adapter
->pci_write_normalize(adapter
,
694 CRB_CMDPEG_STATE
, 0);
695 netxen_pinit_from_rom(adapter
, 0);
699 netxen_nic_reg_write(adapter
, CRB_DMA_SHIFT
, 0x55555555);
700 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
701 netxen_set_port_mode(adapter
);
703 netxen_load_firmware(adapter
);
705 if (NX_IS_REVISION_P2(adapter
->ahw
.revision_id
)) {
707 /* Initialize multicast addr pool owners */
709 if (adapter
->ahw
.port_type
== NETXEN_NIC_XGBE
)
711 netxen_crb_writelit_adapter(adapter
,
712 NETXEN_MAC_ADDR_CNTL_REG
, val
);
716 err
= netxen_initialize_adapter_offload(adapter
);
721 * Tell the hardware our version number.
723 val
= (_NETXEN_NIC_LINUX_MAJOR
<< 16)
724 | ((_NETXEN_NIC_LINUX_MINOR
<< 8))
725 | (_NETXEN_NIC_LINUX_SUBVERSION
);
726 adapter
->pci_write_normalize(adapter
, CRB_DRIVER_VERSION
, val
);
728 /* Handshake with the card before we register the devices. */
729 err
= netxen_phantom_init(adapter
, NETXEN_NIC_PEG_TUNE
);
731 netxen_free_adapter_offload(adapter
);
739 netxen_nic_request_irq(struct netxen_adapter
*adapter
)
741 irq_handler_t handler
;
742 struct nx_host_sds_ring
*sds_ring
;
745 unsigned long flags
= IRQF_SAMPLE_RANDOM
;
746 struct net_device
*netdev
= adapter
->netdev
;
747 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
749 if (adapter
->flags
& NETXEN_NIC_MSIX_ENABLED
)
750 handler
= netxen_msix_intr
;
751 else if (adapter
->flags
& NETXEN_NIC_MSI_ENABLED
)
752 handler
= netxen_msi_intr
;
754 flags
|= IRQF_SHARED
;
755 handler
= netxen_intr
;
757 adapter
->irq
= netdev
->irq
;
759 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
760 sds_ring
= &recv_ctx
->sds_rings
[ring
];
761 sprintf(sds_ring
->name
, "%16s[%d]", netdev
->name
, ring
);
762 err
= request_irq(sds_ring
->irq
, handler
,
763 flags
, sds_ring
->name
, sds_ring
);
772 netxen_nic_free_irq(struct netxen_adapter
*adapter
)
775 struct nx_host_sds_ring
*sds_ring
;
777 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
779 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
780 sds_ring
= &recv_ctx
->sds_rings
[ring
];
781 free_irq(sds_ring
->irq
, sds_ring
);
786 netxen_nic_up(struct netxen_adapter
*adapter
, struct net_device
*netdev
)
790 err
= adapter
->init_port(adapter
, adapter
->physical_port
);
792 printk(KERN_ERR
"%s: Failed to initialize port %d\n",
793 netxen_nic_driver_name
, adapter
->portnum
);
796 adapter
->macaddr_set(adapter
, netdev
->dev_addr
);
798 netxen_nic_set_link_parameters(adapter
);
800 netxen_set_multicast_list(netdev
);
801 if (adapter
->set_mtu
)
802 adapter
->set_mtu(adapter
, netdev
->mtu
);
804 adapter
->ahw
.linkup
= 0;
805 mod_timer(&adapter
->watchdog_timer
, jiffies
);
807 netxen_napi_enable(adapter
);
809 if (adapter
->max_sds_rings
> 1)
810 netxen_config_rss(adapter
, 1);
812 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
813 netxen_linkevent_request(adapter
, 1);
819 netxen_nic_down(struct netxen_adapter
*adapter
, struct net_device
*netdev
)
821 netif_carrier_off(netdev
);
822 netif_stop_queue(netdev
);
823 netxen_napi_disable(adapter
);
825 if (adapter
->stop_port
)
826 adapter
->stop_port(adapter
);
828 netxen_release_tx_buffers(adapter
);
830 FLUSH_SCHEDULED_WORK();
831 del_timer_sync(&adapter
->watchdog_timer
);
836 netxen_nic_attach(struct netxen_adapter
*adapter
)
838 struct net_device
*netdev
= adapter
->netdev
;
839 struct pci_dev
*pdev
= adapter
->pdev
;
841 struct nx_host_rds_ring
*rds_ring
;
842 struct nx_host_tx_ring
*tx_ring
;
844 err
= netxen_init_firmware(adapter
);
846 printk(KERN_ERR
"Failed to init firmware\n");
850 if (adapter
->fw_major
< 4)
851 adapter
->max_rds_rings
= 3;
853 adapter
->max_rds_rings
= 2;
855 err
= netxen_alloc_sw_resources(adapter
);
857 printk(KERN_ERR
"%s: Error in setting sw resources\n",
862 netxen_nic_clear_stats(adapter
);
864 err
= netxen_alloc_hw_resources(adapter
);
866 printk(KERN_ERR
"%s: Error in setting hw resources\n",
868 goto err_out_free_sw
;
871 if (adapter
->fw_major
< 4) {
872 tx_ring
= &adapter
->tx_ring
;
873 tx_ring
->crb_cmd_producer
= crb_cmd_producer
[adapter
->portnum
];
874 tx_ring
->crb_cmd_consumer
= crb_cmd_consumer
[adapter
->portnum
];
876 netxen_nic_update_cmd_producer(adapter
, tx_ring
, 0);
877 netxen_nic_update_cmd_consumer(adapter
, tx_ring
, 0);
880 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
881 rds_ring
= &adapter
->recv_ctx
.rds_rings
[ring
];
882 netxen_post_rx_buffers(adapter
, ring
, rds_ring
);
885 err
= netxen_nic_request_irq(adapter
);
887 dev_err(&pdev
->dev
, "%s: failed to setup interrupt\n",
889 goto err_out_free_rxbuf
;
892 adapter
->is_up
= NETXEN_ADAPTER_UP_MAGIC
;
896 netxen_release_rx_buffers(adapter
);
897 netxen_free_hw_resources(adapter
);
899 netxen_free_sw_resources(adapter
);
904 netxen_nic_detach(struct netxen_adapter
*adapter
)
906 netxen_nic_free_irq(adapter
);
908 netxen_release_rx_buffers(adapter
);
909 netxen_free_hw_resources(adapter
);
910 netxen_free_sw_resources(adapter
);
916 netxen_nic_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
918 struct net_device
*netdev
= NULL
;
919 struct netxen_adapter
*adapter
= NULL
;
921 int pci_func_id
= PCI_FUNC(pdev
->devfn
);
924 if (pdev
->class != 0x020000) {
925 printk(KERN_DEBUG
"NetXen function %d, class %x will not "
926 "be enabled.\n",pci_func_id
, pdev
->class);
930 if (pdev
->revision
>= NX_P3_A0
&& pdev
->revision
< NX_P3_B1
) {
931 printk(KERN_WARNING
"NetXen chip revisions between 0x%x-0x%x"
932 "will not be enabled.\n",
937 if ((err
= pci_enable_device(pdev
)))
940 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
942 goto err_out_disable_pdev
;
945 if ((err
= pci_request_regions(pdev
, netxen_nic_driver_name
)))
946 goto err_out_disable_pdev
;
948 pci_set_master(pdev
);
950 netdev
= alloc_etherdev(sizeof(struct netxen_adapter
));
952 printk(KERN_ERR
"%s: Failed to allocate memory for the "
953 "device block.Check system memory resource"
954 " usage.\n", netxen_nic_driver_name
);
955 goto err_out_free_res
;
958 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
960 adapter
= netdev_priv(netdev
);
961 adapter
->netdev
= netdev
;
962 adapter
->pdev
= pdev
;
963 adapter
->ahw
.pci_func
= pci_func_id
;
965 revision_id
= pdev
->revision
;
966 adapter
->ahw
.revision_id
= revision_id
;
968 err
= nx_set_dma_mask(adapter
, revision_id
);
970 goto err_out_free_netdev
;
972 rwlock_init(&adapter
->adapter_lock
);
973 spin_lock_init(&adapter
->tx_clean_lock
);
975 err
= netxen_setup_pci_map(adapter
);
977 goto err_out_free_netdev
;
979 /* This will be reset for mezz cards */
980 adapter
->portnum
= pci_func_id
;
981 adapter
->rx_csum
= 1;
982 adapter
->mc_enabled
= 0;
983 if (NX_IS_REVISION_P3(revision_id
))
984 adapter
->max_mc_count
= 38;
986 adapter
->max_mc_count
= 16;
988 netdev
->netdev_ops
= &netxen_netdev_ops
;
989 netdev
->watchdog_timeo
= 2*HZ
;
991 netxen_nic_change_mtu(netdev
, netdev
->mtu
);
993 SET_ETHTOOL_OPS(netdev
, &netxen_nic_ethtool_ops
);
995 netdev
->features
|= (NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
);
996 netdev
->vlan_features
|= (NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
);
998 if (NX_IS_REVISION_P3(revision_id
)) {
999 netdev
->features
|= (NETIF_F_IPV6_CSUM
| NETIF_F_TSO6
);
1000 netdev
->vlan_features
|= (NETIF_F_IPV6_CSUM
| NETIF_F_TSO6
);
1003 if (adapter
->pci_using_dac
) {
1004 netdev
->features
|= NETIF_F_HIGHDMA
;
1005 netdev
->vlan_features
|= NETIF_F_HIGHDMA
;
1008 if (netxen_nic_get_board_info(adapter
) != 0) {
1009 printk("%s: Error getting board config info.\n",
1010 netxen_nic_driver_name
);
1012 goto err_out_iounmap
;
1015 netxen_initialize_adapter_ops(adapter
);
1017 /* Mezz cards have PCI function 0,2,3 enabled */
1018 switch (adapter
->ahw
.board_type
) {
1019 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ
:
1020 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ
:
1021 if (pci_func_id
>= 2)
1022 adapter
->portnum
= pci_func_id
- 2;
1028 err
= netxen_start_firmware(adapter
);
1030 goto err_out_iounmap
;
1032 nx_update_dma_mask(adapter
);
1034 netxen_nic_get_firmware_info(adapter
);
1037 * See if the firmware gave us a virtual-physical port mapping.
1039 adapter
->physical_port
= adapter
->portnum
;
1040 if (adapter
->fw_major
< 4) {
1041 i
= adapter
->pci_read_normalize(adapter
,
1042 CRB_V2P(adapter
->portnum
));
1043 if (i
!= 0x55555555)
1044 adapter
->physical_port
= i
;
1047 netxen_check_options(adapter
);
1049 netxen_setup_intr(adapter
);
1051 netdev
->irq
= adapter
->msix_entries
[0].vector
;
1053 if (netxen_napi_add(adapter
, netdev
))
1054 goto err_out_disable_msi
;
1056 init_timer(&adapter
->watchdog_timer
);
1057 adapter
->watchdog_timer
.function
= &netxen_watchdog
;
1058 adapter
->watchdog_timer
.data
= (unsigned long)adapter
;
1059 INIT_WORK(&adapter
->watchdog_task
, netxen_watchdog_task
);
1060 INIT_WORK(&adapter
->tx_timeout_task
, netxen_tx_timeout_task
);
1062 err
= netxen_read_mac_addr(adapter
);
1064 dev_warn(&pdev
->dev
, "failed to read mac addr\n");
1066 netif_carrier_off(netdev
);
1067 netif_stop_queue(netdev
);
1069 if ((err
= register_netdev(netdev
))) {
1070 printk(KERN_ERR
"%s: register_netdev failed port #%d"
1071 " aborting\n", netxen_nic_driver_name
,
1074 goto err_out_disable_msi
;
1077 pci_set_drvdata(pdev
, adapter
);
1079 switch (adapter
->ahw
.port_type
) {
1080 case NETXEN_NIC_GBE
:
1081 dev_info(&adapter
->pdev
->dev
, "%s: GbE port initialized\n",
1082 adapter
->netdev
->name
);
1084 case NETXEN_NIC_XGBE
:
1085 dev_info(&adapter
->pdev
->dev
, "%s: XGbE port initialized\n",
1086 adapter
->netdev
->name
);
1092 err_out_disable_msi
:
1093 netxen_teardown_intr(adapter
);
1095 netxen_free_adapter_offload(adapter
);
1098 netxen_cleanup_pci_map(adapter
);
1100 err_out_free_netdev
:
1101 free_netdev(netdev
);
1104 pci_release_regions(pdev
);
1106 err_out_disable_pdev
:
1107 pci_set_drvdata(pdev
, NULL
);
1108 pci_disable_device(pdev
);
1112 static void __devexit
netxen_nic_remove(struct pci_dev
*pdev
)
1114 struct netxen_adapter
*adapter
;
1115 struct net_device
*netdev
;
1117 adapter
= pci_get_drvdata(pdev
);
1118 if (adapter
== NULL
)
1121 netdev
= adapter
->netdev
;
1123 unregister_netdev(netdev
);
1125 if (adapter
->is_up
== NETXEN_ADAPTER_UP_MAGIC
) {
1126 netxen_nic_detach(adapter
);
1128 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
1129 netxen_p3_free_mac_list(adapter
);
1132 if (adapter
->portnum
== 0)
1133 netxen_free_adapter_offload(adapter
);
1135 netxen_teardown_intr(adapter
);
1136 netxen_free_sds_rings(&adapter
->recv_ctx
);
1138 netxen_cleanup_pci_map(adapter
);
1140 pci_release_regions(pdev
);
1141 pci_disable_device(pdev
);
1142 pci_set_drvdata(pdev
, NULL
);
1144 free_netdev(netdev
);
1148 netxen_nic_suspend(struct pci_dev
*pdev
, pm_message_t state
)
1151 struct netxen_adapter
*adapter
= pci_get_drvdata(pdev
);
1152 struct net_device
*netdev
= adapter
->netdev
;
1154 netif_device_detach(netdev
);
1156 if (netif_running(netdev
))
1157 netxen_nic_down(adapter
, netdev
);
1159 if (adapter
->is_up
== NETXEN_ADAPTER_UP_MAGIC
)
1160 netxen_nic_detach(adapter
);
1162 pci_save_state(pdev
);
1164 if (netxen_nic_wol_supported(adapter
)) {
1165 pci_enable_wake(pdev
, PCI_D3cold
, 1);
1166 pci_enable_wake(pdev
, PCI_D3hot
, 1);
1169 pci_disable_device(pdev
);
1170 pci_set_power_state(pdev
, pci_choose_state(pdev
, state
));
1176 netxen_nic_resume(struct pci_dev
*pdev
)
1178 struct netxen_adapter
*adapter
= pci_get_drvdata(pdev
);
1179 struct net_device
*netdev
= adapter
->netdev
;
1182 pci_set_power_state(pdev
, PCI_D0
);
1183 pci_restore_state(pdev
);
1185 err
= pci_enable_device(pdev
);
1189 adapter
->curr_window
= 255;
1191 err
= netxen_start_firmware(adapter
);
1193 dev_err(&pdev
->dev
, "failed to start firmware\n");
1197 if (netif_running(netdev
)) {
1198 err
= netxen_nic_attach(adapter
);
1202 err
= netxen_nic_up(adapter
, netdev
);
1206 netif_device_attach(netdev
);
1212 static int netxen_nic_open(struct net_device
*netdev
)
1214 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1217 if (adapter
->driver_mismatch
)
1220 if (adapter
->is_up
!= NETXEN_ADAPTER_UP_MAGIC
) {
1221 err
= netxen_nic_attach(adapter
);
1226 err
= netxen_nic_up(adapter
, netdev
);
1230 netif_start_queue(netdev
);
1235 netxen_nic_detach(adapter
);
/**
 * netxen_nic_close - Disables a network interface entry point
 */
static int netxen_nic_close(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	netxen_nic_down(adapter, netdev);

	return 0;
}
/*
 * netxen_tso_check - choose the TX opcode/flags for an outgoing skb and
 * fill the checksum/LSO fields of the command descriptor.
 *
 * Selects a TSO (LSO/LSO6) opcode when the device advertises TSO and the
 * skb carries a gso_size, otherwise a checksum-offload opcode keyed on
 * the L3/L4 protocol; defaults to plain TX_ETHER_PKT.  Returns whether
 * TSO was selected (the return statement is not visible in this extract).
 */
1250 static bool netxen_tso_check(struct net_device
*netdev
,
1251 struct cmd_desc_type0
*desc
, struct sk_buff
*skb
)
1254 u8 opcode
= TX_ETHER_PKT
;
1255 __be16 protocol
= skb
->protocol
;
/* For a VLAN frame, classify on the encapsulated protocol and tag it. */
1258 if (protocol
== cpu_to_be16(ETH_P_8021Q
)) {
1259 struct vlan_ethhdr
*vh
= (struct vlan_ethhdr
*)skb
->data
;
1260 protocol
= vh
->h_vlan_encapsulated_proto
;
1261 flags
= FLAGS_VLAN_TAGGED
;
/* TSO path: program MSS and total header length for the LSO engine. */
1264 if ((netdev
->features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) &&
1265 skb_shinfo(skb
)->gso_size
> 0) {
1267 desc
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
1268 desc
->total_hdr_length
=
1269 skb_transport_offset(skb
) + tcp_hdrlen(skb
);
1271 opcode
= (protocol
== cpu_to_be16(ETH_P_IPV6
)) ?
1272 TX_TCP_LSO6
: TX_TCP_LSO
;
/* Checksum-offload path: pick the opcode from the L3/L4 protocol pair. */
1275 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1278 if (protocol
== cpu_to_be16(ETH_P_IP
)) {
1279 l4proto
= ip_hdr(skb
)->protocol
;
1281 if (l4proto
== IPPROTO_TCP
)
1282 opcode
= TX_TCP_PKT
;
1283 else if(l4proto
== IPPROTO_UDP
)
1284 opcode
= TX_UDP_PKT
;
1285 } else if (protocol
== cpu_to_be16(ETH_P_IPV6
)) {
1286 l4proto
= ipv6_hdr(skb
)->nexthdr
;
1288 if (l4proto
== IPPROTO_TCP
)
1289 opcode
= TX_TCPV6_PKT
;
1290 else if(l4proto
== IPPROTO_UDP
)
1291 opcode
= TX_UDPV6_PKT
;
/* Header offsets are needed for both the TSO and checksum opcodes. */
1294 desc
->tcp_hdr_offset
= skb_transport_offset(skb
);
1295 desc
->ip_hdr_offset
= skb_network_offset(skb
);
1296 netxen_set_tx_flags_opcode(desc
, flags
, opcode
);
/*
 * netxen_clean_tx_dma_mapping - unwind DMA mappings after a mapping
 * failure in the TX path.
 *
 * Fragment 0 was mapped with pci_map_single() (the skb linear data), so
 * it is unmapped the same way; fragments 1..last-1 were mapped with
 * pci_map_page() and are unmapped accordingly.
 */
1301 netxen_clean_tx_dma_mapping(struct pci_dev
*pdev
,
1302 struct netxen_cmd_buffer
*pbuf
, int last
)
1305 struct netxen_skb_frag
*buffrag
;
/* Linear (head) segment: single-buffer mapping. */
1307 buffrag
= &pbuf
->frag_array
[0];
1308 pci_unmap_single(pdev
, buffrag
->dma
,
1309 buffrag
->length
, PCI_DMA_TODEVICE
);
/* Paged fragments already mapped before the failure point. */
1311 for (k
= 1; k
< last
; k
++) {
1312 buffrag
= &pbuf
->frag_array
[k
];
1313 pci_unmap_page(pdev
, buffrag
->dma
,
1314 buffrag
->length
, PCI_DMA_TODEVICE
);
/*
 * netxen_clear_cmddesc - zero a command descriptor.
 *
 * Iterates over the descriptor as eight 64-bit words (64 bytes, the size
 * of struct cmd_desc_type0).  The loop body is not visible in this
 * extract; presumably it clears desc[i] -- confirm against the full file.
 */
1319 netxen_clear_cmddesc(u64
*desc
)
1322 for (i
= 0; i
< 8; i
++)
/*
 * netxen_nic_xmit_frame - ndo_start_xmit: post an skb on the TX ring.
 *
 * Flow: reserve ring space (stop the queue and return NETDEV_TX_BUSY if
 * descriptors are short), build the first command descriptor (TSO/csum
 * setup via netxen_tso_check), DMA-map the linear data and each paged
 * fragment (up to 4 buffers per descriptor), and for TSO copy the
 * MAC/IP/TCP headers into extra descriptors.  Finally advances the
 * producer, updates stats, and rings the doorbell.  The trailing
 * statements are the drop path (count, free skb, return NETDEV_TX_OK).
 *
 * NOTE(review): several error-path lines (goto/labels, length
 * computations for `len`/`temp_len`, buffer-slot switch) are missing
 * from this extract; comments below describe only what is visible.
 */
1327 netxen_nic_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
1329 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1330 struct nx_host_tx_ring
*tx_ring
= &adapter
->tx_ring
;
/* Length of the linear (non-paged) part of the skb. */
1331 unsigned int first_seg_len
= skb
->len
- skb
->data_len
;
1332 struct netxen_cmd_buffer
*pbuf
;
1333 struct netxen_skb_frag
*buffrag
;
1334 struct cmd_desc_type0
*hwdesc
;
1335 struct pci_dev
*pdev
= adapter
->pdev
;
1336 dma_addr_t temp_dma
;
1339 u32 producer
, consumer
;
1340 int frag_count
, no_of_desc
;
1341 u32 num_txd
= tx_ring
->num_desc
;
1342 bool is_tso
= false;
/* Linear data counts as one fragment in addition to the paged frags. */
1344 frag_count
= skb_shinfo(skb
)->nr_frags
+ 1;
1346 /* 4 fragments per cmd des */
1347 no_of_desc
= (frag_count
+ 3) >> 2;
1349 producer
= tx_ring
->producer
;
/* Flow control: need no_of_desc plus slack; otherwise back-pressure. */
1351 consumer
= tx_ring
->sw_consumer
;
1352 if ((no_of_desc
+2) > find_diff_among(producer
, consumer
, num_txd
)) {
1353 netif_stop_queue(netdev
);
1355 return NETDEV_TX_BUSY
;
/* First command descriptor: clear it, then fill opcode/flags/MSS. */
1358 hwdesc
= &tx_ring
->desc_head
[producer
];
1359 netxen_clear_cmddesc((u64
*)hwdesc
);
1360 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1362 is_tso
= netxen_tso_check(netdev
, hwdesc
, skb
);
1365 pbuf
->frag_count
= frag_count
;
/* Map the linear data as buffer 0. */
1366 buffrag
= &pbuf
->frag_array
[0];
1367 temp_dma
= pci_map_single(pdev
, skb
->data
, first_seg_len
,
1369 if (pci_dma_mapping_error(pdev
, temp_dma
))
1372 buffrag
->dma
= temp_dma
;
1373 buffrag
->length
= first_seg_len
;
1374 netxen_set_tx_frags_len(hwdesc
, frag_count
, skb
->len
);
1375 netxen_set_tx_port(hwdesc
, adapter
->portnum
);
1377 hwdesc
->buffer_length
[0] = cpu_to_le16(first_seg_len
);
1378 hwdesc
->addr_buffer1
= cpu_to_le64(buffrag
->dma
);
/* Map each paged fragment; k indexes the buffer slot (0..3) within the
 * current descriptor, i indexes fragments overall. */
1380 for (i
= 1, k
= 1; i
< frag_count
; i
++, k
++) {
1381 struct skb_frag_struct
*frag
;
1383 unsigned long offset
;
1385 /* move to next desc. if there is a need */
1386 if ((i
& 0x3) == 0) {
1388 producer
= get_next_index(producer
, num_txd
);
1389 hwdesc
= &tx_ring
->desc_head
[producer
];
1390 netxen_clear_cmddesc((u64
*)hwdesc
);
1391 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1394 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1396 offset
= frag
->page_offset
;
1399 temp_dma
= pci_map_page(pdev
, frag
->page
, offset
,
1400 len
, PCI_DMA_TODEVICE
);
/* On mapping failure unwind everything mapped so far and drop. */
1401 if (pci_dma_mapping_error(pdev
, temp_dma
)) {
1402 netxen_clean_tx_dma_mapping(pdev
, pbuf
, i
);
1407 buffrag
->dma
= temp_dma
;
1408 buffrag
->length
= temp_len
;
1410 hwdesc
->buffer_length
[k
] = cpu_to_le16(temp_len
);
/* One of the four per-descriptor address slots, selected by k
 * (the switch statement itself is missing from this extract). */
1413 hwdesc
->addr_buffer1
= cpu_to_le64(temp_dma
);
1416 hwdesc
->addr_buffer2
= cpu_to_le64(temp_dma
);
1419 hwdesc
->addr_buffer3
= cpu_to_le64(temp_dma
);
1422 hwdesc
->addr_buffer4
= cpu_to_le64(temp_dma
);
1427 producer
= get_next_index(producer
, num_txd
);
1429 /* For LSO, we need to copy the MAC/IP/TCP headers into
1430 * the descriptor ring
/* more_hdr: presumably flags a header spilling into a second
 * descriptor -- its computation is not visible here. */
1433 int hdr_len
, first_hdr_len
, more_hdr
;
1434 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
/* A descriptor holds sizeof(cmd_desc_type0)-2 header bytes; longer
 * headers continue in the next descriptor. */
1435 if (hdr_len
> (sizeof(struct cmd_desc_type0
) - 2)) {
1436 first_hdr_len
= sizeof(struct cmd_desc_type0
) - 2;
1439 first_hdr_len
= hdr_len
;
1442 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1443 hwdesc
= &tx_ring
->desc_head
[producer
];
1444 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1447 /* copy the first 64 bytes */
1448 memcpy(((void *)hwdesc
) + 2,
1449 (void *)(skb
->data
), first_hdr_len
);
1450 producer
= get_next_index(producer
, num_txd
);
1453 hwdesc
= &tx_ring
->desc_head
[producer
];
1454 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1456 /* copy the next 64 bytes - should be enough except
1457 * for pathological case
1459 skb_copy_from_linear_data_offset(skb
, first_hdr_len
,
1463 producer
= get_next_index(producer
, num_txd
);
/* Publish the new producer index and ring the doorbell. */
1467 tx_ring
->producer
= producer
;
1468 adapter
->stats
.txbytes
+= skb
->len
;
1470 netxen_nic_update_cmd_producer(adapter
, tx_ring
, producer
);
1472 adapter
->stats
.xmitcalled
++;
1473 netdev
->trans_start
= jiffies
;
1475 return NETDEV_TX_OK
;
/* Drop path (reached via the mapping-failure branches above). */
1478 adapter
->stats
.txdropped
++;
1479 dev_kfree_skb_any(skb
);
1480 return NETDEV_TX_OK
;
/*
 * netxen_nic_check_temp - poll the on-board temperature sensor.
 *
 * Reads the packed temperature word from CRB_TEMP_STATE and splits it
 * into a state and a value.  On PANIC the interface is shut down (log,
 * carrier off, queue stopped); on WARN/return-to-normal a message is
 * printed only on the state transition (compared against the previously
 * stored adapter->temp).  The caller (watchdog task) treats a nonzero
 * return as "stop the watchdog" -- the return statements themselves are
 * not visible in this extract.
 */
1483 static int netxen_nic_check_temp(struct netxen_adapter
*adapter
)
1485 struct net_device
*netdev
= adapter
->netdev
;
1486 uint32_t temp
, temp_state
, temp_val
;
1489 temp
= adapter
->pci_read_normalize(adapter
, CRB_TEMP_STATE
);
1491 temp_state
= nx_get_temp_state(temp
);
1492 temp_val
= nx_get_temp_val(temp
);
/* Overtemperature: hardware has shut itself down; stop traffic. */
1494 if (temp_state
== NX_TEMP_PANIC
) {
1496 "%s: Device temperature %d degrees C exceeds"
1497 " maximum allowed. Hardware has been shut down.\n",
1498 netxen_nic_driver_name
, temp_val
);
1500 netif_carrier_off(netdev
);
1501 netif_stop_queue(netdev
);
/* Warn only on the NORMAL -> WARN transition to avoid log spam. */
1503 } else if (temp_state
== NX_TEMP_WARN
) {
1504 if (adapter
->temp
== NX_TEMP_NORMAL
) {
1506 "%s: Device temperature %d degrees C "
1507 "exceeds operating range."
1508 " Immediate action needed.\n",
1509 netxen_nic_driver_name
, temp_val
);
/* Recovered: announce only if we were previously in WARN. */
1512 if (adapter
->temp
== NX_TEMP_WARN
) {
1514 "%s: Device temperature is now %d degrees C"
1515 " in normal range.\n", netxen_nic_driver_name
,
/* Remember the state for the next poll's transition checks. */
1519 adapter
->temp
= temp_state
;
/*
 * netxen_advert_link_change - propagate a link state change to the stack.
 *
 * Compares the new state (@linkup) with the cached adapter->ahw.linkup
 * and acts only on transitions: logs the change, updates the cache,
 * toggles carrier/queue if the interface is running, and -- when the
 * firmware does not deliver link events itself -- re-reads the link
 * parameters from the hardware.
 */
1523 void netxen_advert_link_change(struct netxen_adapter
*adapter
, int linkup
)
1525 struct net_device
*netdev
= adapter
->netdev
;
/* Transition: up -> down. */
1527 if (adapter
->ahw
.linkup
&& !linkup
) {
1528 printk(KERN_INFO
"%s: %s NIC Link is down\n",
1529 netxen_nic_driver_name
, netdev
->name
);
1530 adapter
->ahw
.linkup
= 0;
1531 if (netif_running(netdev
)) {
1532 netif_carrier_off(netdev
);
1533 netif_stop_queue(netdev
);
/* No firmware link events: refresh parameters by polling. */
1536 if (!adapter
->has_link_events
)
1537 netxen_nic_set_link_parameters(adapter
);
/* Transition: down -> up. */
1539 } else if (!adapter
->ahw
.linkup
&& linkup
) {
1540 printk(KERN_INFO
"%s: %s NIC Link is up\n",
1541 netxen_nic_driver_name
, netdev
->name
);
1542 adapter
->ahw
.linkup
= 1;
1543 if (netif_running(netdev
)) {
1544 netif_carrier_on(netdev
);
1545 netif_wake_queue(netdev
);
1548 if (!adapter
->has_link_events
)
1549 netxen_nic_set_link_parameters(adapter
);
/*
 * netxen_nic_handle_phy_intr - poll the PHY link state and report it.
 *
 * Reads the link state register appropriate to the chip revision
 * (CRB_XG_STATE_P3 with a per-PCI-function field on P3, CRB_XG_STATE
 * with per-port bit/byte fields otherwise) and forwards the resulting
 * up/down flag to netxen_advert_link_change().
 */
1553 static void netxen_nic_handle_phy_intr(struct netxen_adapter
*adapter
)
1555 u32 val
, port
, linkup
;
1557 port
= adapter
->physical_port
;
/* P3 silicon: state word is indexed by PCI function. */
1559 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
)) {
1560 val
= adapter
->pci_read_normalize(adapter
, CRB_XG_STATE_P3
);
1561 val
= XG_LINK_STATE_P3(adapter
->ahw
.pci_func
, val
);
1562 linkup
= (val
== XG_LINK_UP_P3
);
/* Older silicon: GbE uses one bit per port, XG one byte per port. */
1564 val
= adapter
->pci_read_normalize(adapter
, CRB_XG_STATE
);
1565 if (adapter
->ahw
.port_type
== NETXEN_NIC_GBE
)
1566 linkup
= (val
>> port
) & 1;
1568 val
= (val
>> port
*8) & 0xff;
1569 linkup
= (val
== XG_LINK_UP
);
1573 netxen_advert_link_change(adapter
, linkup
);
1576 static void netxen_watchdog(unsigned long v
)
1578 struct netxen_adapter
*adapter
= (struct netxen_adapter
*)v
;
1580 SCHEDULE_WORK(&adapter
->watchdog_task
);
/*
 * netxen_watchdog_task - deferred watchdog work (process context).
 *
 * Port 0 additionally polls the temperature sensor; an alarm from
 * netxen_nic_check_temp() aborts the task (the early return is not
 * visible in this extract).  If the firmware does not push link events,
 * the link state is polled here.  Finally the watchdog timer is re-armed
 * for 2 seconds while the interface is running.
 */
1583 void netxen_watchdog_task(struct work_struct
*work
)
1585 struct netxen_adapter
*adapter
=
1586 container_of(work
, struct netxen_adapter
, watchdog_task
);
/* Temperature check on port 0 only; nonzero result stops the watchdog. */
1588 if ((adapter
->portnum
== 0) && netxen_nic_check_temp(adapter
))
1591 if (!adapter
->has_link_events
)
1592 netxen_nic_handle_phy_intr(adapter
);
/* Re-arm the periodic timer while the interface remains up. */
1594 if (netif_running(adapter
->netdev
))
1595 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 2 * HZ
);
1598 static void netxen_tx_timeout(struct net_device
*netdev
)
1600 struct netxen_adapter
*adapter
= (struct netxen_adapter
*)
1601 netdev_priv(netdev
);
1602 SCHEDULE_WORK(&adapter
->tx_timeout_task
);
1605 static void netxen_tx_timeout_task(struct work_struct
*work
)
1607 struct netxen_adapter
*adapter
=
1608 container_of(work
, struct netxen_adapter
, tx_timeout_task
);
1610 printk(KERN_ERR
"%s %s: transmit timeout, resetting.\n",
1611 netxen_nic_driver_name
, adapter
->netdev
->name
);
1613 netxen_napi_disable(adapter
);
1615 adapter
->netdev
->trans_start
= jiffies
;
1617 netxen_napi_enable(adapter
);
1618 netif_wake_queue(adapter
->netdev
);
/*
 * netxen_nic_get_stats - ndo_get_stats: report interface statistics.
 *
 * Rebuilds adapter->net_stats from the driver's internal counters on
 * every call (zeroed first, then copied field by field).  The trailing
 * `return stats;` is not visible in this extract.
 */
1621 struct net_device_stats
*netxen_nic_get_stats(struct net_device
*netdev
)
1623 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1624 struct net_device_stats
*stats
= &adapter
->net_stats
;
1626 memset(stats
, 0, sizeof(*stats
));
/* Map driver counters onto the generic net_device_stats fields. */
1628 stats
->rx_packets
= adapter
->stats
.no_rcv
;
1629 stats
->tx_packets
= adapter
->stats
.xmitfinished
;
1630 stats
->rx_bytes
= adapter
->stats
.rxbytes
;
1631 stats
->tx_bytes
= adapter
->stats
.txbytes
;
1632 stats
->rx_dropped
= adapter
->stats
.rxdropped
;
1633 stats
->tx_dropped
= adapter
->stats
.txdropped
;
/*
 * netxen_intr - legacy (INTx) interrupt handler; @data is the sds_ring.
 *
 * Verifies the interrupt is ours (shared line) before acking: checks the
 * per-function vector bit, and on P3 B1+ silicon double-checks the
 * interrupt state machine; older parts use a test-and-clear on the
 * CRB_INT_VECTOR bit instead.  It then acks/clears the interrupt,
 * flushes the posted write with two reads, and hands processing to NAPI.
 * The IRQ_NONE early returns and the ack value written to tgt_status_reg
 * are not visible in this extract.
 */
1638 static irqreturn_t
netxen_intr(int irq
, void *data
)
1640 struct nx_host_sds_ring
*sds_ring
= data
;
1641 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
/* Shared-IRQ filter: bail out unless our vector bit is set. */
1644 status
= adapter
->pci_read_immediate(adapter
, ISR_INT_VECTOR
);
1646 if (!(status
& adapter
->legacy_intr
.int_vec_bit
))
1649 if (adapter
->ahw
.revision_id
>= NX_P3_B1
) {
1650 /* check interrupt state machine, to be sure */
1651 status
= adapter
->pci_read_immediate(adapter
,
1653 if (!ISR_LEGACY_INT_TRIGGERED(status
))
/* Pre-P3-B1 path: use the CRB interrupt vector word instead. */
1657 unsigned long our_int
= 0;
1659 our_int
= adapter
->pci_read_normalize(adapter
, CRB_INT_VECTOR
);
1661 /* not our interrupt */
1662 if (!test_and_clear_bit((7 + adapter
->portnum
), &our_int
))
1665 /* claim interrupt */
1666 adapter
->pci_write_normalize(adapter
,
1667 CRB_INT_VECTOR
, (our_int
& 0xffffffff));
1670 /* clear interrupt */
1671 if (adapter
->fw_major
< 4)
1672 netxen_nic_disable_int(sds_ring
);
1674 adapter
->pci_write_immediate(adapter
,
1675 adapter
->legacy_intr
.tgt_status_reg
,
1677 /* read twice to ensure write is flushed */
1678 adapter
->pci_read_immediate(adapter
, ISR_INT_VECTOR
);
1679 adapter
->pci_read_immediate(adapter
, ISR_INT_VECTOR
);
/* Defer the real RX/TX processing to the NAPI poll routine. */
1681 napi_schedule(&sds_ring
->napi
);
/*
 * netxen_msi_intr - MSI interrupt handler; @data is the sds_ring.
 *
 * MSI is never shared, so no ownership check is needed: ack the
 * interrupt via the per-PCI-function target status register and
 * schedule NAPI.  The final `return IRQ_HANDLED;` is not visible in
 * this extract.
 */
1686 static irqreturn_t
netxen_msi_intr(int irq
, void *data
)
1688 struct nx_host_sds_ring
*sds_ring
= data
;
1689 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
1691 /* clear interrupt */
1692 adapter
->pci_write_immediate(adapter
,
1693 msi_tgt_status
[adapter
->ahw
.pci_func
], 0xffffffff);
1695 napi_schedule(&sds_ring
->napi
);
/*
 * netxen_msix_intr - MSI-X interrupt handler; @data is the sds_ring.
 *
 * Nothing to ack per-interrupt for MSI-X here; just kick NAPI for this
 * status ring.  The final `return IRQ_HANDLED;` is not visible in this
 * extract.
 */
1699 static irqreturn_t
netxen_msix_intr(int irq
, void *data
)
1701 struct nx_host_sds_ring
*sds_ring
= data
;
1703 napi_schedule(&sds_ring
->napi
);
/*
 * netxen_nic_poll - NAPI poll routine for one status ring.
 *
 * Reaps TX completions, then processes up to @budget RX packets.  Only
 * when both RX work finished under budget and the TX ring is fully
 * reaped does it complete NAPI and re-enable the ring's interrupt.
 * The declarations of tx_complete/work_done and the final
 * `return work_done;` are not visible in this extract.
 */
1707 static int netxen_nic_poll(struct napi_struct
*napi
, int budget
)
1709 struct nx_host_sds_ring
*sds_ring
=
1710 container_of(napi
, struct nx_host_sds_ring
, napi
);
1712 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
1717 tx_complete
= netxen_process_cmd_ring(adapter
);
1719 work_done
= netxen_process_rcv_ring(sds_ring
, budget
);
/* Done early and TX drained: exit polling mode, unmask the interrupt. */
1721 if ((work_done
< budget
) && tx_complete
) {
1722 napi_complete(&sds_ring
->napi
);
1723 netxen_nic_enable_int(sds_ring
);
/*
 * netxen_nic_poll_controller - netconsole/netpoll hook: run the
 * interrupt handler with the IRQ line masked.
 *
 * NOTE(review): netxen_intr() interprets its second argument as a
 * struct nx_host_sds_ring * (see its `sds_ring = data` assignment), but
 * `adapter` is passed here -- looks like a latent type confusion; verify
 * against the full file/upstream history.
 */
1729 #ifdef CONFIG_NET_POLL_CONTROLLER
1730 static void netxen_nic_poll_controller(struct net_device
*netdev
)
1732 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1733 disable_irq(adapter
->irq
);
1734 netxen_intr(adapter
->irq
, adapter
);
1735 enable_irq(adapter
->irq
);
/*
 * PCI driver descriptor tying the NetXen device IDs to the probe/remove
 * and power-management entry points.  The closing brace/semicolon of the
 * initializer is not visible in this extract.
 */
1739 static struct pci_driver netxen_driver
= {
1740 .name
= netxen_nic_driver_name
,
1741 .id_table
= netxen_pci_tbl
,
1742 .probe
= netxen_nic_probe
,
/* __devexit_p: compiles to NULL when hotplug removal is configured out. */
1743 .remove
= __devexit_p(netxen_nic_remove
),
1744 .suspend
= netxen_nic_suspend
,
1745 .resume
= netxen_nic_resume
1748 /* Driver Registration on NetXen card */
/*
 * netxen_init_module - module load hook.
 *
 * Announces the driver version, creates the driver-private
 * single-threaded workqueue ("netxen") used by the deferred tasks, and
 * registers the PCI driver.  The error return for workqueue-creation
 * failure (presumably -ENOMEM) is not visible in this extract.
 */
1750 static int __init
netxen_init_module(void)
1752 printk(KERN_INFO
"%s\n", netxen_nic_driver_string
);
1754 if ((netxen_workq
= create_singlethread_workqueue("netxen")) == NULL
)
1757 return pci_register_driver(&netxen_driver
);
1760 module_init(netxen_init_module
);
1762 static void __exit
netxen_exit_module(void)
1764 pci_unregister_driver(&netxen_driver
);
1765 destroy_workqueue(netxen_workq
);
1768 module_exit(netxen_exit_module
);