qlcnic: fix memory leaks
drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37
38MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42
43char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
45 QLCNIC_LINUX_VERSIONID;
46
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48
49/* Default to restricted 1G auto-neg mode */
50static int wol_port_mode = 5;
51
52static int use_msi = 1;
53module_param(use_msi, int, 0644);
54MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
55
56static int use_msi_x = 1;
57module_param(use_msi_x, int, 0644);
58MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
59
60static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644);
62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
63
64static int __devinit qlcnic_probe(struct pci_dev *pdev,
65 const struct pci_device_id *ent);
66static void __devexit qlcnic_remove(struct pci_dev *pdev);
67static int qlcnic_open(struct net_device *netdev);
68static int qlcnic_close(struct net_device *netdev);
69static void qlcnic_tx_timeout(struct net_device *netdev);
70static void qlcnic_tx_timeout_task(struct work_struct *work);
71static void qlcnic_attach_work(struct work_struct *work);
72static void qlcnic_fwinit_work(struct work_struct *work);
73static void qlcnic_fw_poll_work(struct work_struct *work);
74static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
75 work_func_t func, int delay);
76static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
77static int qlcnic_poll(struct napi_struct *napi, int budget);
78#ifdef CONFIG_NET_POLL_CONTROLLER
79static void qlcnic_poll_controller(struct net_device *netdev);
80#endif
81
82static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
83static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
85static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
86
87static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
88static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
89static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
90
91static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
92static irqreturn_t qlcnic_intr(int irq, void *data);
93static irqreturn_t qlcnic_msi_intr(int irq, void *data);
94static irqreturn_t qlcnic_msix_intr(int irq, void *data);
95
96static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
97static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
98
99/* PCI Device ID Table */
100#define ENTRY(device) \
101 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
102 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
103
104#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
105
106static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
107 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
108 {0,}
109};
110
111MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
112
113
114void
115qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
116 struct qlcnic_host_tx_ring *tx_ring)
117{
118 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
119
120 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
121 netif_stop_queue(adapter->netdev);
122 smp_mb();
123 adapter->stats.xmit_off++;
124 }
125}
126
127static const u32 msi_tgt_status[8] = {
128 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
129 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
130 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
131 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
132};
133
134static const
135struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
136
137static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
138{
139 writel(0, sds_ring->crb_intr_mask);
140}
141
142static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
143{
144 struct qlcnic_adapter *adapter = sds_ring->adapter;
145
146 writel(0x1, sds_ring->crb_intr_mask);
147
148 if (!QLCNIC_IS_MSI_FAMILY(adapter))
149 writel(0xfbff, adapter->tgt_mask_reg);
150}
151
152static int
153qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
154{
155 int size = sizeof(struct qlcnic_host_sds_ring) * count;
156
157 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
158
159 return (recv_ctx->sds_rings == NULL);
160}
161
162static void
163qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
164{
165 if (recv_ctx->sds_rings != NULL)
166 kfree(recv_ctx->sds_rings);
167
168 recv_ctx->sds_rings = NULL;
169}
170
171static int
172qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
173{
174 int ring;
175 struct qlcnic_host_sds_ring *sds_ring;
176 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
177
178 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
179 return -ENOMEM;
180
181 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
182 sds_ring = &recv_ctx->sds_rings[ring];
183 netif_napi_add(netdev, &sds_ring->napi,
184 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
185 }
186
187 return 0;
188}
189
190static void
191qlcnic_napi_del(struct qlcnic_adapter *adapter)
192{
193 int ring;
194 struct qlcnic_host_sds_ring *sds_ring;
195 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
196
197 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
198 sds_ring = &recv_ctx->sds_rings[ring];
199 netif_napi_del(&sds_ring->napi);
200 }
201
202 qlcnic_free_sds_rings(&adapter->recv_ctx);
203}
204
205static void
206qlcnic_napi_enable(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
213 return;
214
215 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
216 sds_ring = &recv_ctx->sds_rings[ring];
217 napi_enable(&sds_ring->napi);
218 qlcnic_enable_int(sds_ring);
219 }
220}
221
222static void
223qlcnic_napi_disable(struct qlcnic_adapter *adapter)
224{
225 int ring;
226 struct qlcnic_host_sds_ring *sds_ring;
227 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
228
229 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
230 return;
231
232 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
233 sds_ring = &recv_ctx->sds_rings[ring];
234 qlcnic_disable_int(sds_ring);
235 napi_synchronize(&sds_ring->napi);
236 napi_disable(&sds_ring->napi);
237 }
238}
239
240static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
241{
242 memset(&adapter->stats, 0, sizeof(adapter->stats));
243}
244
245static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
246{
247 u32 val, data;
248
249 val = adapter->ahw.board_type;
250 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
251 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
252 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
253 data = QLCNIC_PORT_MODE_802_3_AP;
254 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
255 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
256 data = QLCNIC_PORT_MODE_XG;
257 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
258 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
259 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
260 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
261 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
262 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
263 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
264 } else {
265 data = QLCNIC_PORT_MODE_AUTO_NEG;
266 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
267 }
268
269 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
270 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
271 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
272 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
273 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
274 }
275 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
276 }
277}
278
279static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
280{
281 u32 control;
282 int pos;
283
284 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
285 if (pos) {
286 pci_read_config_dword(pdev, pos, &control);
287 if (enable)
288 control |= PCI_MSIX_FLAGS_ENABLE;
289 else
290 control = 0;
291 pci_write_config_dword(pdev, pos, control);
292 }
293}
294
295static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
296{
297 int i;
298
299 for (i = 0; i < count; i++)
300 adapter->msix_entries[i].entry = i;
301}
302
303static int
304qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
305{
306 int i;
307 unsigned char *p;
308 u64 mac_addr;
309 struct net_device *netdev = adapter->netdev;
310 struct pci_dev *pdev = adapter->pdev;
311
312 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
313 return -EIO;
314
315 p = (unsigned char *)&mac_addr;
316 for (i = 0; i < 6; i++)
317 netdev->dev_addr[i] = *(p + 5 - i);
318
319 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
320 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
321
322 /* set station address */
323
324 if (!is_valid_ether_addr(netdev->perm_addr))
325 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
326 netdev->dev_addr);
327
328 return 0;
329}
330
331static int qlcnic_set_mac(struct net_device *netdev, void *p)
332{
333 struct qlcnic_adapter *adapter = netdev_priv(netdev);
334 struct sockaddr *addr = p;
335
336 if (!is_valid_ether_addr(addr->sa_data))
337 return -EINVAL;
338
339 if (netif_running(netdev)) {
340 netif_device_detach(netdev);
341 qlcnic_napi_disable(adapter);
342 }
343
344 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
345 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
346 qlcnic_set_multi(adapter->netdev);
347
348 if (netif_running(netdev)) {
349 netif_device_attach(netdev);
350 qlcnic_napi_enable(adapter);
351 }
352 return 0;
353}
354
355static const struct net_device_ops qlcnic_netdev_ops = {
356 .ndo_open = qlcnic_open,
357 .ndo_stop = qlcnic_close,
358 .ndo_start_xmit = qlcnic_xmit_frame,
359 .ndo_get_stats = qlcnic_get_stats,
360 .ndo_validate_addr = eth_validate_addr,
361 .ndo_set_multicast_list = qlcnic_set_multi,
362 .ndo_set_mac_address = qlcnic_set_mac,
363 .ndo_change_mtu = qlcnic_change_mtu,
364 .ndo_tx_timeout = qlcnic_tx_timeout,
365#ifdef CONFIG_NET_POLL_CONTROLLER
366 .ndo_poll_controller = qlcnic_poll_controller,
367#endif
368};
369
370static void
371qlcnic_setup_intr(struct qlcnic_adapter *adapter)
372{
373 const struct qlcnic_legacy_intr_set *legacy_intrp;
374 struct pci_dev *pdev = adapter->pdev;
375 int err, num_msix;
376
377 if (adapter->rss_supported) {
378 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
379 MSIX_ENTRIES_PER_ADAPTER : 2;
380 } else
381 num_msix = 1;
382
383 adapter->max_sds_rings = 1;
384
385 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
386
387 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
388
389 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
390 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
391 legacy_intrp->tgt_status_reg);
392 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
393 legacy_intrp->tgt_mask_reg);
394 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
395
396 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
397 ISR_INT_STATE_REG);
398
399 qlcnic_set_msix_bit(pdev, 0);
400
401 if (adapter->msix_supported) {
402
403 qlcnic_init_msix_entries(adapter, num_msix);
404 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
405 if (err == 0) {
406 adapter->flags |= QLCNIC_MSIX_ENABLED;
407 qlcnic_set_msix_bit(pdev, 1);
408
409 if (adapter->rss_supported)
410 adapter->max_sds_rings = num_msix;
411
412 dev_info(&pdev->dev, "using msi-x interrupts\n");
413 return;
414 }
415
416 if (err > 0)
417 pci_disable_msix(pdev);
418
419 /* fall through for msi */
420 }
421
422 if (use_msi && !pci_enable_msi(pdev)) {
423 adapter->flags |= QLCNIC_MSI_ENABLED;
424 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
425 msi_tgt_status[adapter->ahw.pci_func]);
426 dev_info(&pdev->dev, "using msi interrupts\n");
427 adapter->msix_entries[0].vector = pdev->irq;
428 return;
429 }
430
431 dev_info(&pdev->dev, "using legacy interrupts\n");
432 adapter->msix_entries[0].vector = pdev->irq;
433}
434
435static void
436qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
437{
438 if (adapter->flags & QLCNIC_MSIX_ENABLED)
439 pci_disable_msix(adapter->pdev);
440 if (adapter->flags & QLCNIC_MSI_ENABLED)
441 pci_disable_msi(adapter->pdev);
442}
443
444static void
445qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
446{
447 if (adapter->ahw.pci_base0 != NULL)
448 iounmap(adapter->ahw.pci_base0);
449}
450
451static int
452qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
453{
454 void __iomem *mem_ptr0 = NULL;
455 resource_size_t mem_base;
456 unsigned long mem_len, pci_len0 = 0;
457
458 struct pci_dev *pdev = adapter->pdev;
459 int pci_func = adapter->ahw.pci_func;
460
461 /* remap phys address */
462 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
463 mem_len = pci_resource_len(pdev, 0);
464
465 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
466
467 mem_ptr0 = pci_ioremap_bar(pdev, 0);
468 if (mem_ptr0 == NULL) {
469 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
470 return -EIO;
471 }
472 pci_len0 = mem_len;
473 } else {
474 return -EIO;
475 }
476
477 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
478
479 adapter->ahw.pci_base0 = mem_ptr0;
480 adapter->ahw.pci_len0 = pci_len0;
481
482 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
483 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
484
485 return 0;
486}
487
488static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
489{
490 struct pci_dev *pdev = adapter->pdev;
491 int i, found = 0;
492
493 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
494 if (qlcnic_boards[i].vendor == pdev->vendor &&
495 qlcnic_boards[i].device == pdev->device &&
496 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
497 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
498 strcpy(name, qlcnic_boards[i].short_name);
499 found = 1;
500 break;
501 }
502
503 }
504
505 if (!found)
506 strcpy(name, "Unknown");
507}
508
509static void
510qlcnic_check_options(struct qlcnic_adapter *adapter)
511{
512 u32 fw_major, fw_minor, fw_build;
513 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
514 char serial_num[32];
515 int i, offset, val;
516 int *ptr32;
517 struct pci_dev *pdev = adapter->pdev;
518
519 adapter->driver_mismatch = 0;
520
521 ptr32 = (int *)&serial_num;
522 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
523 for (i = 0; i < 8; i++) {
524 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
525 dev_err(&pdev->dev, "error reading board info\n");
526 adapter->driver_mismatch = 1;
527 return;
528 }
529 ptr32[i] = cpu_to_le32(val);
530 offset += sizeof(u32);
531 }
532
533 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
534 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
535 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
536
537 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
538
539 if (adapter->portnum == 0) {
540 get_brd_name(adapter, brd_name);
541
542 pr_info("%s: %s Board Chip rev 0x%x\n",
543 module_name(THIS_MODULE),
544 brd_name, adapter->ahw.revision_id);
545 }
546
547 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
548 fw_major, fw_minor, fw_build);
549
550 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
551
552 adapter->flags &= ~QLCNIC_LRO_ENABLED;
553
554 if (adapter->ahw.port_type == QLCNIC_XGBE) {
555 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
556 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
557 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
558 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
559 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
560 }
561
562 adapter->msix_supported = !!use_msi_x;
563 adapter->rss_supported = !!use_msi_x;
564
565 adapter->num_txd = MAX_CMD_DESCRIPTORS;
566
567 adapter->max_rds_rings = 2;
568}
569
570static int
571qlcnic_start_firmware(struct qlcnic_adapter *adapter)
572{
573 int val, err, first_boot;
574
575 err = qlcnic_can_start_firmware(adapter);
576 if (err < 0)
577 return err;
578 else if (!err)
579 goto wait_init;
580
581 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
582 if (first_boot == 0x55555555)
583 /* This is the first boot after power up */
584 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
585
586 qlcnic_request_firmware(adapter);
587
588 err = qlcnic_need_fw_reset(adapter);
589 if (err < 0)
590 goto err_out;
591 if (err == 0)
592 goto wait_init;
593
594 if (first_boot != 0x55555555) {
595 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
596 qlcnic_pinit_from_rom(adapter);
597 msleep(1);
598 }
599
600 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
601 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
602 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
603
604 qlcnic_set_port_mode(adapter);
605
606 err = qlcnic_load_firmware(adapter);
607 if (err)
608 goto err_out;
609
610 qlcnic_release_firmware(adapter);
611
612 val = (_QLCNIC_LINUX_MAJOR << 16)
613 | ((_QLCNIC_LINUX_MINOR << 8))
614 | (_QLCNIC_LINUX_SUBVERSION);
615 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
616
617wait_init:
618 /* Handshake with the card before we register the devices. */
619 err = qlcnic_phantom_init(adapter);
620 if (err)
621 goto err_out;
622
623 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
624 qlcnic_idc_debug_info(adapter, 1);
625
626 qlcnic_check_options(adapter);
627
628 adapter->need_fw_reset = 0;
629
630 /* fall through and release firmware */
631
632err_out:
633 qlcnic_release_firmware(adapter);
634 return err;
635}
636
637static int
638qlcnic_request_irq(struct qlcnic_adapter *adapter)
639{
640 irq_handler_t handler;
641 struct qlcnic_host_sds_ring *sds_ring;
642 int err, ring;
643
644 unsigned long flags = 0;
645 struct net_device *netdev = adapter->netdev;
646 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
647
648 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
649 handler = qlcnic_tmp_intr;
650 if (!QLCNIC_IS_MSI_FAMILY(adapter))
651 flags |= IRQF_SHARED;
652
653 } else {
654 if (adapter->flags & QLCNIC_MSIX_ENABLED)
655 handler = qlcnic_msix_intr;
656 else if (adapter->flags & QLCNIC_MSI_ENABLED)
657 handler = qlcnic_msi_intr;
658 else {
659 flags |= IRQF_SHARED;
660 handler = qlcnic_intr;
661 }
662 }
663 adapter->irq = netdev->irq;
664
665 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
666 sds_ring = &recv_ctx->sds_rings[ring];
667 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
668 err = request_irq(sds_ring->irq, handler,
669 flags, sds_ring->name, sds_ring);
670 if (err)
671 return err;
672 }
673
674 return 0;
675}
676
677static void
678qlcnic_free_irq(struct qlcnic_adapter *adapter)
679{
680 int ring;
681 struct qlcnic_host_sds_ring *sds_ring;
682
683 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
684
685 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
686 sds_ring = &recv_ctx->sds_rings[ring];
687 free_irq(sds_ring->irq, sds_ring);
688 }
689}
690
691static void
692qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
693{
694 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
695 adapter->coal.normal.data.rx_time_us =
696 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
697 adapter->coal.normal.data.rx_packets =
698 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
699 adapter->coal.normal.data.tx_time_us =
700 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
701 adapter->coal.normal.data.tx_packets =
702 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
703}
704
705static int
706__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
707{
708 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
709 return -EIO;
710
711 qlcnic_set_multi(netdev);
712 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
713
714 adapter->ahw.linkup = 0;
715
716 if (adapter->max_sds_rings > 1)
717 qlcnic_config_rss(adapter, 1);
718
719 qlcnic_config_intr_coalesce(adapter);
720
721 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
722 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
723
724 qlcnic_napi_enable(adapter);
725
726 qlcnic_linkevent_request(adapter, 1);
727
728 set_bit(__QLCNIC_DEV_UP, &adapter->state);
729 return 0;
730}
731
732/* Usage: During resume and firmware recovery module.*/
733
734static int
735qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
736{
737 int err = 0;
738
739 rtnl_lock();
740 if (netif_running(netdev))
741 err = __qlcnic_up(adapter, netdev);
742 rtnl_unlock();
743
744 return err;
745}
746
747static void
748__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
749{
750 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
751 return;
752
753 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
754 return;
755
756 smp_mb();
757 spin_lock(&adapter->tx_clean_lock);
758 netif_carrier_off(netdev);
759 netif_tx_disable(netdev);
760
761 qlcnic_free_mac_list(adapter);
762
763 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
764
765 qlcnic_napi_disable(adapter);
766
767 qlcnic_release_tx_buffers(adapter);
768 spin_unlock(&adapter->tx_clean_lock);
769}
770
771/* Usage: During suspend and firmware recovery module */
772
773static void
774qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
775{
776 rtnl_lock();
777 if (netif_running(netdev))
778 __qlcnic_down(adapter, netdev);
779 rtnl_unlock();
780
781}
782
783static int
784qlcnic_attach(struct qlcnic_adapter *adapter)
785{
786 struct net_device *netdev = adapter->netdev;
787 struct pci_dev *pdev = adapter->pdev;
788 int err, ring;
789 struct qlcnic_host_rds_ring *rds_ring;
790
791 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
792 return 0;
793
794 err = qlcnic_init_firmware(adapter);
795 if (err)
796 return err;
797
798 err = qlcnic_napi_add(adapter, netdev);
799 if (err)
800 return err;
801
802 err = qlcnic_alloc_sw_resources(adapter);
803 if (err) {
804 dev_err(&pdev->dev, "Error in setting sw resources\n");
805 return err;
806 }
807
808 err = qlcnic_alloc_hw_resources(adapter);
809 if (err) {
810 dev_err(&pdev->dev, "Error in setting hw resources\n");
811 goto err_out_free_sw;
812 }
813
814
815 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
816 rds_ring = &adapter->recv_ctx.rds_rings[ring];
817 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
818 }
819
820 err = qlcnic_request_irq(adapter);
821 if (err) {
822 dev_err(&pdev->dev, "failed to setup interrupt\n");
823 goto err_out_free_rxbuf;
824 }
825
826 qlcnic_init_coalesce_defaults(adapter);
827
828 qlcnic_create_sysfs_entries(adapter);
829
830 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
831 return 0;
832
833err_out_free_rxbuf:
834 qlcnic_release_rx_buffers(adapter);
835 qlcnic_free_hw_resources(adapter);
836err_out_free_sw:
837 qlcnic_free_sw_resources(adapter);
838 return err;
839}
840
841static void
842qlcnic_detach(struct qlcnic_adapter *adapter)
843{
844 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
845 return;
846
847 qlcnic_remove_sysfs_entries(adapter);
848
849 qlcnic_free_hw_resources(adapter);
850 qlcnic_release_rx_buffers(adapter);
851 qlcnic_free_irq(adapter);
852 qlcnic_napi_del(adapter);
853 qlcnic_free_sw_resources(adapter);
854
855 adapter->is_up = 0;
856}
857
858void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
859{
860 struct qlcnic_adapter *adapter = netdev_priv(netdev);
861 struct qlcnic_host_sds_ring *sds_ring;
862 int ring;
863
864 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
865 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
866 sds_ring = &adapter->recv_ctx.sds_rings[ring];
867 qlcnic_disable_int(sds_ring);
868 }
869 }
870
871 qlcnic_detach(adapter);
872
873 adapter->diag_test = 0;
874 adapter->max_sds_rings = max_sds_rings;
875
876 if (qlcnic_attach(adapter))
877 goto out;
878
879 if (netif_running(netdev))
880 __qlcnic_up(adapter, netdev);
881out:
882 netif_device_attach(netdev);
883}
884
885int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
886{
887 struct qlcnic_adapter *adapter = netdev_priv(netdev);
888 struct qlcnic_host_sds_ring *sds_ring;
889 int ring;
890 int ret;
891
892 netif_device_detach(netdev);
893
894 if (netif_running(netdev))
895 __qlcnic_down(adapter, netdev);
896
897 qlcnic_detach(adapter);
898
899 adapter->max_sds_rings = 1;
900 adapter->diag_test = test;
901
902 ret = qlcnic_attach(adapter);
903 if (ret) {
904 netif_device_attach(netdev);
905 return ret;
906 }
907
908 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
909 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
910 sds_ring = &adapter->recv_ctx.sds_rings[ring];
911 qlcnic_enable_int(sds_ring);
912 }
913 }
914
915 return 0;
916}
917
918int
919qlcnic_reset_context(struct qlcnic_adapter *adapter)
920{
921 int err = 0;
922 struct net_device *netdev = adapter->netdev;
923
924 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
925 return -EBUSY;
926
927 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
928
929 netif_device_detach(netdev);
930
931 if (netif_running(netdev))
932 __qlcnic_down(adapter, netdev);
933
934 qlcnic_detach(adapter);
935
936 if (netif_running(netdev)) {
937 err = qlcnic_attach(adapter);
938 if (!err)
939 __qlcnic_up(adapter, netdev);
940 }
941
942 netif_device_attach(netdev);
943 }
944
945 clear_bit(__QLCNIC_RESETTING, &adapter->state);
946 return err;
947}
948
949static int
950qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
951 struct net_device *netdev, u8 pci_using_dac)
952{
953 int err;
954 struct pci_dev *pdev = adapter->pdev;
955
956 adapter->rx_csum = 1;
957 adapter->mc_enabled = 0;
958 adapter->max_mc_count = 38;
959
960 netdev->netdev_ops = &qlcnic_netdev_ops;
961 netdev->watchdog_timeo = 2*HZ;
962
963 qlcnic_change_mtu(netdev, netdev->mtu);
964
965 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
966
967 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
968 netdev->features |= (NETIF_F_GRO);
969 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
970
971 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
972 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
973
974 if (pci_using_dac) {
975 netdev->features |= NETIF_F_HIGHDMA;
976 netdev->vlan_features |= NETIF_F_HIGHDMA;
977 }
978
979 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
980 netdev->features |= (NETIF_F_HW_VLAN_TX);
981
982 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
983 netdev->features |= NETIF_F_LRO;
984
985 netdev->irq = adapter->msix_entries[0].vector;
986
987 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
988
989 if (qlcnic_read_mac_addr(adapter))
990 dev_warn(&pdev->dev, "failed to read mac addr\n");
991
992 netif_carrier_off(netdev);
993 netif_stop_queue(netdev);
994
995 err = register_netdev(netdev);
996 if (err) {
997 dev_err(&pdev->dev, "failed to register net device\n");
998 return err;
999 }
1000
1001 return 0;
1002}
1003
1004static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1005{
1006 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1007 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1008 *pci_using_dac = 1;
1009 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1010 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1011 *pci_using_dac = 0;
1012 else {
1013 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1014 return -EIO;
1015 }
1016
1017 return 0;
1018}
1019
1020static int __devinit
1021qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1022{
1023 struct net_device *netdev = NULL;
1024 struct qlcnic_adapter *adapter = NULL;
1025 int err;
1026 int pci_func_id = PCI_FUNC(pdev->devfn);
1027 uint8_t revision_id;
1028 uint8_t pci_using_dac;
1029
1030 err = pci_enable_device(pdev);
1031 if (err)
1032 return err;
1033
1034 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1035 err = -ENODEV;
1036 goto err_out_disable_pdev;
1037 }
1038
1039 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1040 if (err)
1041 goto err_out_disable_pdev;
1042
1043 err = pci_request_regions(pdev, qlcnic_driver_name);
1044 if (err)
1045 goto err_out_disable_pdev;
1046
1047 pci_set_master(pdev);
1048
1049 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1050 if (!netdev) {
1051 dev_err(&pdev->dev, "failed to allocate net_device\n");
1052 err = -ENOMEM;
1053 goto err_out_free_res;
1054 }
1055
1056 SET_NETDEV_DEV(netdev, &pdev->dev);
1057
1058 adapter = netdev_priv(netdev);
1059 adapter->netdev = netdev;
1060 adapter->pdev = pdev;
1061 adapter->dev_rst_time = jiffies;
1062 adapter->ahw.pci_func = pci_func_id;
1063
1064 revision_id = pdev->revision;
1065 adapter->ahw.revision_id = revision_id;
1066
1067 rwlock_init(&adapter->ahw.crb_lock);
1068 mutex_init(&adapter->ahw.mem_lock);
1069
1070 spin_lock_init(&adapter->tx_clean_lock);
1071 INIT_LIST_HEAD(&adapter->mac_list);
1072
1073 err = qlcnic_setup_pci_map(adapter);
1074 if (err)
1075 goto err_out_free_netdev;
1076
1077 /* This will be reset for mezz cards */
1078 adapter->portnum = pci_func_id;
1079
1080 err = qlcnic_get_board_info(adapter);
1081 if (err) {
1082 dev_err(&pdev->dev, "Error getting board config info.\n");
1083 goto err_out_iounmap;
1084 }
1085
1086 if (qlcnic_setup_idc_param(adapter))
1087 goto err_out_iounmap;
1088
1089 err = qlcnic_start_firmware(adapter);
1090 if (err)
1091 goto err_out_decr_ref;
1092
1093 qlcnic_clear_stats(adapter);
1094
1095 qlcnic_setup_intr(adapter);
1096
1097 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1098 if (err)
1099 goto err_out_disable_msi;
1100
1101 pci_set_drvdata(pdev, adapter);
1102
1103 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1104
1105 switch (adapter->ahw.port_type) {
1106 case QLCNIC_GBE:
1107 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1108 adapter->netdev->name);
1109 break;
1110 case QLCNIC_XGBE:
1111 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1112 adapter->netdev->name);
1113 break;
1114 }
1115
1116 qlcnic_create_diag_entries(adapter);
1117
1118 return 0;
1119
1120err_out_disable_msi:
1121 qlcnic_teardown_intr(adapter);
1122
1123err_out_decr_ref:
1124 qlcnic_clr_all_drv_state(adapter);
1125
1126err_out_iounmap:
1127 qlcnic_cleanup_pci_map(adapter);
1128
1129err_out_free_netdev:
1130 free_netdev(netdev);
1131
1132err_out_free_res:
1133 pci_release_regions(pdev);
1134
1135err_out_disable_pdev:
1136 pci_set_drvdata(pdev, NULL);
1137 pci_disable_device(pdev);
1138 return err;
1139}
1140
1141static void __devexit qlcnic_remove(struct pci_dev *pdev)
1142{
1143 struct qlcnic_adapter *adapter;
1144 struct net_device *netdev;
1145
1146 adapter = pci_get_drvdata(pdev);
1147 if (adapter == NULL)
1148 return;
1149
1150 netdev = adapter->netdev;
1151
1152 qlcnic_cancel_fw_work(adapter);
1153
1154 unregister_netdev(netdev);
1155
1156 cancel_work_sync(&adapter->tx_timeout_task);
1157
1158 qlcnic_detach(adapter);
1159
1160 qlcnic_clr_all_drv_state(adapter);
1161
1162 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1163
1164 qlcnic_teardown_intr(adapter);
1165
1166 qlcnic_remove_diag_entries(adapter);
1167
1168 qlcnic_cleanup_pci_map(adapter);
1169
1170 qlcnic_release_firmware(adapter);
1171
1172 pci_release_regions(pdev);
1173 pci_disable_device(pdev);
1174 pci_set_drvdata(pdev, NULL);
1175
1176 free_netdev(netdev);
1177}
1178static int __qlcnic_shutdown(struct pci_dev *pdev)
1179{
1180 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1181 struct net_device *netdev = adapter->netdev;
1182 int retval;
1183
1184 netif_device_detach(netdev);
1185
1186 qlcnic_cancel_fw_work(adapter);
1187
1188 if (netif_running(netdev))
1189 qlcnic_down(adapter, netdev);
1190
1191 cancel_work_sync(&adapter->tx_timeout_task);
1192
1193 qlcnic_detach(adapter);
1194
1195 qlcnic_clr_all_drv_state(adapter);
1196
1197 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1198
1199 retval = pci_save_state(pdev);
1200 if (retval)
1201 return retval;
1202
1203 if (qlcnic_wol_supported(adapter)) {
1204 pci_enable_wake(pdev, PCI_D3cold, 1);
1205 pci_enable_wake(pdev, PCI_D3hot, 1);
1206 }
1207
1208 return 0;
1209}
1210
1211static void qlcnic_shutdown(struct pci_dev *pdev)
1212{
1213 if (__qlcnic_shutdown(pdev))
1214 return;
1215
1216 pci_disable_device(pdev);
1217}
1218
1219#ifdef CONFIG_PM
1220static int
1221qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1222{
1223 int retval;
1224
1225 retval = __qlcnic_shutdown(pdev);
1226 if (retval)
1227 return retval;
1228
1229 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1230 return 0;
1231}
1232
1233static int
1234qlcnic_resume(struct pci_dev *pdev)
1235{
1236 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1237 struct net_device *netdev = adapter->netdev;
1238 int err;
1239
1240 err = pci_enable_device(pdev);
1241 if (err)
1242 return err;
1243
1244 pci_set_power_state(pdev, PCI_D0);
1245 pci_set_master(pdev);
1246 pci_restore_state(pdev);
1247
1248 err = qlcnic_start_firmware(adapter);
1249 if (err) {
1250 dev_err(&pdev->dev, "failed to start firmware\n");
1251 return err;
1252 }
1253
1254 if (netif_running(netdev)) {
1255 err = qlcnic_attach(adapter);
1256 if (err)
1257 goto err_out;
1258
1259 err = qlcnic_up(adapter, netdev);
1260 if (err)
1261 goto err_out_detach;
1262
1263
1264 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1265 }
1266
1267 netif_device_attach(netdev);
1268 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1269 return 0;
1270
1271err_out_detach:
1272 qlcnic_detach(adapter);
1273err_out:
1274 qlcnic_clr_all_drv_state(adapter);
1275 netif_device_attach(netdev);
1276 return err;
1277}
1278#endif
1279
1280static int qlcnic_open(struct net_device *netdev)
1281{
1282 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1283 int err;
1284
1285 if (adapter->driver_mismatch)
1286 return -EIO;
1287
1288 err = qlcnic_attach(adapter);
1289 if (err)
1290 return err;
1291
1292 err = __qlcnic_up(adapter, netdev);
1293 if (err)
1294 goto err_out;
1295
1296 netif_start_queue(netdev);
1297
1298 return 0;
1299
1300err_out:
1301 qlcnic_detach(adapter);
1302 return err;
1303}
1304
1305/*
1306 * qlcnic_close - Disables a network interface entry point
1307 */
1308static int qlcnic_close(struct net_device *netdev)
1309{
1310 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1311
1312 __qlcnic_down(adapter, netdev);
1313 return 0;
1314}
1315
1316static void
1317qlcnic_tso_check(struct net_device *netdev,
1318 struct qlcnic_host_tx_ring *tx_ring,
1319 struct cmd_desc_type0 *first_desc,
1320 struct sk_buff *skb)
1321{
1322 u8 opcode = TX_ETHER_PKT;
1323 __be16 protocol = skb->protocol;
1324 u16 flags = 0, vid = 0;
1325 u32 producer;
1326 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1327 struct cmd_desc_type0 *hwdesc;
1328 struct vlan_ethhdr *vh;
1329 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1330
1331 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1332
1333 vh = (struct vlan_ethhdr *)skb->data;
1334 protocol = vh->h_vlan_encapsulated_proto;
1335 flags = FLAGS_VLAN_TAGGED;
1336
1337 } else if (vlan_tx_tag_present(skb)) {
1338
1339 flags = FLAGS_VLAN_OOB;
1340 vid = vlan_tx_tag_get(skb);
1341 qlcnic_set_tx_vlan_tci(first_desc, vid);
1342 vlan_oob = 1;
1343 }
1344
1345 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1346 skb_shinfo(skb)->gso_size > 0) {
1347
1348 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1349
1350 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1351 first_desc->total_hdr_length = hdr_len;
1352 if (vlan_oob) {
1353 first_desc->total_hdr_length += VLAN_HLEN;
1354 first_desc->tcp_hdr_offset = VLAN_HLEN;
1355 first_desc->ip_hdr_offset = VLAN_HLEN;
1356 /* Only in case of TSO on vlan device */
1357 flags |= FLAGS_VLAN_TAGGED;
1358 }
1359
1360 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1361 TX_TCP_LSO6 : TX_TCP_LSO;
1362 tso = 1;
1363
1364 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1365 u8 l4proto;
1366
1367 if (protocol == cpu_to_be16(ETH_P_IP)) {
1368 l4proto = ip_hdr(skb)->protocol;
1369
1370 if (l4proto == IPPROTO_TCP)
1371 opcode = TX_TCP_PKT;
1372 else if (l4proto == IPPROTO_UDP)
1373 opcode = TX_UDP_PKT;
1374 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1375 l4proto = ipv6_hdr(skb)->nexthdr;
1376
1377 if (l4proto == IPPROTO_TCP)
1378 opcode = TX_TCPV6_PKT;
1379 else if (l4proto == IPPROTO_UDP)
1380 opcode = TX_UDPV6_PKT;
1381 }
1382 }
1383
1384 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1385 first_desc->ip_hdr_offset += skb_network_offset(skb);
1386 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1387
1388 if (!tso)
1389 return;
1390
1391 /* For LSO, we need to copy the MAC/IP/TCP headers into
1392 * the descriptor ring
1393 */
1394 producer = tx_ring->producer;
1395 copied = 0;
1396 offset = 2;
1397
1398 if (vlan_oob) {
1399 /* Create a TSO vlan header template for firmware */
1400
1401 hwdesc = &tx_ring->desc_head[producer];
1402 tx_ring->cmd_buf_arr[producer].skb = NULL;
1403
1404 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1405 hdr_len + VLAN_HLEN);
1406
1407 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1408 skb_copy_from_linear_data(skb, vh, 12);
1409 vh->h_vlan_proto = htons(ETH_P_8021Q);
1410 vh->h_vlan_TCI = htons(vid);
1411 skb_copy_from_linear_data_offset(skb, 12,
1412 (char *)vh + 16, copy_len - 16);
1413
1414 copied = copy_len - VLAN_HLEN;
1415 offset = 0;
1416
1417 producer = get_next_index(producer, tx_ring->num_desc);
1418 }
1419
1420 while (copied < hdr_len) {
1421
1422 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1423 (hdr_len - copied));
1424
1425 hwdesc = &tx_ring->desc_head[producer];
1426 tx_ring->cmd_buf_arr[producer].skb = NULL;
1427
1428 skb_copy_from_linear_data_offset(skb, copied,
1429 (char *)hwdesc + offset, copy_len);
1430
1431 copied += copy_len;
1432 offset = 0;
1433
1434 producer = get_next_index(producer, tx_ring->num_desc);
1435 }
1436
1437 tx_ring->producer = producer;
1438 barrier();
1439 adapter->stats.lso_frames++;
1440}
1441
1442static int
1443qlcnic_map_tx_skb(struct pci_dev *pdev,
1444 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1445{
1446 struct qlcnic_skb_frag *nf;
1447 struct skb_frag_struct *frag;
1448 int i, nr_frags;
1449 dma_addr_t map;
1450
1451 nr_frags = skb_shinfo(skb)->nr_frags;
1452 nf = &pbuf->frag_array[0];
1453
1454 map = pci_map_single(pdev, skb->data,
1455 skb_headlen(skb), PCI_DMA_TODEVICE);
1456 if (pci_dma_mapping_error(pdev, map))
1457 goto out_err;
1458
1459 nf->dma = map;
1460 nf->length = skb_headlen(skb);
1461
1462 for (i = 0; i < nr_frags; i++) {
1463 frag = &skb_shinfo(skb)->frags[i];
1464 nf = &pbuf->frag_array[i+1];
1465
1466 map = pci_map_page(pdev, frag->page, frag->page_offset,
1467 frag->size, PCI_DMA_TODEVICE);
1468 if (pci_dma_mapping_error(pdev, map))
1469 goto unwind;
1470
1471 nf->dma = map;
1472 nf->length = frag->size;
1473 }
1474
1475 return 0;
1476
1477unwind:
1478 while (--i >= 0) {
1479 nf = &pbuf->frag_array[i+1];
1480 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1481 }
1482
1483 nf = &pbuf->frag_array[0];
1484 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1485
1486out_err:
1487 return -ENOMEM;
1488}
1489
1490static inline void
1491qlcnic_clear_cmddesc(u64 *desc)
1492{
1493 desc[0] = 0ULL;
1494 desc[2] = 0ULL;
1495}
1496
1497netdev_tx_t
1498qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1499{
1500 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1501 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1502 struct qlcnic_cmd_buffer *pbuf;
1503 struct qlcnic_skb_frag *buffrag;
1504 struct cmd_desc_type0 *hwdesc, *first_desc;
1505 struct pci_dev *pdev;
1506 int i, k;
1507
1508 u32 producer;
1509 int frag_count, no_of_desc;
1510 u32 num_txd = tx_ring->num_desc;
1511
1512 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1513 netif_stop_queue(netdev);
1514 return NETDEV_TX_BUSY;
1515 }
1516
1517 frag_count = skb_shinfo(skb)->nr_frags + 1;
1518
1519 /* 4 fragments per cmd des */
1520 no_of_desc = (frag_count + 3) >> 2;
1521
1522 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1523 netif_stop_queue(netdev);
1524 adapter->stats.xmit_off++;
1525 return NETDEV_TX_BUSY;
1526 }
1527
1528 producer = tx_ring->producer;
1529 pbuf = &tx_ring->cmd_buf_arr[producer];
1530
1531 pdev = adapter->pdev;
1532
1533 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1534 adapter->stats.tx_dma_map_error++;
1535 goto drop_packet;
1536 }
1537
1538 pbuf->skb = skb;
1539 pbuf->frag_count = frag_count;
1540
1541 first_desc = hwdesc = &tx_ring->desc_head[producer];
1542 qlcnic_clear_cmddesc((u64 *)hwdesc);
1543
1544 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1545 qlcnic_set_tx_port(first_desc, adapter->portnum);
1546
1547 for (i = 0; i < frag_count; i++) {
1548
1549 k = i % 4;
1550
1551 if ((k == 0) && (i > 0)) {
1552 /* move to next desc.*/
1553 producer = get_next_index(producer, num_txd);
1554 hwdesc = &tx_ring->desc_head[producer];
1555 qlcnic_clear_cmddesc((u64 *)hwdesc);
1556 tx_ring->cmd_buf_arr[producer].skb = NULL;
1557 }
1558
1559 buffrag = &pbuf->frag_array[i];
1560
1561 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1562 switch (k) {
1563 case 0:
1564 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1565 break;
1566 case 1:
1567 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1568 break;
1569 case 2:
1570 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1571 break;
1572 case 3:
1573 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1574 break;
1575 }
1576 }
1577
1578 tx_ring->producer = get_next_index(producer, num_txd);
1579
1580 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1581
1582 qlcnic_update_cmd_producer(adapter, tx_ring);
1583
1584 adapter->stats.txbytes += skb->len;
1585 adapter->stats.xmitcalled++;
1586
1587 return NETDEV_TX_OK;
1588
1589drop_packet:
1590 adapter->stats.txdropped++;
1591 dev_kfree_skb_any(skb);
1592 return NETDEV_TX_OK;
1593}
1594
1595static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1596{
1597 struct net_device *netdev = adapter->netdev;
1598 u32 temp, temp_state, temp_val;
1599 int rv = 0;
1600
1601 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1602
1603 temp_state = qlcnic_get_temp_state(temp);
1604 temp_val = qlcnic_get_temp_val(temp);
1605
1606 if (temp_state == QLCNIC_TEMP_PANIC) {
1607 dev_err(&netdev->dev,
1608 "Device temperature %d degrees C exceeds"
1609 " maximum allowed. Hardware has been shut down.\n",
1610 temp_val);
1611 rv = 1;
1612 } else if (temp_state == QLCNIC_TEMP_WARN) {
1613 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1614 dev_err(&netdev->dev,
1615 "Device temperature %d degrees C "
1616 "exceeds operating range."
1617 " Immediate action needed.\n",
1618 temp_val);
1619 }
1620 } else {
1621 if (adapter->temp == QLCNIC_TEMP_WARN) {
1622 dev_info(&netdev->dev,
1623 "Device temperature is now %d degrees C"
1624 " in normal range.\n", temp_val);
1625 }
1626 }
1627 adapter->temp = temp_state;
1628 return rv;
1629}
1630
1631void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1632{
1633 struct net_device *netdev = adapter->netdev;
1634
1635 if (adapter->ahw.linkup && !linkup) {
1636 dev_info(&netdev->dev, "NIC Link is down\n");
1637 adapter->ahw.linkup = 0;
1638 if (netif_running(netdev)) {
1639 netif_carrier_off(netdev);
1640 netif_stop_queue(netdev);
1641 }
1642 } else if (!adapter->ahw.linkup && linkup) {
1643 dev_info(&netdev->dev, "NIC Link is up\n");
1644 adapter->ahw.linkup = 1;
1645 if (netif_running(netdev)) {
1646 netif_carrier_on(netdev);
1647 netif_wake_queue(netdev);
1648 }
1649 }
1650}
1651
1652static void qlcnic_tx_timeout(struct net_device *netdev)
1653{
1654 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1655
1656 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1657 return;
1658
1659 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1660 schedule_work(&adapter->tx_timeout_task);
1661}
1662
1663static void qlcnic_tx_timeout_task(struct work_struct *work)
1664{
1665 struct qlcnic_adapter *adapter =
1666 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1667
1668 if (!netif_running(adapter->netdev))
1669 return;
1670
1671 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1672 return;
1673
1674 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1675 goto request_reset;
1676
1677 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1678 if (!qlcnic_reset_context(adapter)) {
1679 adapter->netdev->trans_start = jiffies;
1680 return;
1681
1682 /* context reset failed, fall through for fw reset */
1683 }
1684
1685request_reset:
1686 adapter->need_fw_reset = 1;
1687 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1688 QLCDB(adapter, DRV, "Resetting adapter\n");
1689}
1690
1691static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1692{
1693 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1694 struct net_device_stats *stats = &netdev->stats;
1695
1696 memset(stats, 0, sizeof(*stats));
1697
1698 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1699 stats->tx_packets = adapter->stats.xmitfinished;
1700 stats->rx_bytes = adapter->stats.rxbytes;
1701 stats->tx_bytes = adapter->stats.txbytes;
1702 stats->rx_dropped = adapter->stats.rxdropped;
1703 stats->tx_dropped = adapter->stats.txdropped;
1704
1705 return stats;
1706}
1707
1708static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1709{
1710 u32 status;
1711
1712 status = readl(adapter->isr_int_vec);
1713
1714 if (!(status & adapter->int_vec_bit))
1715 return IRQ_NONE;
1716
1717 /* check interrupt state machine, to be sure */
1718 status = readl(adapter->crb_int_state_reg);
1719 if (!ISR_LEGACY_INT_TRIGGERED(status))
1720 return IRQ_NONE;
1721
1722 writel(0xffffffff, adapter->tgt_status_reg);
1723 /* read twice to ensure write is flushed */
1724 readl(adapter->isr_int_vec);
1725 readl(adapter->isr_int_vec);
1726
1727 return IRQ_HANDLED;
1728}
1729
1730static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1731{
1732 struct qlcnic_host_sds_ring *sds_ring = data;
1733 struct qlcnic_adapter *adapter = sds_ring->adapter;
1734
1735 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1736 goto done;
1737 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1738 writel(0xffffffff, adapter->tgt_status_reg);
1739 goto done;
1740 }
1741
1742 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1743 return IRQ_NONE;
1744
1745done:
1746 adapter->diag_cnt++;
1747 qlcnic_enable_int(sds_ring);
1748 return IRQ_HANDLED;
1749}
1750
1751static irqreturn_t qlcnic_intr(int irq, void *data)
1752{
1753 struct qlcnic_host_sds_ring *sds_ring = data;
1754 struct qlcnic_adapter *adapter = sds_ring->adapter;
1755
1756 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1757 return IRQ_NONE;
1758
1759 napi_schedule(&sds_ring->napi);
1760
1761 return IRQ_HANDLED;
1762}
1763
1764static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1765{
1766 struct qlcnic_host_sds_ring *sds_ring = data;
1767 struct qlcnic_adapter *adapter = sds_ring->adapter;
1768
1769 /* clear interrupt */
1770 writel(0xffffffff, adapter->tgt_status_reg);
1771
1772 napi_schedule(&sds_ring->napi);
1773 return IRQ_HANDLED;
1774}
1775
1776static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1777{
1778 struct qlcnic_host_sds_ring *sds_ring = data;
1779
1780 napi_schedule(&sds_ring->napi);
1781 return IRQ_HANDLED;
1782}
1783
1784static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1785{
1786 u32 sw_consumer, hw_consumer;
1787 int count = 0, i;
1788 struct qlcnic_cmd_buffer *buffer;
1789 struct pci_dev *pdev = adapter->pdev;
1790 struct net_device *netdev = adapter->netdev;
1791 struct qlcnic_skb_frag *frag;
1792 int done;
1793 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1794
1795 if (!spin_trylock(&adapter->tx_clean_lock))
1796 return 1;
1797
1798 sw_consumer = tx_ring->sw_consumer;
1799 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1800
1801 while (sw_consumer != hw_consumer) {
1802 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1803 if (buffer->skb) {
1804 frag = &buffer->frag_array[0];
1805 pci_unmap_single(pdev, frag->dma, frag->length,
1806 PCI_DMA_TODEVICE);
1807 frag->dma = 0ULL;
1808 for (i = 1; i < buffer->frag_count; i++) {
1809 frag++;
1810 pci_unmap_page(pdev, frag->dma, frag->length,
1811 PCI_DMA_TODEVICE);
1812 frag->dma = 0ULL;
1813 }
1814
1815 adapter->stats.xmitfinished++;
1816 dev_kfree_skb_any(buffer->skb);
1817 buffer->skb = NULL;
1818 }
1819
1820 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1821 if (++count >= MAX_STATUS_HANDLE)
1822 break;
1823 }
1824
1825 if (count && netif_running(netdev)) {
1826 tx_ring->sw_consumer = sw_consumer;
1827
1828 smp_mb();
1829
1830 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1831 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1832 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1833 netif_wake_queue(netdev);
1834 adapter->tx_timeo_cnt = 0;
1835 adapter->stats.xmit_on++;
1836 }
1837 __netif_tx_unlock(tx_ring->txq);
1838 }
1839 }
1840 /*
1841 * If everything is freed up to consumer then check if the ring is full
1842 * If the ring is full then check if more needs to be freed and
1843 * schedule the call back again.
1844 *
1845 * This happens when there are 2 CPUs. One could be freeing and the
1846 * other filling it. If the ring is full when we get out of here and
1847 * the card has already interrupted the host then the host can miss the
1848 * interrupt.
1849 *
1850 * There is still a possible race condition and the host could miss an
1851 * interrupt. The card has to take care of this.
1852 */
1853 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1854 done = (sw_consumer == hw_consumer);
1855 spin_unlock(&adapter->tx_clean_lock);
1856
1857 return done;
1858}
1859
1860static int qlcnic_poll(struct napi_struct *napi, int budget)
1861{
1862 struct qlcnic_host_sds_ring *sds_ring =
1863 container_of(napi, struct qlcnic_host_sds_ring, napi);
1864
1865 struct qlcnic_adapter *adapter = sds_ring->adapter;
1866
1867 int tx_complete;
1868 int work_done;
1869
1870 tx_complete = qlcnic_process_cmd_ring(adapter);
1871
1872 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1873
1874 if ((work_done < budget) && tx_complete) {
1875 napi_complete(&sds_ring->napi);
1876 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1877 qlcnic_enable_int(sds_ring);
1878 }
1879
1880 return work_done;
1881}
1882
1883#ifdef CONFIG_NET_POLL_CONTROLLER
1884static void qlcnic_poll_controller(struct net_device *netdev)
1885{
1886 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1887 disable_irq(adapter->irq);
1888 qlcnic_intr(adapter->irq, adapter);
1889 enable_irq(adapter->irq);
1890}
1891#endif
1892
1893static void
1894qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
1895{
1896 u32 val;
1897
1898 val = adapter->portnum & 0xf;
1899 val |= encoding << 7;
1900 val |= (jiffies - adapter->dev_rst_time) << 8;
1901
1902 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
1903 adapter->dev_rst_time = jiffies;
1904}
1905
1906static int
1907qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
1908{
1909 u32 val;
1910
1911 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1912 state != QLCNIC_DEV_NEED_QUISCENT);
1913
1914 if (qlcnic_api_lock(adapter))
1915 return -EIO;
1916
1917 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1918
1919 if (state == QLCNIC_DEV_NEED_RESET)
1920 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
1921 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1922 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
1923
1924 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1925
1926 qlcnic_api_unlock(adapter);
1927
1928 return 0;
1929}
1930
1931static int
1932qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1933{
1934 u32 val;
1935
1936 if (qlcnic_api_lock(adapter))
1937 return -EBUSY;
1938
1939 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1940 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1941 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1942
1943 qlcnic_api_unlock(adapter);
1944
1945 return 0;
1946}
1947
1948static void
1949qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1950{
1951 u32 val;
1952
1953 if (qlcnic_api_lock(adapter))
1954 goto err;
1955
1956 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1957 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
1958 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1959
1960 if (!(val & 0x11111111))
1961 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
1962
1963 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1964 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1965 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1966
1967 qlcnic_api_unlock(adapter);
1968err:
1969 adapter->fw_fail_cnt = 0;
1970 clear_bit(__QLCNIC_START_FW, &adapter->state);
1971 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1972}
1973
1974/* Grab api lock before checking state */
1975static int
1976qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
1977{
1978 int act, state;
1979
1980 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1981 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1982
1983 if (((state & 0x11111111) == (act & 0x11111111)) ||
1984 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
1985 return 0;
1986 else
1987 return 1;
1988}
1989
1990static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
1991{
1992 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
1993
1994 if (val != QLCNIC_DRV_IDC_VER) {
1995 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
1996 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
1997 }
1998
1999 return 0;
2000}
2001
2002static int
2003qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2004{
2005 u32 val, prev_state;
2006 u8 dev_init_timeo = adapter->dev_init_timeo;
2007 u8 portnum = adapter->portnum;
2008 u8 ret;
2009
2010 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2011 return 1;
2012
2013 if (qlcnic_api_lock(adapter))
2014 return -1;
2015
2016 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2017 if (!(val & (1 << (portnum * 4)))) {
2018 QLC_DEV_SET_REF_CNT(val, portnum);
2019 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2020 }
2021
2022 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2023 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2024
2025 switch (prev_state) {
2026 case QLCNIC_DEV_COLD:
bbd8c6a4 2027 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2028 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2029 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2030 qlcnic_api_unlock(adapter);
2031 return 1;
2032
2033 case QLCNIC_DEV_READY:
96f8118c 2034 ret = qlcnic_check_idc_ver(adapter);
af19b491 2035 qlcnic_api_unlock(adapter);
96f8118c 2036 return ret;
af19b491
AKS
2037
2038 case QLCNIC_DEV_NEED_RESET:
2039 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2040 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2041 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2042 break;
2043
2044 case QLCNIC_DEV_NEED_QUISCENT:
2045 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2046 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2047 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2048 break;
2049
2050 case QLCNIC_DEV_FAILED:
2051 qlcnic_api_unlock(adapter);
2052 return -1;
bbd8c6a4
AKS
2053
2054 case QLCNIC_DEV_INITIALIZING:
2055 case QLCNIC_DEV_QUISCENT:
2056 break;
af19b491
AKS
2057 }
2058
2059 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2060
2061 do {
af19b491 2062 msleep(1000);
a5e463d0
SC
2063 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2064
2065 if (prev_state == QLCNIC_DEV_QUISCENT)
2066 continue;
2067 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2068
65b5b420
AKS
2069 if (!dev_init_timeo) {
2070 dev_err(&adapter->pdev->dev,
 2071			"Timed out waiting for device to initialize\n");
af19b491 2072 return -1;
65b5b420 2073 }
af19b491
AKS
2074
2075 if (qlcnic_api_lock(adapter))
2076 return -1;
2077
2078 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2079 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2080 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2081
96f8118c 2082 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2083 qlcnic_api_unlock(adapter);
2084
96f8118c 2085 return ret;
af19b491
AKS
2086}
2087
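/*
 * Delayed work that drives a pending reset or quiescent transition.
 * Once every function has acked (or reset_ack_timeo expires) the device
 * is moved to QUISCENT or INITIALIZING, the firmware is restarted and
 * qlcnic_attach_work is scheduled; otherwise the work reschedules
 * itself while the device is still in a transitional state.
 */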
2088static void
2089qlcnic_fwinit_work(struct work_struct *work)
2090{
2091 struct qlcnic_adapter *adapter = container_of(work,
2092 struct qlcnic_adapter, fw_work.work);
f73dfc50 2093 u32 dev_state = 0xf;
af19b491 2094
f73dfc50
AKS
2095 if (qlcnic_api_lock(adapter))
2096 goto err_ret;
af19b491 2097
a5e463d0
SC
2098 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2099 if (dev_state == QLCNIC_DEV_QUISCENT) {
2100 qlcnic_api_unlock(adapter);
2101 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2102 FW_POLL_DELAY * 2);
2103 return;
2104 }
2105
f73dfc50
AKS
2106 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
 2107		dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2108 adapter->reset_ack_timeo);
2109 goto skip_ack_check;
2110 }
2111
2112 if (!qlcnic_check_drv_state(adapter)) {
2113skip_ack_check:
2114 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2115
2116 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2117 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2118 QLCNIC_DEV_QUISCENT);
2119 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2120 FW_POLL_DELAY * 2);
 2121			QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2122 qlcnic_idc_debug_info(adapter, 0);
2123
a5e463d0
SC
2124 qlcnic_api_unlock(adapter);
2125 return;
2126 }
2127
f73dfc50
AKS
2128 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2129 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2130 QLCNIC_DEV_INITIALIZING);
2131 set_bit(__QLCNIC_START_FW, &adapter->state);
2132 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2133 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2134 }
2135
f73dfc50
AKS
2136 qlcnic_api_unlock(adapter);
2137
af19b491
AKS
2138 if (!qlcnic_start_firmware(adapter)) {
2139 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2140 return;
2141 }
af19b491
AKS
2142 goto err_ret;
2143 }
2144
f73dfc50 2145 qlcnic_api_unlock(adapter);
aa5e18c0 2146
af19b491 2147 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2148 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2149
af19b491 2150 switch (dev_state) {
a5e463d0
SC
2151 case QLCNIC_DEV_QUISCENT:
2152 case QLCNIC_DEV_NEED_QUISCENT:
f73dfc50
AKS
2153 case QLCNIC_DEV_NEED_RESET:
2154 qlcnic_schedule_work(adapter,
2155 qlcnic_fwinit_work, FW_POLL_DELAY);
2156 return;
af19b491
AKS
2157 case QLCNIC_DEV_FAILED:
2158 break;
2159
2160 default:
f73dfc50
AKS
2161 if (!qlcnic_start_firmware(adapter)) {
2162 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2163 return;
2164 }
af19b491
AKS
2165 }
2166
2167err_ret:
f73dfc50
AKS
 2168	dev_err(&adapter->pdev->dev, "Fwinit work failed: state=%u "
2169 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2170 netif_device_attach(adapter->netdev);
af19b491
AKS
2171 qlcnic_clr_all_drv_state(adapter);
2172}
2173
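/*
 * First stage of recovery: detach the interface, and unless the
 * firmware reported a fatal halt status or a temperature panic forced
 * the detach, ack the pending device state and hand over to
 * qlcnic_fwinit_work.
 */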
2174static void
2175qlcnic_detach_work(struct work_struct *work)
2176{
2177 struct qlcnic_adapter *adapter = container_of(work,
2178 struct qlcnic_adapter, fw_work.work);
2179 struct net_device *netdev = adapter->netdev;
2180 u32 status;
2181
2182 netif_device_detach(netdev);
2183
2184 qlcnic_down(adapter, netdev);
2185
ce668443 2186 rtnl_lock();
af19b491 2187 qlcnic_detach(adapter);
ce668443 2188 rtnl_unlock();
af19b491
AKS
2189
2190 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2191
2192 if (status & QLCNIC_RCODE_FATAL_ERROR)
2193 goto err_ret;
2194
2195 if (adapter->temp == QLCNIC_TEMP_PANIC)
2196 goto err_ret;
2197
ade91f8e
AKS
2198 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2199 goto err_ret;
af19b491
AKS
2200
2201 adapter->fw_wait_cnt = 0;
2202
2203 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2204
2205 return;
2206
2207err_ret:
65b5b420
AKS
2208 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2209 status, adapter->temp);
34ce3626 2210 netif_device_attach(netdev);
af19b491
AKS
2211 qlcnic_clr_all_drv_state(adapter);
2212
2213}
2214
f73dfc50 2215/* Transition to RESET state from READY state only */
af19b491
AKS
2216static void
2217qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2218{
2219 u32 state;
2220
2221 if (qlcnic_api_lock(adapter))
2222 return;
2223
2224 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2225
f73dfc50 2226 if (state == QLCNIC_DEV_READY) {
af19b491 2227 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2228 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2229 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2230 }
2231
2232 qlcnic_api_unlock(adapter);
2233}
2234
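/*
 * All recovery handlers share adapter->fw_work; scheduling simply
 * re-initializes the delayed work with the next handler.
 * qlcnic_cancel_fw_work first waits to take the __QLCNIC_RESETTING bit
 * so no recovery is in flight when the pending work is cancelled.
 */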
2235static void
2236qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2237 work_func_t func, int delay)
2238{
2239 INIT_DELAYED_WORK(&adapter->fw_work, func);
2240 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2241}
2242
2243static void
2244qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2245{
2246 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2247 msleep(10);
2248
2249 cancel_delayed_work_sync(&adapter->fw_work);
2250}
2251
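/*
 * Final stage of recovery: re-attach and bring the interface back up
 * if it was running, re-program its IPv4 addresses, then clear this
 * function's state bits and resume the periodic health poll.
 */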
2252static void
2253qlcnic_attach_work(struct work_struct *work)
2254{
2255 struct qlcnic_adapter *adapter = container_of(work,
2256 struct qlcnic_adapter, fw_work.work);
2257 struct net_device *netdev = adapter->netdev;
2258 int err;
2259
2260 if (netif_running(netdev)) {
2261 err = qlcnic_attach(adapter);
2262 if (err)
2263 goto done;
2264
2265 err = qlcnic_up(adapter, netdev);
2266 if (err) {
2267 qlcnic_detach(adapter);
2268 goto done;
2269 }
2270
2271 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2272 }
2273
af19b491 2274done:
34ce3626 2275 netif_device_attach(netdev);
af19b491
AKS
2276 adapter->fw_fail_cnt = 0;
2277 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2278
2279 if (!qlcnic_clr_drv_state(adapter))
2280 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2281 FW_POLL_DELAY);
af19b491
AKS
2282}
2283
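/*
 * Periodic health check: detach on a temperature trip, request a reset
 * when one is pending, and watch the firmware heartbeat counter. After
 * FW_FAIL_THRESH polls without a heartbeat change the firmware is
 * considered hung and, if auto_fw_reset is enabled, detach/recovery
 * work is scheduled. Returns nonzero when the poll work should not be
 * rescheduled.
 */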
2284static int
2285qlcnic_check_health(struct qlcnic_adapter *adapter)
2286{
2287 u32 state = 0, heartbit;
2288 struct net_device *netdev = adapter->netdev;
2289
2290 if (qlcnic_check_temp(adapter))
2291 goto detach;
2292
2372a5f1 2293 if (adapter->need_fw_reset)
af19b491 2294 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2295
2296 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2297 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2298 adapter->need_fw_reset = 1;
2299
2300 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2301 if (heartbit != adapter->heartbit) {
2302 adapter->heartbit = heartbit;
2303 adapter->fw_fail_cnt = 0;
2304 if (adapter->need_fw_reset)
2305 goto detach;
2306 return 0;
2307 }
2308
2309 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2310 return 0;
2311
2312 qlcnic_dev_request_reset(adapter);
2313
2314 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2315
2316 dev_info(&netdev->dev, "firmware hang detected\n");
2317
2318detach:
2319 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2320 QLCNIC_DEV_NEED_RESET;
2321
2322 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
2323 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2324
af19b491 2325 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2326 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2327 }
af19b491
AKS
2328
2329 return 1;
2330}
2331
2332static void
2333qlcnic_fw_poll_work(struct work_struct *work)
2334{
2335 struct qlcnic_adapter *adapter = container_of(work,
2336 struct qlcnic_adapter, fw_work.work);
2337
2338 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2339 goto reschedule;
2340
2341
2342 if (qlcnic_check_health(adapter))
2343 return;
2344
2345reschedule:
2346 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2347}
2348
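/*
 * sysfs interface: "bridged_mode" and "diag_mode" device attributes,
 * plus "crb" and "mem" binary files that give diagnostic tools access
 * to device registers and memory while diag mode is enabled.
 */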
2349static ssize_t
2350qlcnic_store_bridged_mode(struct device *dev,
2351 struct device_attribute *attr, const char *buf, size_t len)
2352{
2353 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2354 unsigned long new;
2355 int ret = -EINVAL;
2356
2357 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2358 goto err_out;
2359
2360 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2361 goto err_out;
2362
2363 if (strict_strtoul(buf, 2, &new))
2364 goto err_out;
2365
2366 if (!qlcnic_config_bridged_mode(adapter, !!new))
2367 ret = len;
2368
2369err_out:
2370 return ret;
2371}
2372
2373static ssize_t
2374qlcnic_show_bridged_mode(struct device *dev,
2375 struct device_attribute *attr, char *buf)
2376{
2377 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2378 int bridged_mode = 0;
2379
2380 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2381 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2382
2383 return sprintf(buf, "%d\n", bridged_mode);
2384}
2385
2386static struct device_attribute dev_attr_bridged_mode = {
2387 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2388 .show = qlcnic_show_bridged_mode,
2389 .store = qlcnic_store_bridged_mode,
2390};
2391
2392static ssize_t
2393qlcnic_store_diag_mode(struct device *dev,
2394 struct device_attribute *attr, const char *buf, size_t len)
2395{
2396 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2397 unsigned long new;
2398
2399 if (strict_strtoul(buf, 2, &new))
2400 return -EINVAL;
2401
2402 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2403 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2404
2405 return len;
2406}
2407
2408static ssize_t
2409qlcnic_show_diag_mode(struct device *dev,
2410 struct device_attribute *attr, char *buf)
2411{
2412 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2413
2414 return sprintf(buf, "%d\n",
2415 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2416}
2417
2418static struct device_attribute dev_attr_diag_mode = {
2419 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2420 .show = qlcnic_show_diag_mode,
2421 .store = qlcnic_store_diag_mode,
2422};
2423
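/*
 * CRB accesses are only allowed in diag mode and must match the window
 * being addressed: 8 bytes for the CAMQM range, 4 bytes inside CRB
 * space, naturally aligned in either case.
 */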
2424static int
2425qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2426 loff_t offset, size_t size)
2427{
897e8c7c
DP
2428 size_t crb_size = 4;
2429
af19b491
AKS
2430 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2431 return -EIO;
2432
897e8c7c
DP
2433 if (offset < QLCNIC_PCI_CRBSPACE) {
2434 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2435 QLCNIC_PCI_CAMQM_END))
2436 crb_size = 8;
2437 else
2438 return -EINVAL;
2439 }
af19b491 2440
897e8c7c
DP
2441 if ((size != crb_size) || (offset & (crb_size-1)))
2442 return -EINVAL;
af19b491
AKS
2443
2444 return 0;
2445}
2446
2447static ssize_t
2448qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2449 char *buf, loff_t offset, size_t size)
2450{
2451 struct device *dev = container_of(kobj, struct device, kobj);
2452 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2453 u32 data;
897e8c7c 2454 u64 qmdata;
af19b491
AKS
2455 int ret;
2456
2457 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2458 if (ret != 0)
2459 return ret;
2460
897e8c7c
DP
2461 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2462 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2463 memcpy(buf, &qmdata, size);
2464 } else {
2465 data = QLCRD32(adapter, offset);
2466 memcpy(buf, &data, size);
2467 }
af19b491
AKS
2468 return size;
2469}
2470
2471static ssize_t
2472qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2473 char *buf, loff_t offset, size_t size)
2474{
2475 struct device *dev = container_of(kobj, struct device, kobj);
2476 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2477 u32 data;
897e8c7c 2478 u64 qmdata;
af19b491
AKS
2479 int ret;
2480
2481 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2482 if (ret != 0)
2483 return ret;
2484
897e8c7c
DP
2485 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2486 memcpy(&qmdata, buf, size);
2487 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2488 } else {
2489 memcpy(&data, buf, size);
2490 QLCWR32(adapter, offset, data);
2491 }
af19b491
AKS
2492 return size;
2493}
2494
2495static int
2496qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2497 loff_t offset, size_t size)
2498{
2499 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2500 return -EIO;
2501
2502 if ((size != 8) || (offset & 0x7))
2503 return -EIO;
2504
2505 return 0;
2506}
2507
2508static ssize_t
2509qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2510 char *buf, loff_t offset, size_t size)
2511{
2512 struct device *dev = container_of(kobj, struct device, kobj);
2513 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2514 u64 data;
2515 int ret;
2516
2517 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2518 if (ret != 0)
2519 return ret;
2520
2521 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2522 return -EIO;
2523
2524 memcpy(buf, &data, size);
2525
2526 return size;
2527}
2528
2529static ssize_t
2530qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2531 char *buf, loff_t offset, size_t size)
2532{
2533 struct device *dev = container_of(kobj, struct device, kobj);
2534 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2535 u64 data;
2536 int ret;
2537
2538 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2539 if (ret != 0)
2540 return ret;
2541
2542 memcpy(&data, buf, size);
2543
2544 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2545 return -EIO;
2546
2547 return size;
2548}
2549
2550
2551static struct bin_attribute bin_attr_crb = {
2552 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2553 .size = 0,
2554 .read = qlcnic_sysfs_read_crb,
2555 .write = qlcnic_sysfs_write_crb,
2556};
2557
2558static struct bin_attribute bin_attr_mem = {
2559 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2560 .size = 0,
2561 .read = qlcnic_sysfs_read_mem,
2562 .write = qlcnic_sysfs_write_mem,
2563};
2564
2565static void
2566qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2567{
2568 struct device *dev = &adapter->pdev->dev;
2569
2570 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2571 if (device_create_file(dev, &dev_attr_bridged_mode))
2572 dev_warn(dev,
2573 "failed to create bridged_mode sysfs entry\n");
2574}
2575
2576static void
2577qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2578{
2579 struct device *dev = &adapter->pdev->dev;
2580
2581 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2582 device_remove_file(dev, &dev_attr_bridged_mode);
2583}
2584
2585static void
2586qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2587{
2588 struct device *dev = &adapter->pdev->dev;
2589
2590 if (device_create_file(dev, &dev_attr_diag_mode))
2591 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2592 if (device_create_bin_file(dev, &bin_attr_crb))
2593 dev_info(dev, "failed to create crb sysfs entry\n");
2594 if (device_create_bin_file(dev, &bin_attr_mem))
2595 dev_info(dev, "failed to create mem sysfs entry\n");
2596}
2597
2598
2599static void
2600qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2601{
2602 struct device *dev = &adapter->pdev->dev;
2603
2604 device_remove_file(dev, &dev_attr_diag_mode);
2605 device_remove_bin_file(dev, &bin_attr_crb);
2606 device_remove_bin_file(dev, &bin_attr_mem);
2607}
2608
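/*
 * IPv4 address tracking (CONFIG_INET): netdev and inetaddr notifiers
 * program every address added to or removed from a qlcnic interface
 * (or a VLAN stacked on one) into the device via qlcnic_config_ipaddr().
 */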
2609#ifdef CONFIG_INET
2610
2611#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2612
af19b491
AKS
2613static void
2614qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2615{
2616 struct in_device *indev;
2617 struct qlcnic_adapter *adapter = netdev_priv(dev);
2618
af19b491
AKS
2619 indev = in_dev_get(dev);
2620 if (!indev)
2621 return;
2622
2623 for_ifa(indev) {
2624 switch (event) {
2625 case NETDEV_UP:
2626 qlcnic_config_ipaddr(adapter,
2627 ifa->ifa_address, QLCNIC_IP_UP);
2628 break;
2629 case NETDEV_DOWN:
2630 qlcnic_config_ipaddr(adapter,
2631 ifa->ifa_address, QLCNIC_IP_DOWN);
2632 break;
2633 default:
2634 break;
2635 }
2636 } endfor_ifa(indev);
2637
2638 in_dev_put(indev);
af19b491
AKS
2639}
2640
2641static int qlcnic_netdev_event(struct notifier_block *this,
2642 unsigned long event, void *ptr)
2643{
2644 struct qlcnic_adapter *adapter;
2645 struct net_device *dev = (struct net_device *)ptr;
2646
2647recheck:
2648 if (dev == NULL)
2649 goto done;
2650
2651 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2652 dev = vlan_dev_real_dev(dev);
2653 goto recheck;
2654 }
2655
2656 if (!is_qlcnic_netdev(dev))
2657 goto done;
2658
2659 adapter = netdev_priv(dev);
2660
2661 if (!adapter)
2662 goto done;
2663
2664 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2665 goto done;
2666
2667 qlcnic_config_indev_addr(dev, event);
2668done:
2669 return NOTIFY_DONE;
2670}
2671
2672static int
2673qlcnic_inetaddr_event(struct notifier_block *this,
2674 unsigned long event, void *ptr)
2675{
2676 struct qlcnic_adapter *adapter;
2677 struct net_device *dev;
2678
2679 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2680
2681 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2682
2683recheck:
2684 if (dev == NULL || !netif_running(dev))
2685 goto done;
2686
2687 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2688 dev = vlan_dev_real_dev(dev);
2689 goto recheck;
2690 }
2691
2692 if (!is_qlcnic_netdev(dev))
2693 goto done;
2694
2695 adapter = netdev_priv(dev);
2696
251a84c9 2697 if (!adapter)
af19b491
AKS
2698 goto done;
2699
2700 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2701 goto done;
2702
2703 switch (event) {
2704 case NETDEV_UP:
2705 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2706 break;
2707 case NETDEV_DOWN:
2708 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2709 break;
2710 default:
2711 break;
2712 }
2713
2714done:
2715 return NOTIFY_DONE;
2716}
2717
2718static struct notifier_block qlcnic_netdev_cb = {
2719 .notifier_call = qlcnic_netdev_event,
2720};
2721
2722static struct notifier_block qlcnic_inetaddr_cb = {
2723 .notifier_call = qlcnic_inetaddr_event,
2724};
2725#else
2726static void
2727qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2728{ }
2729#endif
2730
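/*
 * PCI driver glue and module init/exit; the address notifiers above are
 * registered before the PCI driver and unregistered after it.
 */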
2731static struct pci_driver qlcnic_driver = {
2732 .name = qlcnic_driver_name,
2733 .id_table = qlcnic_pci_tbl,
2734 .probe = qlcnic_probe,
2735 .remove = __devexit_p(qlcnic_remove),
2736#ifdef CONFIG_PM
2737 .suspend = qlcnic_suspend,
2738 .resume = qlcnic_resume,
2739#endif
2740 .shutdown = qlcnic_shutdown
2741};
2742
2743static int __init qlcnic_init_module(void)
2744{
2745
2746 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2747
2748#ifdef CONFIG_INET
2749 register_netdevice_notifier(&qlcnic_netdev_cb);
2750 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2751#endif
2752
2753
2754 return pci_register_driver(&qlcnic_driver);
2755}
2756
2757module_init(qlcnic_init_module);
2758
2759static void __exit qlcnic_exit_module(void)
2760{
2761
2762 pci_unregister_driver(&qlcnic_driver);
2763
2764#ifdef CONFIG_INET
2765 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
2766 unregister_netdevice_notifier(&qlcnic_netdev_cb);
2767#endif
2768}
2769
2770module_exit(qlcnic_exit_module);