ipv4, ipv6, bonding: Restore control over number of peer notifications
[deliverable/linux.git] / drivers / net / qlcnic / qlcnic_main.c
CommitLineData
af19b491 1/*
40839129
SV
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
af19b491 4 *
40839129 5 * See LICENSE.qlcnic for copyright and licensing details.
af19b491
AKS
6 */
7
5a0e3ad6 8#include <linux/slab.h>
af19b491
AKS
9#include <linux/vmalloc.h>
10#include <linux/interrupt.h>
11
12#include "qlcnic.h"
13
7e56cac4 14#include <linux/swab.h>
af19b491 15#include <linux/dma-mapping.h>
af19b491
AKS
16#include <net/ip.h>
17#include <linux/ipv6.h>
18#include <linux/inetdevice.h>
19#include <linux/sysfs.h>
451724c8 20#include <linux/aer.h>
af19b491 21
7f9a0c34 22MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
af19b491
AKS
23MODULE_LICENSE("GPL");
24MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
25MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
26
27char qlcnic_driver_name[] = "qlcnic";
7f9a0c34
SV
28static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
29 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
af19b491 30
f7ec804a 31static struct workqueue_struct *qlcnic_wq;
b5e5492c 32static int qlcnic_mac_learn;
b11a25aa 33module_param(qlcnic_mac_learn, int, 0444);
b5e5492c
AKS
34MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
35
af19b491 36static int use_msi = 1;
b11a25aa 37module_param(use_msi, int, 0444);
af19b491
AKS
38MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
39
40static int use_msi_x = 1;
b11a25aa 41module_param(use_msi_x, int, 0444);
af19b491
AKS
42MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
43
9ce13ca8 44static int auto_fw_reset = 1;
af19b491
AKS
45module_param(auto_fw_reset, int, 0644);
46MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
47
4d5bdb38 48static int load_fw_file;
b11a25aa 49module_param(load_fw_file, int, 0444);
4d5bdb38
AKS
50MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
51
2e9d722d 52static int qlcnic_config_npars;
b11a25aa 53module_param(qlcnic_config_npars, int, 0444);
2e9d722d
AC
54MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
55
af19b491
AKS
56static int __devinit qlcnic_probe(struct pci_dev *pdev,
57 const struct pci_device_id *ent);
58static void __devexit qlcnic_remove(struct pci_dev *pdev);
59static int qlcnic_open(struct net_device *netdev);
60static int qlcnic_close(struct net_device *netdev);
af19b491 61static void qlcnic_tx_timeout(struct net_device *netdev);
af19b491
AKS
62static void qlcnic_attach_work(struct work_struct *work);
63static void qlcnic_fwinit_work(struct work_struct *work);
64static void qlcnic_fw_poll_work(struct work_struct *work);
65static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
66 work_func_t func, int delay);
67static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
68static int qlcnic_poll(struct napi_struct *napi, int budget);
8f891387 69static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
af19b491
AKS
70#ifdef CONFIG_NET_POLL_CONTROLLER
71static void qlcnic_poll_controller(struct net_device *netdev);
72#endif
73
74static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
75static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
76static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
77static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
78
6df900e9 79static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
21854f02 80static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
af19b491
AKS
81static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
82
7eb9855d 83static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
af19b491
AKS
84static irqreturn_t qlcnic_intr(int irq, void *data);
85static irqreturn_t qlcnic_msi_intr(int irq, void *data);
86static irqreturn_t qlcnic_msix_intr(int irq, void *data);
87
88static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
aec1e845 89static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
9f26f547
AC
90static int qlcnic_start_firmware(struct qlcnic_adapter *);
91
b5e5492c
AKS
92static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
93static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
9f26f547 94static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
9f26f547
AC
95static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
96static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
97static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
0325d69b
RB
98static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
99 struct qlcnic_esw_func_cfg *);
b9796a14
AC
100static void qlcnic_vlan_rx_add(struct net_device *, u16);
101static void qlcnic_vlan_rx_del(struct net_device *, u16);
102
af19b491
AKS
103/* PCI Device ID Table */
104#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
106 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
107
108#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
109
6a902881 110static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
af19b491
AKS
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
112 {0,}
113};
114
115MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
116
117
b1fc6d3c 118inline void
af19b491
AKS
119qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring)
121{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
af19b491
AKS
123}
124
125static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130};
131
132static const
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136{
137 writel(0, sds_ring->crb_intr_mask);
138}
139
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141{
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
807540ba 157 return recv_ctx->sds_rings == NULL;
af19b491
AKS
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 181
182 if (ring == adapter->max_sds_rings - 1)
183 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
184 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
185 else
186 netif_napi_add(netdev, &sds_ring->napi,
187 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
af19b491
AKS
188 }
189
190 return 0;
191}
192
193static void
194qlcnic_napi_del(struct qlcnic_adapter *adapter)
195{
196 int ring;
197 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
b1fc6d3c 205 qlcnic_free_sds_rings(adapter->recv_ctx);
af19b491
AKS
206}
207
208static void
209qlcnic_napi_enable(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 214
780ab790
AKS
215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
216 return;
217
af19b491
AKS
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 napi_enable(&sds_ring->napi);
221 qlcnic_enable_int(sds_ring);
222 }
223}
224
225static void
226qlcnic_napi_disable(struct qlcnic_adapter *adapter)
227{
228 int ring;
229 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 231
780ab790
AKS
232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
233 return;
234
af19b491
AKS
235 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
236 sds_ring = &recv_ctx->sds_rings[ring];
237 qlcnic_disable_int(sds_ring);
238 napi_synchronize(&sds_ring->napi);
239 napi_disable(&sds_ring->napi);
240 }
241}
242
243static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
244{
245 memset(&adapter->stats, 0, sizeof(adapter->stats));
af19b491
AKS
246}
247
af19b491
AKS
248static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
249{
250 u32 control;
251 int pos;
252
253 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
254 if (pos) {
255 pci_read_config_dword(pdev, pos, &control);
256 if (enable)
257 control |= PCI_MSIX_FLAGS_ENABLE;
258 else
259 control = 0;
260 pci_write_config_dword(pdev, pos, control);
261 }
262}
263
264static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
265{
266 int i;
267
268 for (i = 0; i < count; i++)
269 adapter->msix_entries[i].entry = i;
270}
271
272static int
273qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
274{
2e9d722d 275 u8 mac_addr[ETH_ALEN];
af19b491
AKS
276 struct net_device *netdev = adapter->netdev;
277 struct pci_dev *pdev = adapter->pdev;
278
da48e6c3 279 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
af19b491
AKS
280 return -EIO;
281
2e9d722d 282 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
283 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
284 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
285
286 /* set station address */
287
288 if (!is_valid_ether_addr(netdev->perm_addr))
289 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
290 netdev->dev_addr);
291
292 return 0;
293}
294
295static int qlcnic_set_mac(struct net_device *netdev, void *p)
296{
297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
298 struct sockaddr *addr = p;
299
7373373d
RB
300 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
301 return -EOPNOTSUPP;
302
af19b491
AKS
303 if (!is_valid_ether_addr(addr->sa_data))
304 return -EINVAL;
305
8a15ad1f 306 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
307 netif_device_detach(netdev);
308 qlcnic_napi_disable(adapter);
309 }
310
311 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
312 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
313 qlcnic_set_multi(adapter->netdev);
314
8a15ad1f 315 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
316 netif_device_attach(netdev);
317 qlcnic_napi_enable(adapter);
318 }
319 return 0;
320}
321
322static const struct net_device_ops qlcnic_netdev_ops = {
323 .ndo_open = qlcnic_open,
324 .ndo_stop = qlcnic_close,
325 .ndo_start_xmit = qlcnic_xmit_frame,
326 .ndo_get_stats = qlcnic_get_stats,
327 .ndo_validate_addr = eth_validate_addr,
328 .ndo_set_multicast_list = qlcnic_set_multi,
329 .ndo_set_mac_address = qlcnic_set_mac,
330 .ndo_change_mtu = qlcnic_change_mtu,
135d84a9
MM
331 .ndo_fix_features = qlcnic_fix_features,
332 .ndo_set_features = qlcnic_set_features,
af19b491 333 .ndo_tx_timeout = qlcnic_tx_timeout,
b9796a14
AC
334 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
335 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
af19b491
AKS
336#ifdef CONFIG_NET_POLL_CONTROLLER
337 .ndo_poll_controller = qlcnic_poll_controller,
338#endif
339};
340
2e9d722d 341static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
342 .config_bridged_mode = qlcnic_config_bridged_mode,
343 .config_led = qlcnic_config_led,
9f26f547
AC
344 .start_firmware = qlcnic_start_firmware
345};
346
347static struct qlcnic_nic_template qlcnic_vf_ops = {
9f26f547
AC
348 .config_bridged_mode = qlcnicvf_config_bridged_mode,
349 .config_led = qlcnicvf_config_led,
9f26f547 350 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
351};
352
af19b491
AKS
353static void
354qlcnic_setup_intr(struct qlcnic_adapter *adapter)
355{
356 const struct qlcnic_legacy_intr_set *legacy_intrp;
357 struct pci_dev *pdev = adapter->pdev;
358 int err, num_msix;
359
b1fc6d3c 360 if (adapter->msix_supported) {
af19b491
AKS
361 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
362 MSIX_ENTRIES_PER_ADAPTER : 2;
363 } else
364 num_msix = 1;
365
366 adapter->max_sds_rings = 1;
367
368 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
369
b1fc6d3c 370 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
af19b491
AKS
371
372 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
373 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
374 legacy_intrp->tgt_status_reg);
375 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
376 legacy_intrp->tgt_mask_reg);
377 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
378
379 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
380 ISR_INT_STATE_REG);
381
382 qlcnic_set_msix_bit(pdev, 0);
383
384 if (adapter->msix_supported) {
385
386 qlcnic_init_msix_entries(adapter, num_msix);
387 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
388 if (err == 0) {
389 adapter->flags |= QLCNIC_MSIX_ENABLED;
390 qlcnic_set_msix_bit(pdev, 1);
391
b1fc6d3c 392 adapter->max_sds_rings = num_msix;
af19b491
AKS
393
394 dev_info(&pdev->dev, "using msi-x interrupts\n");
395 return;
396 }
397
398 if (err > 0)
399 pci_disable_msix(pdev);
400
401 /* fall through for msi */
402 }
403
404 if (use_msi && !pci_enable_msi(pdev)) {
405 adapter->flags |= QLCNIC_MSI_ENABLED;
406 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
b1fc6d3c 407 msi_tgt_status[adapter->ahw->pci_func]);
af19b491
AKS
408 dev_info(&pdev->dev, "using msi interrupts\n");
409 adapter->msix_entries[0].vector = pdev->irq;
410 return;
411 }
412
413 dev_info(&pdev->dev, "using legacy interrupts\n");
414 adapter->msix_entries[0].vector = pdev->irq;
415}
416
417static void
418qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
419{
420 if (adapter->flags & QLCNIC_MSIX_ENABLED)
421 pci_disable_msix(adapter->pdev);
422 if (adapter->flags & QLCNIC_MSI_ENABLED)
423 pci_disable_msi(adapter->pdev);
424}
425
426static void
427qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
428{
b1fc6d3c
AC
429 if (adapter->ahw->pci_base0 != NULL)
430 iounmap(adapter->ahw->pci_base0);
af19b491
AKS
431}
432
346fe763
RB
433static int
434qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
435{
e88db3bd 436 struct qlcnic_pci_info *pci_info;
900853a4 437 int i, ret = 0;
346fe763
RB
438 u8 pfn;
439
e88db3bd
DC
440 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
441 if (!pci_info)
442 return -ENOMEM;
443
ca315ac2 444 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 445 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 446 if (!adapter->npars) {
900853a4 447 ret = -ENOMEM;
e88db3bd
DC
448 goto err_pci_info;
449 }
346fe763 450
ca315ac2 451 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
452 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
453 if (!adapter->eswitch) {
900853a4 454 ret = -ENOMEM;
ca315ac2 455 goto err_npars;
346fe763
RB
456 }
457
458 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
459 if (ret)
460 goto err_eswitch;
346fe763 461
ca315ac2
DC
462 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
463 pfn = pci_info[i].id;
f848d6dd
SC
464 if (pfn > QLCNIC_MAX_PCI_FUNC) {
465 ret = QL_STATUS_INVALID_PARAM;
466 goto err_eswitch;
467 }
a1c0c459
SC
468 adapter->npars[pfn].active = (u8)pci_info[i].active;
469 adapter->npars[pfn].type = (u8)pci_info[i].type;
470 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
ca315ac2
DC
471 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
472 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
473 }
474
ca315ac2
DC
475 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
476 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
477
e88db3bd 478 kfree(pci_info);
ca315ac2
DC
479 return 0;
480
481err_eswitch:
346fe763
RB
482 kfree(adapter->eswitch);
483 adapter->eswitch = NULL;
ca315ac2 484err_npars:
346fe763 485 kfree(adapter->npars);
ca315ac2 486 adapter->npars = NULL;
e88db3bd
DC
487err_pci_info:
488 kfree(pci_info);
346fe763
RB
489
490 return ret;
491}
492
2e9d722d
AC
493static int
494qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
495{
496 u8 id;
497 u32 ref_count;
498 int i, ret = 1;
499 u32 data = QLCNIC_MGMT_FUNC;
b1fc6d3c 500 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d
AC
501
502 /* If other drivers are not in use set their privilege level */
31018e06 503 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2e9d722d
AC
504 ret = qlcnic_api_lock(adapter);
505 if (ret)
506 goto err_lock;
2e9d722d 507
0e33c664
AC
508 if (qlcnic_config_npars) {
509 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 510 id = i;
0e33c664 511 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
b1fc6d3c 512 id == adapter->ahw->pci_func)
0e33c664
AC
513 continue;
514 data |= (qlcnic_config_npars &
515 QLC_DEV_SET_DRV(0xf, id));
516 }
517 } else {
518 data = readl(priv_op);
b1fc6d3c 519 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
0e33c664 520 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
b1fc6d3c 521 adapter->ahw->pci_func));
2e9d722d
AC
522 }
523 writel(data, priv_op);
2e9d722d
AC
524 qlcnic_api_unlock(adapter);
525err_lock:
526 return ret;
527}
528
0866d96d
AC
529static void
530qlcnic_check_vf(struct qlcnic_adapter *adapter)
2e9d722d
AC
531{
532 void __iomem *msix_base_addr;
533 void __iomem *priv_op;
534 u32 func;
535 u32 msix_base;
536 u32 op_mode, priv_level;
537
538 /* Determine FW API version */
b1fc6d3c
AC
539 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
540 QLCNIC_FW_API);
2e9d722d
AC
541
542 /* Find PCI function number */
543 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
b1fc6d3c 544 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
2e9d722d
AC
545 msix_base = readl(msix_base_addr);
546 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
b1fc6d3c 547 adapter->ahw->pci_func = func;
2e9d722d
AC
548
549 /* Determine function privilege level */
b1fc6d3c 550 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d 551 op_mode = readl(priv_op);
0e33c664 552 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 553 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 554 else
b1fc6d3c 555 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
2e9d722d 556
0866d96d 557 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
9f26f547
AC
558 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
559 dev_info(&adapter->pdev->dev,
560 "HAL Version: %d Non Privileged function\n",
561 adapter->fw_hal_version);
562 adapter->nic_ops = &qlcnic_vf_ops;
0866d96d
AC
563 } else
564 adapter->nic_ops = &qlcnic_ops;
2e9d722d
AC
565}
566
af19b491
AKS
567static int
568qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
569{
570 void __iomem *mem_ptr0 = NULL;
571 resource_size_t mem_base;
572 unsigned long mem_len, pci_len0 = 0;
573
574 struct pci_dev *pdev = adapter->pdev;
af19b491 575
af19b491
AKS
576 /* remap phys address */
577 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
578 mem_len = pci_resource_len(pdev, 0);
579
580 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
581
582 mem_ptr0 = pci_ioremap_bar(pdev, 0);
583 if (mem_ptr0 == NULL) {
584 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
585 return -EIO;
586 }
587 pci_len0 = mem_len;
588 } else {
589 return -EIO;
590 }
591
592 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
593
b1fc6d3c
AC
594 adapter->ahw->pci_base0 = mem_ptr0;
595 adapter->ahw->pci_len0 = pci_len0;
af19b491 596
0866d96d 597 qlcnic_check_vf(adapter);
2e9d722d 598
b1fc6d3c
AC
599 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
600 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
601 adapter->ahw->pci_func)));
af19b491
AKS
602
603 return 0;
604}
605
606static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
607{
608 struct pci_dev *pdev = adapter->pdev;
609 int i, found = 0;
610
611 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
612 if (qlcnic_boards[i].vendor == pdev->vendor &&
613 qlcnic_boards[i].device == pdev->device &&
614 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
615 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
616 sprintf(name, "%pM: %s" ,
617 adapter->mac_addr,
618 qlcnic_boards[i].short_name);
af19b491
AKS
619 found = 1;
620 break;
621 }
622
623 }
624
625 if (!found)
7f9a0c34 626 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
627}
628
629static void
630qlcnic_check_options(struct qlcnic_adapter *adapter)
631{
632 u32 fw_major, fw_minor, fw_build;
af19b491 633 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
634
635 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
636 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
637 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
638
639 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
640
251a84c9
AKS
641 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
642 fw_major, fw_minor, fw_build);
b1fc6d3c 643 if (adapter->ahw->port_type == QLCNIC_XGBE) {
90d19005
SC
644 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
645 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
646 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
647 } else {
648 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
649 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
650 }
651
af19b491 652 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
90d19005
SC
653 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
654
b1fc6d3c 655 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
af19b491
AKS
656 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
657 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
90d19005
SC
658 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
659 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
af19b491
AKS
660 }
661
662 adapter->msix_supported = !!use_msi_x;
af19b491
AKS
663
664 adapter->num_txd = MAX_CMD_DESCRIPTORS;
665
251b036a 666 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
667}
668
174240a8
RB
669static int
670qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
671{
672 int err;
673 struct qlcnic_info nic_info;
674
b1fc6d3c 675 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
174240a8
RB
676 if (err)
677 return err;
678
a1c0c459 679 adapter->physical_port = (u8)nic_info.phys_port;
174240a8
RB
680 adapter->switch_mode = nic_info.switch_mode;
681 adapter->max_tx_ques = nic_info.max_tx_ques;
682 adapter->max_rx_ques = nic_info.max_rx_ques;
683 adapter->capabilities = nic_info.capabilities;
684 adapter->max_mac_filters = nic_info.max_mac_filters;
685 adapter->max_mtu = nic_info.max_mtu;
686
687 if (adapter->capabilities & BIT_6)
688 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
689 else
690 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
691
692 return err;
693}
694
8cf61f89
AKS
695static void
696qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
697 struct qlcnic_esw_func_cfg *esw_cfg)
698{
699 if (esw_cfg->discard_tagged)
700 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
701 else
702 adapter->flags |= QLCNIC_TAGGING_ENABLED;
703
704 if (esw_cfg->vlan_id)
705 adapter->pvid = esw_cfg->vlan_id;
706 else
707 adapter->pvid = 0;
708}
709
b9796a14
AC
710static void
711qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
712{
713 struct qlcnic_adapter *adapter = netdev_priv(netdev);
714 set_bit(vid, adapter->vlans);
715}
716
717static void
718qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
719{
720 struct qlcnic_adapter *adapter = netdev_priv(netdev);
721
722 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
723 clear_bit(vid, adapter->vlans);
724}
725
0325d69b
RB
726static void
727qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
728 struct qlcnic_esw_func_cfg *esw_cfg)
729{
ee07c1a7
RB
730 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
731 QLCNIC_PROMISC_DISABLED);
7613c87b
RB
732
733 if (esw_cfg->mac_anti_spoof)
734 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 735
7373373d
RB
736 if (!esw_cfg->mac_override)
737 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
738
ee07c1a7
RB
739 if (!esw_cfg->promisc_mode)
740 adapter->flags |= QLCNIC_PROMISC_DISABLED;
741
0325d69b
RB
742 qlcnic_set_netdev_features(adapter, esw_cfg);
743}
744
745static int
746qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
747{
748 struct qlcnic_esw_func_cfg esw_cfg;
749
750 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
751 return 0;
752
b1fc6d3c 753 esw_cfg.pci_func = adapter->ahw->pci_func;
0325d69b
RB
754 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
755 return -EIO;
8cf61f89 756 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
757 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
758
759 return 0;
760}
761
762static void
763qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
764 struct qlcnic_esw_func_cfg *esw_cfg)
765{
766 struct net_device *netdev = adapter->netdev;
767 unsigned long features, vlan_features;
768
135d84a9 769 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
0325d69b
RB
770 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
771 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
b9796a14 772 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
0325d69b
RB
773
774 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
775 features |= (NETIF_F_TSO | NETIF_F_TSO6);
776 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
777 }
b56421d0
RB
778
779 if (netdev->features & NETIF_F_LRO)
0325d69b
RB
780 features |= NETIF_F_LRO;
781
782 if (esw_cfg->offload_flags & BIT_0) {
783 netdev->features |= features;
0325d69b
RB
784 if (!(esw_cfg->offload_flags & BIT_1))
785 netdev->features &= ~NETIF_F_TSO;
786 if (!(esw_cfg->offload_flags & BIT_2))
787 netdev->features &= ~NETIF_F_TSO6;
788 } else {
789 netdev->features &= ~features;
0325d69b
RB
790 }
791
792 netdev->vlan_features = (features & vlan_features);
793}
794
0866d96d
AC
795static int
796qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
797{
798 void __iomem *priv_op;
799 u32 op_mode, priv_level;
800 int err = 0;
801
174240a8
RB
802 err = qlcnic_initialize_nic(adapter);
803 if (err)
804 return err;
805
0866d96d
AC
806 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
807 return 0;
808
b1fc6d3c 809 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
0866d96d 810 op_mode = readl(priv_op);
b1fc6d3c 811 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d
AC
812
813 if (op_mode == QLC_DEV_DRV_DEFAULT)
814 priv_level = QLCNIC_MGMT_FUNC;
815 else
b1fc6d3c 816 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d 817
174240a8 818 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
0866d96d
AC
819 if (priv_level == QLCNIC_MGMT_FUNC) {
820 adapter->op_mode = QLCNIC_MGMT_FUNC;
821 err = qlcnic_init_pci_info(adapter);
822 if (err)
823 return err;
824 /* Set privilege level for other functions */
825 qlcnic_set_function_modes(adapter);
826 dev_info(&adapter->pdev->dev,
827 "HAL Version: %d, Management function\n",
828 adapter->fw_hal_version);
829 } else if (priv_level == QLCNIC_PRIV_FUNC) {
830 adapter->op_mode = QLCNIC_PRIV_FUNC;
831 dev_info(&adapter->pdev->dev,
832 "HAL Version: %d, Privileged function\n",
833 adapter->fw_hal_version);
834 }
174240a8 835 }
0866d96d
AC
836
837 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
838
839 return err;
840}
841
0325d69b
RB
842static int
843qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
844{
845 struct qlcnic_esw_func_cfg esw_cfg;
846 struct qlcnic_npar_info *npar;
847 u8 i;
848
174240a8 849 if (adapter->need_fw_reset)
0325d69b
RB
850 return 0;
851
852 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
853 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
854 continue;
855 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
856 esw_cfg.pci_func = i;
857 esw_cfg.offload_flags = BIT_0;
7373373d 858 esw_cfg.mac_override = BIT_0;
ee07c1a7 859 esw_cfg.promisc_mode = BIT_0;
0325d69b
RB
860 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
861 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
862 if (qlcnic_config_switch_port(adapter, &esw_cfg))
863 return -EIO;
864 npar = &adapter->npars[i];
865 npar->pvid = esw_cfg.vlan_id;
7373373d 866 npar->mac_override = esw_cfg.mac_override;
0325d69b
RB
867 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
868 npar->discard_tagged = esw_cfg.discard_tagged;
869 npar->promisc_mode = esw_cfg.promisc_mode;
870 npar->offload_flags = esw_cfg.offload_flags;
871 }
872
873 return 0;
874}
875
4e8acb01
RB
876static int
877qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
878 struct qlcnic_npar_info *npar, int pci_func)
879{
880 struct qlcnic_esw_func_cfg esw_cfg;
881 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
882 esw_cfg.pci_func = pci_func;
883 esw_cfg.vlan_id = npar->pvid;
7373373d 884 esw_cfg.mac_override = npar->mac_override;
4e8acb01
RB
885 esw_cfg.discard_tagged = npar->discard_tagged;
886 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
887 esw_cfg.offload_flags = npar->offload_flags;
888 esw_cfg.promisc_mode = npar->promisc_mode;
889 if (qlcnic_config_switch_port(adapter, &esw_cfg))
890 return -EIO;
891
892 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
893 if (qlcnic_config_switch_port(adapter, &esw_cfg))
894 return -EIO;
895
896 return 0;
897}
898
cea8975e
AC
899static int
900qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
901{
4e8acb01 902 int i, err;
cea8975e
AC
903 struct qlcnic_npar_info *npar;
904 struct qlcnic_info nic_info;
905
174240a8 906 if (!adapter->need_fw_reset)
cea8975e
AC
907 return 0;
908
4e8acb01
RB
909 /* Set the NPAR config data after FW reset */
910 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
911 npar = &adapter->npars[i];
912 if (npar->type != QLCNIC_TYPE_NIC)
913 continue;
914 err = qlcnic_get_nic_info(adapter, &nic_info, i);
915 if (err)
916 return err;
917 nic_info.min_tx_bw = npar->min_bw;
918 nic_info.max_tx_bw = npar->max_bw;
919 err = qlcnic_set_nic_info(adapter, &nic_info);
920 if (err)
921 return err;
cea8975e 922
4e8acb01
RB
923 if (npar->enable_pm) {
924 err = qlcnic_config_port_mirroring(adapter,
925 npar->dest_npar, 1, i);
926 if (err)
927 return err;
cea8975e 928 }
4e8acb01
RB
929 err = qlcnic_reset_eswitch_config(adapter, npar, i);
930 if (err)
931 return err;
cea8975e 932 }
4e8acb01 933 return 0;
cea8975e
AC
934}
935
78f84e1a
AKS
936static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
937{
938 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
939 u32 npar_state;
940
941 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
942 return 0;
943
944 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
945 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
946 msleep(1000);
947 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
948 }
949 if (!npar_opt_timeo) {
950 dev_err(&adapter->pdev->dev,
951 "Waiting for NPAR state to opertional timeout\n");
952 return -EIO;
953 }
954 return 0;
955}
956
174240a8
RB
957static int
958qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
959{
960 int err;
961
962 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
963 adapter->op_mode != QLCNIC_MGMT_FUNC)
964 return 0;
965
966 err = qlcnic_set_default_offload_settings(adapter);
967 if (err)
968 return err;
969
970 err = qlcnic_reset_npar_config(adapter);
971 if (err)
972 return err;
973
974 qlcnic_dev_set_npar_ready(adapter);
975
976 return err;
977}
978
af19b491
AKS
979static int
980qlcnic_start_firmware(struct qlcnic_adapter *adapter)
981{
d4066833 982 int err;
af19b491 983
aa5e18c0
SC
984 err = qlcnic_can_start_firmware(adapter);
985 if (err < 0)
986 return err;
987 else if (!err)
d4066833 988 goto check_fw_status;
af19b491 989
4d5bdb38
AKS
990 if (load_fw_file)
991 qlcnic_request_firmware(adapter);
8f891387 992 else {
8cfdce08
SC
993 err = qlcnic_check_flash_fw_ver(adapter);
994 if (err)
8f891387 995 goto err_out;
996
4d5bdb38 997 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 998 }
af19b491
AKS
999
1000 err = qlcnic_need_fw_reset(adapter);
af19b491 1001 if (err == 0)
4e70812b 1002 goto check_fw_status;
af19b491 1003
d4066833
SC
1004 err = qlcnic_pinit_from_rom(adapter);
1005 if (err)
1006 goto err_out;
af19b491
AKS
1007
1008 err = qlcnic_load_firmware(adapter);
1009 if (err)
1010 goto err_out;
1011
1012 qlcnic_release_firmware(adapter);
d4066833 1013 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1014
d4066833
SC
1015check_fw_status:
1016 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
1017 if (err)
1018 goto err_out;
1019
1020 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1021 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1022
0866d96d
AC
1023 err = qlcnic_check_eswitch_mode(adapter);
1024 if (err) {
1025 dev_err(&adapter->pdev->dev,
1026 "Memory allocation failed for eswitch\n");
1027 goto err_out;
1028 }
174240a8
RB
1029 err = qlcnic_set_mgmt_operations(adapter);
1030 if (err)
1031 goto err_out;
1032
1033 qlcnic_check_options(adapter);
af19b491
AKS
1034 adapter->need_fw_reset = 0;
1035
a7fc948f
AKS
1036 qlcnic_release_firmware(adapter);
1037 return 0;
af19b491
AKS
1038
1039err_out:
a7fc948f
AKS
1040 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1041 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1042
af19b491
AKS
1043 qlcnic_release_firmware(adapter);
1044 return err;
1045}
1046
1047static int
1048qlcnic_request_irq(struct qlcnic_adapter *adapter)
1049{
1050 irq_handler_t handler;
1051 struct qlcnic_host_sds_ring *sds_ring;
1052 int err, ring;
1053
1054 unsigned long flags = 0;
1055 struct net_device *netdev = adapter->netdev;
b1fc6d3c 1056 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 1057
7eb9855d
AKS
1058 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1059 handler = qlcnic_tmp_intr;
1060 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1061 flags |= IRQF_SHARED;
1062
1063 } else {
1064 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1065 handler = qlcnic_msix_intr;
1066 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1067 handler = qlcnic_msi_intr;
1068 else {
1069 flags |= IRQF_SHARED;
1070 handler = qlcnic_intr;
1071 }
af19b491
AKS
1072 }
1073 adapter->irq = netdev->irq;
1074
1075 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1076 sds_ring = &recv_ctx->sds_rings[ring];
1077 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1078 err = request_irq(sds_ring->irq, handler,
1079 flags, sds_ring->name, sds_ring);
1080 if (err)
1081 return err;
1082 }
1083
1084 return 0;
1085}
1086
1087static void
1088qlcnic_free_irq(struct qlcnic_adapter *adapter)
1089{
1090 int ring;
1091 struct qlcnic_host_sds_ring *sds_ring;
1092
b1fc6d3c 1093 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
1094
1095 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1096 sds_ring = &recv_ctx->sds_rings[ring];
1097 free_irq(sds_ring->irq, sds_ring);
1098 }
1099}
1100
af19b491
AKS
1101static int
1102__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1103{
8a15ad1f
AKS
1104 int ring;
1105 struct qlcnic_host_rds_ring *rds_ring;
1106
af19b491
AKS
1107 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1108 return -EIO;
1109
8a15ad1f
AKS
1110 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1111 return 0;
0325d69b
RB
1112 if (qlcnic_set_eswitch_port_config(adapter))
1113 return -EIO;
8a15ad1f
AKS
1114
1115 if (qlcnic_fw_create_ctx(adapter))
1116 return -EIO;
1117
1118 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1119 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1120 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1121 }
1122
af19b491
AKS
1123 qlcnic_set_multi(netdev);
1124 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1125
b1fc6d3c 1126 adapter->ahw->linkup = 0;
af19b491
AKS
1127
1128 if (adapter->max_sds_rings > 1)
1129 qlcnic_config_rss(adapter, 1);
1130
1131 qlcnic_config_intr_coalesce(adapter);
1132
24763d80 1133 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1134 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1135
1136 qlcnic_napi_enable(adapter);
1137
1138 qlcnic_linkevent_request(adapter, 1);
1139
68bf1c68 1140 adapter->reset_context = 0;
af19b491
AKS
1141 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1142 return 0;
1143}
1144
1145/* Usage: During resume and firmware recovery module.*/
1146
1147static int
1148qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1149{
1150 int err = 0;
1151
1152 rtnl_lock();
1153 if (netif_running(netdev))
1154 err = __qlcnic_up(adapter, netdev);
1155 rtnl_unlock();
1156
1157 return err;
1158}
1159
1160static void
1161__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1162{
1163 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1164 return;
1165
1166 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1167 return;
1168
1169 smp_mb();
1170 spin_lock(&adapter->tx_clean_lock);
1171 netif_carrier_off(netdev);
1172 netif_tx_disable(netdev);
1173
1174 qlcnic_free_mac_list(adapter);
1175
b5e5492c
AKS
1176 if (adapter->fhash.fnum)
1177 qlcnic_delete_lb_filters(adapter);
1178
af19b491
AKS
1179 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1180
1181 qlcnic_napi_disable(adapter);
1182
8a15ad1f
AKS
1183 qlcnic_fw_destroy_ctx(adapter);
1184
1185 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1186 qlcnic_release_tx_buffers(adapter);
1187 spin_unlock(&adapter->tx_clean_lock);
1188}
1189
1190/* Usage: During suspend and firmware recovery module */
1191
1192static void
1193qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1194{
1195 rtnl_lock();
1196 if (netif_running(netdev))
1197 __qlcnic_down(adapter, netdev);
1198 rtnl_unlock();
1199
1200}
1201
1202static int
1203qlcnic_attach(struct qlcnic_adapter *adapter)
1204{
1205 struct net_device *netdev = adapter->netdev;
1206 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1207 int err;
af19b491
AKS
1208
1209 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1210 return 0;
1211
af19b491
AKS
1212 err = qlcnic_napi_add(adapter, netdev);
1213 if (err)
1214 return err;
1215
1216 err = qlcnic_alloc_sw_resources(adapter);
1217 if (err) {
1218 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1219 goto err_out_napi_del;
af19b491
AKS
1220 }
1221
1222 err = qlcnic_alloc_hw_resources(adapter);
1223 if (err) {
1224 dev_err(&pdev->dev, "Error in setting hw resources\n");
1225 goto err_out_free_sw;
1226 }
1227
af19b491
AKS
1228 err = qlcnic_request_irq(adapter);
1229 if (err) {
1230 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1231 goto err_out_free_hw;
af19b491
AKS
1232 }
1233
af19b491
AKS
1234 qlcnic_create_sysfs_entries(adapter);
1235
1236 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1237 return 0;
1238
8a15ad1f 1239err_out_free_hw:
af19b491
AKS
1240 qlcnic_free_hw_resources(adapter);
1241err_out_free_sw:
1242 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1243err_out_napi_del:
1244 qlcnic_napi_del(adapter);
af19b491
AKS
1245 return err;
1246}
1247
1248static void
1249qlcnic_detach(struct qlcnic_adapter *adapter)
1250{
1251 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1252 return;
1253
1254 qlcnic_remove_sysfs_entries(adapter);
1255
1256 qlcnic_free_hw_resources(adapter);
1257 qlcnic_release_rx_buffers(adapter);
1258 qlcnic_free_irq(adapter);
1259 qlcnic_napi_del(adapter);
1260 qlcnic_free_sw_resources(adapter);
1261
1262 adapter->is_up = 0;
1263}
1264
7eb9855d
AKS
1265void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1266{
1267 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1268 struct qlcnic_host_sds_ring *sds_ring;
1269 int ring;
1270
78ad3892 1271 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1272 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1273 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1274 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1275 qlcnic_disable_int(sds_ring);
1276 }
7eb9855d
AKS
1277 }
1278
8a15ad1f
AKS
1279 qlcnic_fw_destroy_ctx(adapter);
1280
7eb9855d
AKS
1281 qlcnic_detach(adapter);
1282
1283 adapter->diag_test = 0;
1284 adapter->max_sds_rings = max_sds_rings;
1285
1286 if (qlcnic_attach(adapter))
34ce3626 1287 goto out;
7eb9855d
AKS
1288
1289 if (netif_running(netdev))
1290 __qlcnic_up(adapter, netdev);
34ce3626 1291out:
7eb9855d
AKS
1292 netif_device_attach(netdev);
1293}
1294
b1fc6d3c
AC
1295static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1296{
1297 int err = 0;
1298 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1299 GFP_KERNEL);
1300 if (!adapter->ahw) {
1301 dev_err(&adapter->pdev->dev,
1302 "Failed to allocate recv ctx resources for adapter\n");
1303 err = -ENOMEM;
1304 goto err_out;
1305 }
1306 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1307 GFP_KERNEL);
1308 if (!adapter->recv_ctx) {
1309 dev_err(&adapter->pdev->dev,
1310 "Failed to allocate recv ctx resources for adapter\n");
1311 kfree(adapter->ahw);
1312 adapter->ahw = NULL;
1313 err = -ENOMEM;
8816d009 1314 goto err_out;
b1fc6d3c 1315 }
8816d009
AC
1316 /* Initialize interrupt coalesce parameters */
1317 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1318 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1319 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
b1fc6d3c
AC
1320err_out:
1321 return err;
1322}
1323
1324static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1325{
1326 kfree(adapter->recv_ctx);
1327 adapter->recv_ctx = NULL;
1328
1329 kfree(adapter->ahw);
1330 adapter->ahw = NULL;
1331}
1332
7eb9855d
AKS
1333int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1334{
1335 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1336 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1337 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1338 int ring;
1339 int ret;
1340
1341 netif_device_detach(netdev);
1342
1343 if (netif_running(netdev))
1344 __qlcnic_down(adapter, netdev);
1345
1346 qlcnic_detach(adapter);
1347
1348 adapter->max_sds_rings = 1;
1349 adapter->diag_test = test;
1350
1351 ret = qlcnic_attach(adapter);
34ce3626
AKS
1352 if (ret) {
1353 netif_device_attach(netdev);
7eb9855d 1354 return ret;
34ce3626 1355 }
7eb9855d 1356
8a15ad1f
AKS
1357 ret = qlcnic_fw_create_ctx(adapter);
1358 if (ret) {
1359 qlcnic_detach(adapter);
57e46248 1360 netif_device_attach(netdev);
8a15ad1f
AKS
1361 return ret;
1362 }
1363
1364 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1365 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1366 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1367 }
1368
cdaff185
AKS
1369 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1370 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1371 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1372 qlcnic_enable_int(sds_ring);
1373 }
7eb9855d 1374 }
78ad3892 1375 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1376
1377 return 0;
1378}
1379
68bf1c68
AKS
1380/* Reset context in hardware only */
1381static int
1382qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1383{
1384 struct net_device *netdev = adapter->netdev;
1385
1386 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1387 return -EBUSY;
1388
1389 netif_device_detach(netdev);
1390
1391 qlcnic_down(adapter, netdev);
1392
1393 qlcnic_up(adapter, netdev);
1394
1395 netif_device_attach(netdev);
1396
1397 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1398 return 0;
1399}
1400
af19b491
AKS
1401int
1402qlcnic_reset_context(struct qlcnic_adapter *adapter)
1403{
1404 int err = 0;
1405 struct net_device *netdev = adapter->netdev;
1406
1407 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1408 return -EBUSY;
1409
1410 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1411
1412 netif_device_detach(netdev);
1413
1414 if (netif_running(netdev))
1415 __qlcnic_down(adapter, netdev);
1416
1417 qlcnic_detach(adapter);
1418
1419 if (netif_running(netdev)) {
1420 err = qlcnic_attach(adapter);
1421 if (!err)
34ce3626 1422 __qlcnic_up(adapter, netdev);
af19b491
AKS
1423 }
1424
1425 netif_device_attach(netdev);
1426 }
1427
af19b491
AKS
1428 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1429 return err;
1430}
1431
1432static int
1433qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1434 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1435{
1436 int err;
1437 struct pci_dev *pdev = adapter->pdev;
1438
af19b491
AKS
1439 adapter->mc_enabled = 0;
1440 adapter->max_mc_count = 38;
1441
1442 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1443 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1444
1445 qlcnic_change_mtu(netdev, netdev->mtu);
1446
1447 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1448
135d84a9
MM
1449 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1450 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ac8d0c4f 1451
135d84a9
MM
1452 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1453 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1454 if (pci_using_dac)
1455 netdev->hw_features |= NETIF_F_HIGHDMA;
af19b491 1456
135d84a9 1457 netdev->vlan_features = netdev->hw_features;
af19b491
AKS
1458
1459 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
135d84a9 1460 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
af19b491 1461 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
135d84a9
MM
1462 netdev->hw_features |= NETIF_F_LRO;
1463
1464 netdev->features |= netdev->hw_features |
1465 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1466
af19b491
AKS
1467 netdev->irq = adapter->msix_entries[0].vector;
1468
af19b491 1469 netif_carrier_off(netdev);
af19b491
AKS
1470
1471 err = register_netdev(netdev);
1472 if (err) {
1473 dev_err(&pdev->dev, "failed to register net device\n");
1474 return err;
1475 }
1476
1477 return 0;
1478}
1479
1bb09fb9
AKS
1480static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1481{
1482 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1483 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1484 *pci_using_dac = 1;
1485 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1486 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1487 *pci_using_dac = 0;
1488 else {
1489 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1490 return -EIO;
1491 }
1492
1493 return 0;
1494}
1495
af19b491
AKS
1496static int __devinit
1497qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1498{
1499 struct net_device *netdev = NULL;
1500 struct qlcnic_adapter *adapter = NULL;
1501 int err;
af19b491 1502 uint8_t revision_id;
1bb09fb9 1503 uint8_t pci_using_dac;
da48e6c3 1504 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491
AKS
1505
1506 err = pci_enable_device(pdev);
1507 if (err)
1508 return err;
1509
1510 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1511 err = -ENODEV;
1512 goto err_out_disable_pdev;
1513 }
1514
1bb09fb9
AKS
1515 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1516 if (err)
1517 goto err_out_disable_pdev;
1518
af19b491
AKS
1519 err = pci_request_regions(pdev, qlcnic_driver_name);
1520 if (err)
1521 goto err_out_disable_pdev;
1522
1523 pci_set_master(pdev);
451724c8 1524 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1525
1526 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1527 if (!netdev) {
1528 dev_err(&pdev->dev, "failed to allocate net_device\n");
1529 err = -ENOMEM;
1530 goto err_out_free_res;
1531 }
1532
1533 SET_NETDEV_DEV(netdev, &pdev->dev);
1534
1535 adapter = netdev_priv(netdev);
1536 adapter->netdev = netdev;
1537 adapter->pdev = pdev;
af19b491 1538
b1fc6d3c
AC
1539 if (qlcnic_alloc_adapter_resources(adapter))
1540 goto err_out_free_netdev;
1541
1542 adapter->dev_rst_time = jiffies;
af19b491 1543 revision_id = pdev->revision;
b1fc6d3c 1544 adapter->ahw->revision_id = revision_id;
af19b491 1545
b1fc6d3c
AC
1546 rwlock_init(&adapter->ahw->crb_lock);
1547 mutex_init(&adapter->ahw->mem_lock);
af19b491
AKS
1548
1549 spin_lock_init(&adapter->tx_clean_lock);
1550 INIT_LIST_HEAD(&adapter->mac_list);
1551
1552 err = qlcnic_setup_pci_map(adapter);
1553 if (err)
b1fc6d3c 1554 goto err_out_free_hw;
af19b491
AKS
1555
1556 /* This will be reset for mezz cards */
b1fc6d3c 1557 adapter->portnum = adapter->ahw->pci_func;
af19b491
AKS
1558
1559 err = qlcnic_get_board_info(adapter);
1560 if (err) {
1561 dev_err(&pdev->dev, "Error getting board config info.\n");
1562 goto err_out_iounmap;
1563 }
1564
8cfdce08
SC
1565 err = qlcnic_setup_idc_param(adapter);
1566 if (err)
b3a24649 1567 goto err_out_iounmap;
af19b491 1568
1dc0f3c5 1569 adapter->flags |= QLCNIC_NEED_FLR;
b0044bcf 1570
9f26f547 1571 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f
AKS
1572 if (err) {
1573 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
af19b491 1574 goto err_out_decr_ref;
a7fc948f 1575 }
af19b491 1576
da48e6c3
RB
1577 if (qlcnic_read_mac_addr(adapter))
1578 dev_warn(&pdev->dev, "failed to read mac addr\n");
1579
1580 if (adapter->portnum == 0) {
1581 get_brd_name(adapter, brd_name);
1582
1583 pr_info("%s: %s Board Chip rev 0x%x\n",
1584 module_name(THIS_MODULE),
b1fc6d3c 1585 brd_name, adapter->ahw->revision_id);
da48e6c3
RB
1586 }
1587
af19b491
AKS
1588 qlcnic_clear_stats(adapter);
1589
1590 qlcnic_setup_intr(adapter);
1591
1bb09fb9 1592 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1593 if (err)
1594 goto err_out_disable_msi;
1595
1596 pci_set_drvdata(pdev, adapter);
1597
1598 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1599
b1fc6d3c 1600 switch (adapter->ahw->port_type) {
af19b491
AKS
1601 case QLCNIC_GBE:
1602 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1603 adapter->netdev->name);
1604 break;
1605 case QLCNIC_XGBE:
1606 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1607 adapter->netdev->name);
1608 break;
1609 }
1610
b5e5492c 1611 qlcnic_alloc_lb_filters_mem(adapter);
af19b491
AKS
1612 qlcnic_create_diag_entries(adapter);
1613
1614 return 0;
1615
1616err_out_disable_msi:
1617 qlcnic_teardown_intr(adapter);
1618
1619err_out_decr_ref:
21854f02 1620 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1621
1622err_out_iounmap:
1623 qlcnic_cleanup_pci_map(adapter);
1624
b1fc6d3c
AC
1625err_out_free_hw:
1626 qlcnic_free_adapter_resources(adapter);
1627
af19b491
AKS
1628err_out_free_netdev:
1629 free_netdev(netdev);
1630
1631err_out_free_res:
1632 pci_release_regions(pdev);
1633
1634err_out_disable_pdev:
1635 pci_set_drvdata(pdev, NULL);
1636 pci_disable_device(pdev);
1637 return err;
1638}
1639
1640static void __devexit qlcnic_remove(struct pci_dev *pdev)
1641{
1642 struct qlcnic_adapter *adapter;
1643 struct net_device *netdev;
1644
1645 adapter = pci_get_drvdata(pdev);
1646 if (adapter == NULL)
1647 return;
1648
1649 netdev = adapter->netdev;
1650
1651 qlcnic_cancel_fw_work(adapter);
1652
1653 unregister_netdev(netdev);
1654
af19b491
AKS
1655 qlcnic_detach(adapter);
1656
2e9d722d
AC
1657 if (adapter->npars != NULL)
1658 kfree(adapter->npars);
1659 if (adapter->eswitch != NULL)
1660 kfree(adapter->eswitch);
1661
21854f02 1662 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1663
1664 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1665
b5e5492c
AKS
1666 qlcnic_free_lb_filters_mem(adapter);
1667
af19b491
AKS
1668 qlcnic_teardown_intr(adapter);
1669
1670 qlcnic_remove_diag_entries(adapter);
1671
1672 qlcnic_cleanup_pci_map(adapter);
1673
1674 qlcnic_release_firmware(adapter);
1675
451724c8 1676 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1677 pci_release_regions(pdev);
1678 pci_disable_device(pdev);
1679 pci_set_drvdata(pdev, NULL);
1680
b1fc6d3c 1681 qlcnic_free_adapter_resources(adapter);
af19b491
AKS
1682 free_netdev(netdev);
1683}
1684static int __qlcnic_shutdown(struct pci_dev *pdev)
1685{
1686 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1687 struct net_device *netdev = adapter->netdev;
1688 int retval;
1689
1690 netif_device_detach(netdev);
1691
1692 qlcnic_cancel_fw_work(adapter);
1693
1694 if (netif_running(netdev))
1695 qlcnic_down(adapter, netdev);
1696
21854f02 1697 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1698
1699 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1700
1701 retval = pci_save_state(pdev);
1702 if (retval)
1703 return retval;
1704
1705 if (qlcnic_wol_supported(adapter)) {
1706 pci_enable_wake(pdev, PCI_D3cold, 1);
1707 pci_enable_wake(pdev, PCI_D3hot, 1);
1708 }
1709
1710 return 0;
1711}
1712
1713static void qlcnic_shutdown(struct pci_dev *pdev)
1714{
1715 if (__qlcnic_shutdown(pdev))
1716 return;
1717
1718 pci_disable_device(pdev);
1719}
1720
1721#ifdef CONFIG_PM
1722static int
1723qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1724{
1725 int retval;
1726
1727 retval = __qlcnic_shutdown(pdev);
1728 if (retval)
1729 return retval;
1730
1731 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1732 return 0;
1733}
1734
1735static int
1736qlcnic_resume(struct pci_dev *pdev)
1737{
1738 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1739 struct net_device *netdev = adapter->netdev;
1740 int err;
1741
1742 err = pci_enable_device(pdev);
1743 if (err)
1744 return err;
1745
1746 pci_set_power_state(pdev, PCI_D0);
1747 pci_set_master(pdev);
1748 pci_restore_state(pdev);
1749
9f26f547 1750 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1751 if (err) {
1752 dev_err(&pdev->dev, "failed to start firmware\n");
1753 return err;
1754 }
1755
1756 if (netif_running(netdev)) {
af19b491
AKS
1757 err = qlcnic_up(adapter, netdev);
1758 if (err)
52486a3a 1759 goto done;
af19b491 1760
aec1e845 1761 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1762 }
52486a3a 1763done:
af19b491
AKS
1764 netif_device_attach(netdev);
1765 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1766 return 0;
af19b491
AKS
1767}
1768#endif
1769
1770static int qlcnic_open(struct net_device *netdev)
1771{
1772 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1773 int err;
1774
af19b491
AKS
1775 err = qlcnic_attach(adapter);
1776 if (err)
1777 return err;
1778
1779 err = __qlcnic_up(adapter, netdev);
1780 if (err)
1781 goto err_out;
1782
1783 netif_start_queue(netdev);
1784
1785 return 0;
1786
1787err_out:
1788 qlcnic_detach(adapter);
1789 return err;
1790}
1791
1792/*
1793 * qlcnic_close - Disables a network interface entry point
1794 */
1795static int qlcnic_close(struct net_device *netdev)
1796{
1797 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1798
1799 __qlcnic_down(adapter, netdev);
1800 return 0;
1801}
1802
b5e5492c
AKS
1803static void
1804qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1805{
1806 void *head;
1807 int i;
1808
1809 if (!qlcnic_mac_learn)
1810 return;
1811
1812 spin_lock_init(&adapter->mac_learn_lock);
1813
1814 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1815 GFP_KERNEL);
1816 if (!head)
1817 return;
1818
1819 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1820 adapter->fhash.fhead = (struct hlist_head *)head;
1821
1822 for (i = 0; i < adapter->fhash.fmax; i++)
1823 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1824}
1825
1826static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1827{
1828 if (adapter->fhash.fmax && adapter->fhash.fhead)
1829 kfree(adapter->fhash.fhead);
1830
1831 adapter->fhash.fhead = NULL;
1832 adapter->fhash.fmax = 0;
1833}
1834
1835static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1836 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1837{
1838 struct cmd_desc_type0 *hwdesc;
1839 struct qlcnic_nic_req *req;
1840 struct qlcnic_mac_req *mac_req;
7e56cac4 1841 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1842 u32 producer;
1843 u64 word;
1844
1845 producer = tx_ring->producer;
1846 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1847
1848 req = (struct qlcnic_nic_req *)hwdesc;
1849 memset(req, 0, sizeof(struct qlcnic_nic_req));
1850 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1851
1852 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1853 req->req_hdr = cpu_to_le64(word);
1854
1855 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1856 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1857 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1858
7e56cac4
SC
1859 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1860 vlan_req->vlan_id = vlan_id;
03c5d770 1861
b5e5492c 1862 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1863 smp_mb();
b5e5492c
AKS
1864}
1865
1866#define QLCNIC_MAC_HASH(MAC)\
1867 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1868
1869static void
1870qlcnic_send_filter(struct qlcnic_adapter *adapter,
1871 struct qlcnic_host_tx_ring *tx_ring,
1872 struct cmd_desc_type0 *first_desc,
1873 struct sk_buff *skb)
1874{
1875 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1876 struct qlcnic_filter *fil, *tmp_fil;
1877 struct hlist_node *tmp_hnode, *n;
1878 struct hlist_head *head;
1879 u64 src_addr = 0;
7e56cac4 1880 __le16 vlan_id = 0;
b5e5492c
AKS
1881 u8 hindex;
1882
1883 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1884 return;
1885
1886 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1887 return;
1888
03c5d770
AKS
1889 /* Only NPAR capable devices support vlan based learning*/
1890 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1891 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1892 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1893 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1894 head = &(adapter->fhash.fhead[hindex]);
1895
1896 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1897 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1898 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1899
1900 if (jiffies >
1901 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1902 qlcnic_change_filter(adapter, src_addr, vlan_id,
1903 tx_ring);
b5e5492c
AKS
1904 tmp_fil->ftime = jiffies;
1905 return;
1906 }
1907 }
1908
1909 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1910 if (!fil)
1911 return;
1912
03c5d770 1913 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1914
1915 fil->ftime = jiffies;
03c5d770 1916 fil->vlan_id = vlan_id;
b5e5492c
AKS
1917 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1918 spin_lock(&adapter->mac_learn_lock);
1919 hlist_add_head(&(fil->fnode), head);
1920 adapter->fhash.fnum++;
1921 spin_unlock(&adapter->mac_learn_lock);
1922}
1923
036d61f0
AC
1924static int
1925qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
1926 struct cmd_desc_type0 *first_desc,
1927 struct sk_buff *skb)
1928{
036d61f0
AC
1929 u8 opcode = 0, hdr_len = 0;
1930 u16 flags = 0, vlan_tci = 0;
1931 int copied, offset, copy_len;
af19b491
AKS
1932 struct cmd_desc_type0 *hwdesc;
1933 struct vlan_ethhdr *vh;
036d61f0
AC
1934 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1935 u16 protocol = ntohs(skb->protocol);
2e9d722d 1936 u32 producer = tx_ring->producer;
036d61f0
AC
1937
1938 if (protocol == ETH_P_8021Q) {
1939 vh = (struct vlan_ethhdr *)skb->data;
1940 flags = FLAGS_VLAN_TAGGED;
1941 vlan_tci = vh->h_vlan_TCI;
1942 } else if (vlan_tx_tag_present(skb)) {
1943 flags = FLAGS_VLAN_OOB;
1944 vlan_tci = vlan_tx_tag_get(skb);
1945 }
1946 if (unlikely(adapter->pvid)) {
1947 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1948 return -EIO;
1949 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
1950 goto set_flags;
1951
1952 flags = FLAGS_VLAN_OOB;
1953 vlan_tci = adapter->pvid;
1954 }
1955set_flags:
1956 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
1957 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 1958
2e9d722d
AC
1959 if (*(skb->data) & BIT_0) {
1960 flags |= BIT_0;
1961 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1962 }
036d61f0
AC
1963 opcode = TX_ETHER_PKT;
1964 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
af19b491
AKS
1965 skb_shinfo(skb)->gso_size > 0) {
1966
1967 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1968
1969 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1970 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
1971
1972 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
1973
1974 /* For LSO, we need to copy the MAC/IP/TCP headers into
1975 * the descriptor ring */
1976 copied = 0;
1977 offset = 2;
1978
1979 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
1980 first_desc->total_hdr_length += VLAN_HLEN;
1981 first_desc->tcp_hdr_offset = VLAN_HLEN;
1982 first_desc->ip_hdr_offset = VLAN_HLEN;
1983 /* Only in case of TSO on vlan device */
1984 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
1985
1986 /* Create a TSO vlan header template for firmware */
1987
1988 hwdesc = &tx_ring->desc_head[producer];
1989 tx_ring->cmd_buf_arr[producer].skb = NULL;
1990
1991 copy_len = min((int)sizeof(struct cmd_desc_type0) -
1992 offset, hdr_len + VLAN_HLEN);
1993
1994 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
1995 skb_copy_from_linear_data(skb, vh, 12);
1996 vh->h_vlan_proto = htons(ETH_P_8021Q);
1997 vh->h_vlan_TCI = htons(vlan_tci);
1998
1999 skb_copy_from_linear_data_offset(skb, 12,
2000 (char *)vh + 16, copy_len - 16);
2001
2002 copied = copy_len - VLAN_HLEN;
2003 offset = 0;
2004
2005 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2006 }
2007
036d61f0
AC
2008 while (copied < hdr_len) {
2009
2010 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2011 offset, (hdr_len - copied));
2012
2013 hwdesc = &tx_ring->desc_head[producer];
2014 tx_ring->cmd_buf_arr[producer].skb = NULL;
2015
2016 skb_copy_from_linear_data_offset(skb, copied,
2017 (char *) hwdesc + offset, copy_len);
2018
2019 copied += copy_len;
2020 offset = 0;
2021
2022 producer = get_next_index(producer, tx_ring->num_desc);
2023 }
2024
2025 tx_ring->producer = producer;
2026 smp_mb();
2027 adapter->stats.lso_frames++;
af19b491
AKS
2028
2029 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2030 u8 l4proto;
2031
036d61f0 2032 if (protocol == ETH_P_IP) {
af19b491
AKS
2033 l4proto = ip_hdr(skb)->protocol;
2034
2035 if (l4proto == IPPROTO_TCP)
2036 opcode = TX_TCP_PKT;
2037 else if (l4proto == IPPROTO_UDP)
2038 opcode = TX_UDP_PKT;
036d61f0 2039 } else if (protocol == ETH_P_IPV6) {
af19b491
AKS
2040 l4proto = ipv6_hdr(skb)->nexthdr;
2041
2042 if (l4proto == IPPROTO_TCP)
2043 opcode = TX_TCPV6_PKT;
2044 else if (l4proto == IPPROTO_UDP)
2045 opcode = TX_UDPV6_PKT;
2046 }
2047 }
af19b491
AKS
2048 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2049 first_desc->ip_hdr_offset += skb_network_offset(skb);
2050 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2051
036d61f0 2052 return 0;
af19b491
AKS
2053}
2054
2055static int
2056qlcnic_map_tx_skb(struct pci_dev *pdev,
2057 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2058{
2059 struct qlcnic_skb_frag *nf;
2060 struct skb_frag_struct *frag;
2061 int i, nr_frags;
2062 dma_addr_t map;
2063
2064 nr_frags = skb_shinfo(skb)->nr_frags;
2065 nf = &pbuf->frag_array[0];
2066
2067 map = pci_map_single(pdev, skb->data,
2068 skb_headlen(skb), PCI_DMA_TODEVICE);
2069 if (pci_dma_mapping_error(pdev, map))
2070 goto out_err;
2071
2072 nf->dma = map;
2073 nf->length = skb_headlen(skb);
2074
2075 for (i = 0; i < nr_frags; i++) {
2076 frag = &skb_shinfo(skb)->frags[i];
2077 nf = &pbuf->frag_array[i+1];
2078
2079 map = pci_map_page(pdev, frag->page, frag->page_offset,
2080 frag->size, PCI_DMA_TODEVICE);
2081 if (pci_dma_mapping_error(pdev, map))
2082 goto unwind;
2083
2084 nf->dma = map;
2085 nf->length = frag->size;
2086 }
2087
2088 return 0;
2089
2090unwind:
2091 while (--i >= 0) {
2092 nf = &pbuf->frag_array[i+1];
2093 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2094 }
2095
2096 nf = &pbuf->frag_array[0];
2097 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2098
2099out_err:
2100 return -ENOMEM;
2101}
2102
036d61f0
AC
2103static void
2104qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2105 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2106{
036d61f0
AC
2107 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2108 int nr_frags = skb_shinfo(skb)->nr_frags;
2109 int i;
8cf61f89 2110
036d61f0
AC
2111 for (i = 0; i < nr_frags; i++) {
2112 nf = &pbuf->frag_array[i+1];
2113 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2114 }
8cf61f89 2115
036d61f0
AC
2116 nf = &pbuf->frag_array[0];
2117 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
8cf61f89
AKS
2118}
2119
af19b491
AKS
2120static inline void
2121qlcnic_clear_cmddesc(u64 *desc)
2122{
2123 desc[0] = 0ULL;
2124 desc[2] = 0ULL;
8cf61f89 2125 desc[7] = 0ULL;
af19b491
AKS
2126}
2127
cdaff185 2128netdev_tx_t
af19b491
AKS
2129qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2130{
2131 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2132 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2133 struct qlcnic_cmd_buffer *pbuf;
2134 struct qlcnic_skb_frag *buffrag;
2135 struct cmd_desc_type0 *hwdesc, *first_desc;
2136 struct pci_dev *pdev;
dcb50aff 2137 struct ethhdr *phdr;
91a403ca 2138 int delta = 0;
af19b491
AKS
2139 int i, k;
2140
2141 u32 producer;
036d61f0 2142 int frag_count;
af19b491
AKS
2143 u32 num_txd = tx_ring->num_desc;
2144
780ab790
AKS
2145 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2146 netif_stop_queue(netdev);
2147 return NETDEV_TX_BUSY;
2148 }
2149
fe4d434d 2150 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2151 phdr = (struct ethhdr *)skb->data;
2152 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2153 adapter->mac_addr))
2154 goto drop_packet;
2155 }
2156
af19b491 2157 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2158 /* 14 frags supported for normal packet and
2159 * 32 frags supported for TSO packet
2160 */
2161 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2162
2163 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2164 delta += skb_shinfo(skb)->frags[i].size;
2165
2166 if (!__pskb_pull_tail(skb, delta))
2167 goto drop_packet;
2168
2169 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2170 }
af19b491 2171
ef71ff83 2172 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2173 netif_stop_queue(netdev);
ef71ff83
RB
2174 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2175 netif_start_queue(netdev);
2176 else {
2177 adapter->stats.xmit_off++;
2178 return NETDEV_TX_BUSY;
2179 }
af19b491
AKS
2180 }
2181
2182 producer = tx_ring->producer;
2183 pbuf = &tx_ring->cmd_buf_arr[producer];
2184
2185 pdev = adapter->pdev;
2186
8cf61f89
AKS
2187 first_desc = hwdesc = &tx_ring->desc_head[producer];
2188 qlcnic_clear_cmddesc((u64 *)hwdesc);
2189
8ae6df97
AKS
2190 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2191 adapter->stats.tx_dma_map_error++;
af19b491 2192 goto drop_packet;
8ae6df97 2193 }
af19b491
AKS
2194
2195 pbuf->skb = skb;
2196 pbuf->frag_count = frag_count;
2197
af19b491
AKS
2198 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2199 qlcnic_set_tx_port(first_desc, adapter->portnum);
2200
2201 for (i = 0; i < frag_count; i++) {
2202
2203 k = i % 4;
2204
2205 if ((k == 0) && (i > 0)) {
2206 /* move to next desc.*/
2207 producer = get_next_index(producer, num_txd);
2208 hwdesc = &tx_ring->desc_head[producer];
2209 qlcnic_clear_cmddesc((u64 *)hwdesc);
2210 tx_ring->cmd_buf_arr[producer].skb = NULL;
2211 }
2212
2213 buffrag = &pbuf->frag_array[i];
2214
2215 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2216 switch (k) {
2217 case 0:
2218 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2219 break;
2220 case 1:
2221 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2222 break;
2223 case 2:
2224 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2225 break;
2226 case 3:
2227 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2228 break;
2229 }
2230 }
2231
2232 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2233 smp_mb();
af19b491 2234
036d61f0
AC
2235 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2236 goto unwind_buff;
af19b491 2237
b5e5492c
AKS
2238 if (qlcnic_mac_learn)
2239 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2240
af19b491
AKS
2241 qlcnic_update_cmd_producer(adapter, tx_ring);
2242
2243 adapter->stats.txbytes += skb->len;
2244 adapter->stats.xmitcalled++;
2245
2246 return NETDEV_TX_OK;
2247
036d61f0
AC
2248unwind_buff:
2249 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2250drop_packet:
2251 adapter->stats.txdropped++;
2252 dev_kfree_skb_any(skb);
2253 return NETDEV_TX_OK;
2254}
2255
2256static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2257{
2258 struct net_device *netdev = adapter->netdev;
2259 u32 temp, temp_state, temp_val;
2260 int rv = 0;
2261
2262 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2263
2264 temp_state = qlcnic_get_temp_state(temp);
2265 temp_val = qlcnic_get_temp_val(temp);
2266
2267 if (temp_state == QLCNIC_TEMP_PANIC) {
2268 dev_err(&netdev->dev,
2269 "Device temperature %d degrees C exceeds"
2270 " maximum allowed. Hardware has been shut down.\n",
2271 temp_val);
2272 rv = 1;
2273 } else if (temp_state == QLCNIC_TEMP_WARN) {
2274 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2275 dev_err(&netdev->dev,
2276 "Device temperature %d degrees C "
2277 "exceeds operating range."
2278 " Immediate action needed.\n",
2279 temp_val);
2280 }
2281 } else {
2282 if (adapter->temp == QLCNIC_TEMP_WARN) {
2283 dev_info(&netdev->dev,
2284 "Device temperature is now %d degrees C"
2285 " in normal range.\n", temp_val);
2286 }
2287 }
2288 adapter->temp = temp_state;
2289 return rv;
2290}
2291
2292void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2293{
2294 struct net_device *netdev = adapter->netdev;
2295
b1fc6d3c 2296 if (adapter->ahw->linkup && !linkup) {
69324275 2297 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2298 adapter->ahw->linkup = 0;
af19b491
AKS
2299 if (netif_running(netdev)) {
2300 netif_carrier_off(netdev);
2301 netif_stop_queue(netdev);
2302 }
b1fc6d3c 2303 } else if (!adapter->ahw->linkup && linkup) {
69324275 2304 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2305 adapter->ahw->linkup = 1;
af19b491
AKS
2306 if (netif_running(netdev)) {
2307 netif_carrier_on(netdev);
2308 netif_wake_queue(netdev);
2309 }
2310 }
2311}
2312
2313static void qlcnic_tx_timeout(struct net_device *netdev)
2314{
2315 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2316
2317 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2318 return;
2319
2320 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2321
2322 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2323 adapter->need_fw_reset = 1;
2324 else
2325 adapter->reset_context = 1;
af19b491
AKS
2326}
2327
2328static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2329{
2330 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2331 struct net_device_stats *stats = &netdev->stats;
2332
af19b491
AKS
2333 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2334 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2335 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2336 stats->tx_bytes = adapter->stats.txbytes;
2337 stats->rx_dropped = adapter->stats.rxdropped;
2338 stats->tx_dropped = adapter->stats.txdropped;
2339
2340 return stats;
2341}
2342
7eb9855d 2343static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2344{
af19b491
AKS
2345 u32 status;
2346
2347 status = readl(adapter->isr_int_vec);
2348
2349 if (!(status & adapter->int_vec_bit))
2350 return IRQ_NONE;
2351
2352 /* check interrupt state machine, to be sure */
2353 status = readl(adapter->crb_int_state_reg);
2354 if (!ISR_LEGACY_INT_TRIGGERED(status))
2355 return IRQ_NONE;
2356
2357 writel(0xffffffff, adapter->tgt_status_reg);
2358 /* read twice to ensure write is flushed */
2359 readl(adapter->isr_int_vec);
2360 readl(adapter->isr_int_vec);
2361
7eb9855d
AKS
2362 return IRQ_HANDLED;
2363}
2364
2365static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2366{
2367 struct qlcnic_host_sds_ring *sds_ring = data;
2368 struct qlcnic_adapter *adapter = sds_ring->adapter;
2369
2370 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2371 goto done;
2372 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2373 writel(0xffffffff, adapter->tgt_status_reg);
2374 goto done;
2375 }
2376
2377 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2378 return IRQ_NONE;
2379
2380done:
2381 adapter->diag_cnt++;
2382 qlcnic_enable_int(sds_ring);
2383 return IRQ_HANDLED;
2384}
2385
2386static irqreturn_t qlcnic_intr(int irq, void *data)
2387{
2388 struct qlcnic_host_sds_ring *sds_ring = data;
2389 struct qlcnic_adapter *adapter = sds_ring->adapter;
2390
2391 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2392 return IRQ_NONE;
2393
af19b491
AKS
2394 napi_schedule(&sds_ring->napi);
2395
2396 return IRQ_HANDLED;
2397}
2398
2399static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2400{
2401 struct qlcnic_host_sds_ring *sds_ring = data;
2402 struct qlcnic_adapter *adapter = sds_ring->adapter;
2403
2404 /* clear interrupt */
2405 writel(0xffffffff, adapter->tgt_status_reg);
2406
2407 napi_schedule(&sds_ring->napi);
2408 return IRQ_HANDLED;
2409}
2410
2411static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2412{
2413 struct qlcnic_host_sds_ring *sds_ring = data;
2414
2415 napi_schedule(&sds_ring->napi);
2416 return IRQ_HANDLED;
2417}
2418
2419static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2420{
2421 u32 sw_consumer, hw_consumer;
2422 int count = 0, i;
2423 struct qlcnic_cmd_buffer *buffer;
2424 struct pci_dev *pdev = adapter->pdev;
2425 struct net_device *netdev = adapter->netdev;
2426 struct qlcnic_skb_frag *frag;
2427 int done;
2428 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2429
2430 if (!spin_trylock(&adapter->tx_clean_lock))
2431 return 1;
2432
2433 sw_consumer = tx_ring->sw_consumer;
2434 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2435
2436 while (sw_consumer != hw_consumer) {
2437 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2438 if (buffer->skb) {
2439 frag = &buffer->frag_array[0];
2440 pci_unmap_single(pdev, frag->dma, frag->length,
2441 PCI_DMA_TODEVICE);
2442 frag->dma = 0ULL;
2443 for (i = 1; i < buffer->frag_count; i++) {
2444 frag++;
2445 pci_unmap_page(pdev, frag->dma, frag->length,
2446 PCI_DMA_TODEVICE);
2447 frag->dma = 0ULL;
2448 }
2449
2450 adapter->stats.xmitfinished++;
2451 dev_kfree_skb_any(buffer->skb);
2452 buffer->skb = NULL;
2453 }
2454
2455 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2456 if (++count >= MAX_STATUS_HANDLE)
2457 break;
2458 }
2459
2460 if (count && netif_running(netdev)) {
2461 tx_ring->sw_consumer = sw_consumer;
2462
2463 smp_mb();
2464
2465 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2466 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2467 netif_wake_queue(netdev);
8bfe8b91 2468 adapter->stats.xmit_on++;
af19b491 2469 }
af19b491 2470 }
ef71ff83 2471 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2472 }
2473 /*
2474 * If everything is freed up to consumer then check if the ring is full
2475 * If the ring is full then check if more needs to be freed and
2476 * schedule the call back again.
2477 *
2478 * This happens when there are 2 CPUs. One could be freeing and the
2479 * other filling it. If the ring is full when we get out of here and
2480 * the card has already interrupted the host then the host can miss the
2481 * interrupt.
2482 *
2483 * There is still a possible race condition and the host could miss an
2484 * interrupt. The card has to take care of this.
2485 */
2486 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2487 done = (sw_consumer == hw_consumer);
2488 spin_unlock(&adapter->tx_clean_lock);
2489
2490 return done;
2491}
2492
2493static int qlcnic_poll(struct napi_struct *napi, int budget)
2494{
2495 struct qlcnic_host_sds_ring *sds_ring =
2496 container_of(napi, struct qlcnic_host_sds_ring, napi);
2497
2498 struct qlcnic_adapter *adapter = sds_ring->adapter;
2499
2500 int tx_complete;
2501 int work_done;
2502
2503 tx_complete = qlcnic_process_cmd_ring(adapter);
2504
2505 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2506
2507 if ((work_done < budget) && tx_complete) {
2508 napi_complete(&sds_ring->napi);
2509 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2510 qlcnic_enable_int(sds_ring);
2511 }
2512
2513 return work_done;
2514}
2515
8f891387 2516static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2517{
2518 struct qlcnic_host_sds_ring *sds_ring =
2519 container_of(napi, struct qlcnic_host_sds_ring, napi);
2520
2521 struct qlcnic_adapter *adapter = sds_ring->adapter;
2522 int work_done;
2523
2524 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2525
2526 if (work_done < budget) {
2527 napi_complete(&sds_ring->napi);
2528 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2529 qlcnic_enable_int(sds_ring);
2530 }
2531
2532 return work_done;
2533}
2534
af19b491
AKS
2535#ifdef CONFIG_NET_POLL_CONTROLLER
2536static void qlcnic_poll_controller(struct net_device *netdev)
2537{
bf82791e
YL
2538 int ring;
2539 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2540 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2541 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2542
af19b491 2543 disable_irq(adapter->irq);
bf82791e
YL
2544 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2545 sds_ring = &recv_ctx->sds_rings[ring];
2546 qlcnic_intr(adapter->irq, sds_ring);
2547 }
af19b491
AKS
2548 enable_irq(adapter->irq);
2549}
2550#endif
2551
6df900e9
SC
2552static void
2553qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2554{
2555 u32 val;
2556
2557 val = adapter->portnum & 0xf;
2558 val |= encoding << 7;
2559 val |= (jiffies - adapter->dev_rst_time) << 8;
2560
2561 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2562 adapter->dev_rst_time = jiffies;
2563}
2564
ade91f8e
AKS
2565static int
2566qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2567{
2568 u32 val;
2569
2570 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2571 state != QLCNIC_DEV_NEED_QUISCENT);
2572
2573 if (qlcnic_api_lock(adapter))
ade91f8e 2574 return -EIO;
af19b491
AKS
2575
2576 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2577
2578 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2579 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2580 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2581 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2582
2583 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2584
2585 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2586
2587 return 0;
af19b491
AKS
2588}
2589
1b95a839
AKS
2590static int
2591qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2592{
2593 u32 val;
2594
2595 if (qlcnic_api_lock(adapter))
2596 return -EBUSY;
2597
2598 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2599 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2600 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2601
2602 qlcnic_api_unlock(adapter);
2603
2604 return 0;
2605}
2606
af19b491 2607static void
21854f02 2608qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2609{
2610 u32 val;
2611
2612 if (qlcnic_api_lock(adapter))
2613 goto err;
2614
31018e06 2615 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2616 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2617 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2618
21854f02
AKS
2619 if (failed) {
2620 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2621 dev_info(&adapter->pdev->dev,
2622 "Device state set to Failed. Please Reboot\n");
2623 } else if (!(val & 0x11111111))
af19b491
AKS
2624 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2625
2626 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2627 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2628 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2629
2630 qlcnic_api_unlock(adapter);
2631err:
2632 adapter->fw_fail_cnt = 0;
2633 clear_bit(__QLCNIC_START_FW, &adapter->state);
2634 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2635}
2636
f73dfc50 2637/* Grab api lock, before checking state */
af19b491
AKS
2638static int
2639qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2640{
2641 int act, state;
2642
2643 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2644 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2645
2646 if (((state & 0x11111111) == (act & 0x11111111)) ||
2647 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2648 return 0;
2649 else
2650 return 1;
2651}
2652
96f8118c
SC
2653static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2654{
2655 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2656
2657 if (val != QLCNIC_DRV_IDC_VER) {
2658 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2659 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2660 }
2661
2662 return 0;
2663}
2664
af19b491
AKS
2665static int
2666qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2667{
2668 u32 val, prev_state;
aa5e18c0 2669 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2670 u8 portnum = adapter->portnum;
96f8118c 2671 u8 ret;
af19b491 2672
f73dfc50
AKS
2673 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2674 return 1;
2675
af19b491
AKS
2676 if (qlcnic_api_lock(adapter))
2677 return -1;
2678
31018e06 2679 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2680 if (!(val & (1 << (portnum * 4)))) {
2681 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2682 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2683 }
2684
2685 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2686 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2687
2688 switch (prev_state) {
2689 case QLCNIC_DEV_COLD:
bbd8c6a4 2690 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2691 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2692 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2693 qlcnic_api_unlock(adapter);
2694 return 1;
2695
2696 case QLCNIC_DEV_READY:
96f8118c 2697 ret = qlcnic_check_idc_ver(adapter);
af19b491 2698 qlcnic_api_unlock(adapter);
96f8118c 2699 return ret;
af19b491
AKS
2700
2701 case QLCNIC_DEV_NEED_RESET:
2702 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2703 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2704 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2705 break;
2706
2707 case QLCNIC_DEV_NEED_QUISCENT:
2708 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2709 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2710 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2711 break;
2712
2713 case QLCNIC_DEV_FAILED:
a7fc948f 2714 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2715 qlcnic_api_unlock(adapter);
2716 return -1;
bbd8c6a4
AKS
2717
2718 case QLCNIC_DEV_INITIALIZING:
2719 case QLCNIC_DEV_QUISCENT:
2720 break;
af19b491
AKS
2721 }
2722
2723 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2724
2725 do {
af19b491 2726 msleep(1000);
a5e463d0
SC
2727 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2728
2729 if (prev_state == QLCNIC_DEV_QUISCENT)
2730 continue;
2731 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2732
65b5b420
AKS
2733 if (!dev_init_timeo) {
2734 dev_err(&adapter->pdev->dev,
2735 "Waiting for device to initialize timeout\n");
af19b491 2736 return -1;
65b5b420 2737 }
af19b491
AKS
2738
2739 if (qlcnic_api_lock(adapter))
2740 return -1;
2741
2742 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2743 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2744 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2745
96f8118c 2746 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2747 qlcnic_api_unlock(adapter);
2748
96f8118c 2749 return ret;
af19b491
AKS
2750}
2751
2752static void
2753qlcnic_fwinit_work(struct work_struct *work)
2754{
2755 struct qlcnic_adapter *adapter = container_of(work,
2756 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2757 u32 dev_state = 0xf;
af19b491 2758
f73dfc50
AKS
2759 if (qlcnic_api_lock(adapter))
2760 goto err_ret;
af19b491 2761
a5e463d0 2762 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2763 if (dev_state == QLCNIC_DEV_QUISCENT ||
2764 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2765 qlcnic_api_unlock(adapter);
2766 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2767 FW_POLL_DELAY * 2);
2768 return;
2769 }
2770
9f26f547 2771 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2772 qlcnic_api_unlock(adapter);
2773 goto wait_npar;
9f26f547
AC
2774 }
2775
f73dfc50
AKS
2776 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2777 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2778 adapter->reset_ack_timeo);
2779 goto skip_ack_check;
2780 }
2781
2782 if (!qlcnic_check_drv_state(adapter)) {
2783skip_ack_check:
2784 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2785
f73dfc50
AKS
2786 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2787 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2788 QLCNIC_DEV_INITIALIZING);
2789 set_bit(__QLCNIC_START_FW, &adapter->state);
2790 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2791 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2792 }
2793
f73dfc50
AKS
2794 qlcnic_api_unlock(adapter);
2795
9f26f547 2796 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2797 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2798 adapter->fw_wait_cnt = 0;
af19b491
AKS
2799 return;
2800 }
af19b491
AKS
2801 goto err_ret;
2802 }
2803
f73dfc50 2804 qlcnic_api_unlock(adapter);
aa5e18c0 2805
9f26f547 2806wait_npar:
af19b491 2807 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2808 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2809
af19b491 2810 switch (dev_state) {
3c4b23b1 2811 case QLCNIC_DEV_READY:
9f26f547 2812 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2813 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2814 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2815 return;
2816 }
3c4b23b1
AKS
2817 case QLCNIC_DEV_FAILED:
2818 break;
2819 default:
2820 qlcnic_schedule_work(adapter,
2821 qlcnic_fwinit_work, FW_POLL_DELAY);
2822 return;
af19b491
AKS
2823 }
2824
2825err_ret:
f73dfc50
AKS
2826 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2827 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2828 netif_device_attach(adapter->netdev);
21854f02 2829 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2830}
2831
2832static void
2833qlcnic_detach_work(struct work_struct *work)
2834{
2835 struct qlcnic_adapter *adapter = container_of(work,
2836 struct qlcnic_adapter, fw_work.work);
2837 struct net_device *netdev = adapter->netdev;
2838 u32 status;
2839
2840 netif_device_detach(netdev);
2841
b8c17620
AKS
2842 /* Dont grab rtnl lock during Quiscent mode */
2843 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2844 if (netif_running(netdev))
2845 __qlcnic_down(adapter, netdev);
2846 } else
2847 qlcnic_down(adapter, netdev);
af19b491 2848
af19b491
AKS
2849 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2850
2851 if (status & QLCNIC_RCODE_FATAL_ERROR)
2852 goto err_ret;
2853
2854 if (adapter->temp == QLCNIC_TEMP_PANIC)
2855 goto err_ret;
2856
ade91f8e
AKS
2857 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2858 goto err_ret;
af19b491
AKS
2859
2860 adapter->fw_wait_cnt = 0;
2861
2862 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2863
2864 return;
2865
2866err_ret:
65b5b420
AKS
2867 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2868 status, adapter->temp);
34ce3626 2869 netif_device_attach(netdev);
21854f02 2870 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2871}
2872
3c4b23b1
AKS
2873/*Transit NPAR state to NON Operational */
2874static void
2875qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2876{
2877 u32 state;
2878
2879 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2880 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2881 return;
2882
2883 if (qlcnic_api_lock(adapter))
2884 return;
2885 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2886 qlcnic_api_unlock(adapter);
2887}
2888
f73dfc50 2889/*Transit to RESET state from READY state only */
af19b491
AKS
2890static void
2891qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2892{
2893 u32 state;
2894
cea8975e 2895 adapter->need_fw_reset = 1;
af19b491
AKS
2896 if (qlcnic_api_lock(adapter))
2897 return;
2898
2899 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2900
f73dfc50 2901 if (state == QLCNIC_DEV_READY) {
af19b491 2902 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2903 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2904 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2905 }
2906
3c4b23b1 2907 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2908 qlcnic_api_unlock(adapter);
2909}
2910
9f26f547
AC
2911/* Transit to NPAR READY state from NPAR NOT READY state */
2912static void
2913qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2914{
9f26f547
AC
2915 if (qlcnic_api_lock(adapter))
2916 return;
2917
3c4b23b1
AKS
2918 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2919 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2920
2921 qlcnic_api_unlock(adapter);
2922}
2923
af19b491
AKS
2924static void
2925qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2926 work_func_t func, int delay)
2927{
451724c8
SC
2928 if (test_bit(__QLCNIC_AER, &adapter->state))
2929 return;
2930
af19b491 2931 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2932 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2933 round_jiffies_relative(delay));
af19b491
AKS
2934}
2935
2936static void
2937qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2938{
2939 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2940 msleep(10);
2941
2942 cancel_delayed_work_sync(&adapter->fw_work);
2943}
2944
2945static void
2946qlcnic_attach_work(struct work_struct *work)
2947{
2948 struct qlcnic_adapter *adapter = container_of(work,
2949 struct qlcnic_adapter, fw_work.work);
2950 struct net_device *netdev = adapter->netdev;
b18971d1 2951 u32 npar_state;
af19b491 2952
b18971d1
AKS
2953 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2954 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2955 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2956 qlcnic_clr_all_drv_state(adapter, 0);
2957 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2958 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2959 FW_POLL_DELAY);
2960 else
2961 goto attach;
2962 QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
2963 return;
2964 }
2965attach:
af19b491 2966 if (netif_running(netdev)) {
52486a3a 2967 if (qlcnic_up(adapter, netdev))
af19b491 2968 goto done;
af19b491 2969
aec1e845 2970 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2971 }
2972
af19b491 2973done:
34ce3626 2974 netif_device_attach(netdev);
af19b491
AKS
2975 adapter->fw_fail_cnt = 0;
2976 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2977
2978 if (!qlcnic_clr_drv_state(adapter))
2979 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2980 FW_POLL_DELAY);
af19b491
AKS
2981}
2982
2983static int
2984qlcnic_check_health(struct qlcnic_adapter *adapter)
2985{
4e70812b 2986 u32 state = 0, heartbeat;
af19b491
AKS
2987 struct net_device *netdev = adapter->netdev;
2988
2989 if (qlcnic_check_temp(adapter))
2990 goto detach;
2991
2372a5f1 2992 if (adapter->need_fw_reset)
af19b491 2993 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2994
2995 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 2996 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 2997 qlcnic_set_npar_non_operational(adapter);
af19b491 2998 adapter->need_fw_reset = 1;
b8c17620
AKS
2999 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3000 goto detach;
af19b491 3001
4e70812b
SC
3002 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3003 if (heartbeat != adapter->heartbeat) {
3004 adapter->heartbeat = heartbeat;
af19b491
AKS
3005 adapter->fw_fail_cnt = 0;
3006 if (adapter->need_fw_reset)
3007 goto detach;
68bf1c68 3008
9ce13ca8 3009 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
3010 qlcnic_reset_hw_context(adapter);
3011 adapter->netdev->trans_start = jiffies;
3012 }
3013
af19b491
AKS
3014 return 0;
3015 }
3016
3017 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3018 return 0;
3019
3020 qlcnic_dev_request_reset(adapter);
3021
9ce13ca8 3022 if (auto_fw_reset)
0df170b6 3023 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3024
3025 dev_info(&netdev->dev, "firmware hang detected\n");
3026
3027detach:
3028 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3029 QLCNIC_DEV_NEED_RESET;
3030
9ce13ca8 3031 if (auto_fw_reset &&
65b5b420
AKS
3032 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3033
af19b491 3034 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3035 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3036 }
af19b491
AKS
3037
3038 return 1;
3039}
3040
3041static void
3042qlcnic_fw_poll_work(struct work_struct *work)
3043{
3044 struct qlcnic_adapter *adapter = container_of(work,
3045 struct qlcnic_adapter, fw_work.work);
3046
3047 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3048 goto reschedule;
3049
3050
3051 if (qlcnic_check_health(adapter))
3052 return;
3053
b5e5492c
AKS
3054 if (adapter->fhash.fnum)
3055 qlcnic_prune_lb_filters(adapter);
3056
af19b491
AKS
3057reschedule:
3058 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3059}
3060
451724c8
SC
3061static int qlcnic_is_first_func(struct pci_dev *pdev)
3062{
3063 struct pci_dev *oth_pdev;
3064 int val = pdev->devfn;
3065
3066 while (val-- > 0) {
3067 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3068 (pdev->bus), pdev->bus->number,
3069 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3070 if (!oth_pdev)
3071 continue;
451724c8 3072
bfc978fa
AKS
3073 if (oth_pdev->current_state != PCI_D3cold) {
3074 pci_dev_put(oth_pdev);
451724c8 3075 return 0;
bfc978fa
AKS
3076 }
3077 pci_dev_put(oth_pdev);
451724c8
SC
3078 }
3079 return 1;
3080}
3081
3082static int qlcnic_attach_func(struct pci_dev *pdev)
3083{
3084 int err, first_func;
3085 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3086 struct net_device *netdev = adapter->netdev;
3087
3088 pdev->error_state = pci_channel_io_normal;
3089
3090 err = pci_enable_device(pdev);
3091 if (err)
3092 return err;
3093
3094 pci_set_power_state(pdev, PCI_D0);
3095 pci_set_master(pdev);
3096 pci_restore_state(pdev);
3097
3098 first_func = qlcnic_is_first_func(pdev);
3099
3100 if (qlcnic_api_lock(adapter))
3101 return -EINVAL;
3102
933fce12 3103 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3104 adapter->need_fw_reset = 1;
3105 set_bit(__QLCNIC_START_FW, &adapter->state);
3106 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3107 QLCDB(adapter, DRV, "Restarting fw\n");
3108 }
3109 qlcnic_api_unlock(adapter);
3110
3111 err = adapter->nic_ops->start_firmware(adapter);
3112 if (err)
3113 return err;
3114
3115 qlcnic_clr_drv_state(adapter);
3116 qlcnic_setup_intr(adapter);
3117
3118 if (netif_running(netdev)) {
3119 err = qlcnic_attach(adapter);
3120 if (err) {
21854f02 3121 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3122 clear_bit(__QLCNIC_AER, &adapter->state);
3123 netif_device_attach(netdev);
3124 return err;
3125 }
3126
3127 err = qlcnic_up(adapter, netdev);
3128 if (err)
3129 goto done;
3130
aec1e845 3131 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3132 }
3133 done:
3134 netif_device_attach(netdev);
3135 return err;
3136}
3137
3138static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3139 pci_channel_state_t state)
3140{
3141 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3142 struct net_device *netdev = adapter->netdev;
3143
3144 if (state == pci_channel_io_perm_failure)
3145 return PCI_ERS_RESULT_DISCONNECT;
3146
3147 if (state == pci_channel_io_normal)
3148 return PCI_ERS_RESULT_RECOVERED;
3149
3150 set_bit(__QLCNIC_AER, &adapter->state);
3151 netif_device_detach(netdev);
3152
3153 cancel_delayed_work_sync(&adapter->fw_work);
3154
3155 if (netif_running(netdev))
3156 qlcnic_down(adapter, netdev);
3157
3158 qlcnic_detach(adapter);
3159 qlcnic_teardown_intr(adapter);
3160
3161 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3162
3163 pci_save_state(pdev);
3164 pci_disable_device(pdev);
3165
3166 return PCI_ERS_RESULT_NEED_RESET;
3167}
3168
3169static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3170{
3171 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3172 PCI_ERS_RESULT_RECOVERED;
3173}
3174
3175static void qlcnic_io_resume(struct pci_dev *pdev)
3176{
3177 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3178
3179 pci_cleanup_aer_uncorrect_error_status(pdev);
3180
3181 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3182 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3183 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3184 FW_POLL_DELAY);
3185}
3186
87eb743b
AC
3187static int
3188qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3189{
3190 int err;
3191
3192 err = qlcnic_can_start_firmware(adapter);
3193 if (err)
3194 return err;
3195
78f84e1a
AKS
3196 err = qlcnic_check_npar_opertional(adapter);
3197 if (err)
3198 return err;
3c4b23b1 3199
174240a8
RB
3200 err = qlcnic_initialize_nic(adapter);
3201 if (err)
3202 return err;
3203
87eb743b
AC
3204 qlcnic_check_options(adapter);
3205
7373373d
RB
3206 err = qlcnic_set_eswitch_port_config(adapter);
3207 if (err)
3208 return err;
3209
87eb743b
AC
3210 adapter->need_fw_reset = 0;
3211
3212 return err;
3213}
3214
3215static int
3216qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3217{
3218 return -EOPNOTSUPP;
3219}
3220
3221static int
3222qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3223{
3224 return -EOPNOTSUPP;
3225}
3226
af19b491
AKS
3227static ssize_t
3228qlcnic_store_bridged_mode(struct device *dev,
3229 struct device_attribute *attr, const char *buf, size_t len)
3230{
3231 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3232 unsigned long new;
3233 int ret = -EINVAL;
3234
3235 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3236 goto err_out;
3237
8a15ad1f 3238 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3239 goto err_out;
3240
3241 if (strict_strtoul(buf, 2, &new))
3242 goto err_out;
3243
2e9d722d 3244 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3245 ret = len;
3246
3247err_out:
3248 return ret;
3249}
3250
3251static ssize_t
3252qlcnic_show_bridged_mode(struct device *dev,
3253 struct device_attribute *attr, char *buf)
3254{
3255 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3256 int bridged_mode = 0;
3257
3258 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3259 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3260
3261 return sprintf(buf, "%d\n", bridged_mode);
3262}
3263
3264static struct device_attribute dev_attr_bridged_mode = {
3265 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3266 .show = qlcnic_show_bridged_mode,
3267 .store = qlcnic_store_bridged_mode,
3268};
3269
3270static ssize_t
3271qlcnic_store_diag_mode(struct device *dev,
3272 struct device_attribute *attr, const char *buf, size_t len)
3273{
3274 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3275 unsigned long new;
3276
3277 if (strict_strtoul(buf, 2, &new))
3278 return -EINVAL;
3279
3280 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3281 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3282
3283 return len;
3284}
3285
3286static ssize_t
3287qlcnic_show_diag_mode(struct device *dev,
3288 struct device_attribute *attr, char *buf)
3289{
3290 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3291
3292 return sprintf(buf, "%d\n",
3293 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3294}
3295
3296static struct device_attribute dev_attr_diag_mode = {
3297 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3298 .show = qlcnic_show_diag_mode,
3299 .store = qlcnic_store_diag_mode,
3300};
3301
3302static int
3303qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3304 loff_t offset, size_t size)
3305{
897e8c7c
DP
3306 size_t crb_size = 4;
3307
af19b491
AKS
3308 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3309 return -EIO;
3310
897e8c7c
DP
3311 if (offset < QLCNIC_PCI_CRBSPACE) {
3312 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3313 QLCNIC_PCI_CAMQM_END))
3314 crb_size = 8;
3315 else
3316 return -EINVAL;
3317 }
af19b491 3318
897e8c7c
DP
3319 if ((size != crb_size) || (offset & (crb_size-1)))
3320 return -EINVAL;
af19b491
AKS
3321
3322 return 0;
3323}
3324
3325static ssize_t
2c3c8bea
CW
3326qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3327 struct bin_attribute *attr,
af19b491
AKS
3328 char *buf, loff_t offset, size_t size)
3329{
3330 struct device *dev = container_of(kobj, struct device, kobj);
3331 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3332 u32 data;
897e8c7c 3333 u64 qmdata;
af19b491
AKS
3334 int ret;
3335
3336 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3337 if (ret != 0)
3338 return ret;
3339
897e8c7c
DP
3340 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3341 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3342 memcpy(buf, &qmdata, size);
3343 } else {
3344 data = QLCRD32(adapter, offset);
3345 memcpy(buf, &data, size);
3346 }
af19b491
AKS
3347 return size;
3348}
3349
3350static ssize_t
2c3c8bea
CW
3351qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3352 struct bin_attribute *attr,
af19b491
AKS
3353 char *buf, loff_t offset, size_t size)
3354{
3355 struct device *dev = container_of(kobj, struct device, kobj);
3356 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3357 u32 data;
897e8c7c 3358 u64 qmdata;
af19b491
AKS
3359 int ret;
3360
3361 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3362 if (ret != 0)
3363 return ret;
3364
897e8c7c
DP
3365 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3366 memcpy(&qmdata, buf, size);
3367 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3368 } else {
3369 memcpy(&data, buf, size);
3370 QLCWR32(adapter, offset, data);
3371 }
af19b491
AKS
3372 return size;
3373}
3374
3375static int
3376qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3377 loff_t offset, size_t size)
3378{
3379 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3380 return -EIO;
3381
3382 if ((size != 8) || (offset & 0x7))
3383 return -EIO;
3384
3385 return 0;
3386}
3387
3388static ssize_t
2c3c8bea
CW
3389qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3390 struct bin_attribute *attr,
af19b491
AKS
3391 char *buf, loff_t offset, size_t size)
3392{
3393 struct device *dev = container_of(kobj, struct device, kobj);
3394 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3395 u64 data;
3396 int ret;
3397
3398 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3399 if (ret != 0)
3400 return ret;
3401
3402 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3403 return -EIO;
3404
3405 memcpy(buf, &data, size);
3406
3407 return size;
3408}
3409
3410static ssize_t
2c3c8bea
CW
3411qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3412 struct bin_attribute *attr,
af19b491
AKS
3413 char *buf, loff_t offset, size_t size)
3414{
3415 struct device *dev = container_of(kobj, struct device, kobj);
3416 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3417 u64 data;
3418 int ret;
3419
3420 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3421 if (ret != 0)
3422 return ret;
3423
3424 memcpy(&data, buf, size);
3425
3426 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3427 return -EIO;
3428
3429 return size;
3430}
3431
3432
3433static struct bin_attribute bin_attr_crb = {
3434 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3435 .size = 0,
3436 .read = qlcnic_sysfs_read_crb,
3437 .write = qlcnic_sysfs_write_crb,
3438};
3439
3440static struct bin_attribute bin_attr_mem = {
3441 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3442 .size = 0,
3443 .read = qlcnic_sysfs_read_mem,
3444 .write = qlcnic_sysfs_write_mem,
3445};
3446
cea8975e 3447static int
346fe763
RB
3448validate_pm_config(struct qlcnic_adapter *adapter,
3449 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3450{
3451
3452 u8 src_pci_func, s_esw_id, d_esw_id;
3453 u8 dest_pci_func;
3454 int i;
3455
3456 for (i = 0; i < count; i++) {
3457 src_pci_func = pm_cfg[i].pci_func;
3458 dest_pci_func = pm_cfg[i].dest_npar;
3459 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3460 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3461 return QL_STATUS_INVALID_PARAM;
3462
3463 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3464 return QL_STATUS_INVALID_PARAM;
3465
3466 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3467 return QL_STATUS_INVALID_PARAM;
3468
346fe763
RB
3469 s_esw_id = adapter->npars[src_pci_func].phy_port;
3470 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3471
3472 if (s_esw_id != d_esw_id)
3473 return QL_STATUS_INVALID_PARAM;
3474
3475 }
3476 return 0;
3477
3478}
3479
3480static ssize_t
3481qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3482 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3483{
3484 struct device *dev = container_of(kobj, struct device, kobj);
3485 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3486 struct qlcnic_pm_func_cfg *pm_cfg;
3487 u32 id, action, pci_func;
3488 int count, rem, i, ret;
3489
3490 count = size / sizeof(struct qlcnic_pm_func_cfg);
3491 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3492 if (rem)
3493 return QL_STATUS_INVALID_PARAM;
3494
3495 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3496
3497 ret = validate_pm_config(adapter, pm_cfg, count);
3498 if (ret)
3499 return ret;
3500 for (i = 0; i < count; i++) {
3501 pci_func = pm_cfg[i].pci_func;
4e8acb01 3502 action = !!pm_cfg[i].action;
346fe763
RB
3503 id = adapter->npars[pci_func].phy_port;
3504 ret = qlcnic_config_port_mirroring(adapter, id,
3505 action, pci_func);
3506 if (ret)
3507 return ret;
3508 }
3509
3510 for (i = 0; i < count; i++) {
3511 pci_func = pm_cfg[i].pci_func;
3512 id = adapter->npars[pci_func].phy_port;
4e8acb01 3513 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3514 adapter->npars[pci_func].dest_npar = id;
3515 }
3516 return size;
3517}
3518
3519static ssize_t
3520qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3521 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3522{
3523 struct device *dev = container_of(kobj, struct device, kobj);
3524 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3525 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3526 int i;
3527
3528 if (size != sizeof(pm_cfg))
3529 return QL_STATUS_INVALID_PARAM;
3530
3531 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3532 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3533 continue;
3534 pm_cfg[i].action = adapter->npars[i].enable_pm;
3535 pm_cfg[i].dest_npar = 0;
3536 pm_cfg[i].pci_func = i;
3537 }
3538 memcpy(buf, &pm_cfg, size);
3539
3540 return size;
3541}
3542
cea8975e 3543static int
346fe763 3544validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3545 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3546{
7613c87b 3547 u32 op_mode;
346fe763
RB
3548 u8 pci_func;
3549 int i;
7613c87b 3550
b1fc6d3c 3551 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3552
346fe763
RB
3553 for (i = 0; i < count; i++) {
3554 pci_func = esw_cfg[i].pci_func;
3555 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3556 return QL_STATUS_INVALID_PARAM;
3557
4e8acb01
RB
3558 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3559 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3560 return QL_STATUS_INVALID_PARAM;
346fe763 3561
4e8acb01
RB
3562 switch (esw_cfg[i].op_mode) {
3563 case QLCNIC_PORT_DEFAULTS:
7613c87b 3564 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3565 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3566 if (esw_cfg[i].mac_anti_spoof != 0)
3567 return QL_STATUS_INVALID_PARAM;
3568 if (esw_cfg[i].mac_override != 1)
3569 return QL_STATUS_INVALID_PARAM;
3570 if (esw_cfg[i].promisc_mode != 1)
3571 return QL_STATUS_INVALID_PARAM;
7373373d 3572 }
4e8acb01
RB
3573 break;
3574 case QLCNIC_ADD_VLAN:
346fe763
RB
3575 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3576 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3577 if (!esw_cfg[i].op_type)
3578 return QL_STATUS_INVALID_PARAM;
3579 break;
3580 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3581 if (!esw_cfg[i].op_type)
3582 return QL_STATUS_INVALID_PARAM;
3583 break;
3584 default:
346fe763 3585 return QL_STATUS_INVALID_PARAM;
4e8acb01 3586 }
346fe763 3587 }
346fe763
RB
3588 return 0;
3589}
3590
3591static ssize_t
3592qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3593 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3594{
3595 struct device *dev = container_of(kobj, struct device, kobj);
3596 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3597 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3598 struct qlcnic_npar_info *npar;
346fe763 3599 int count, rem, i, ret;
0325d69b 3600 u8 pci_func, op_mode = 0;
346fe763
RB
3601
3602 count = size / sizeof(struct qlcnic_esw_func_cfg);
3603 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3604 if (rem)
3605 return QL_STATUS_INVALID_PARAM;
3606
3607 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3608 ret = validate_esw_config(adapter, esw_cfg, count);
3609 if (ret)
3610 return ret;
3611
3612 for (i = 0; i < count; i++) {
0325d69b
RB
3613 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3614 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3615 return QL_STATUS_INVALID_PARAM;
e9a47700 3616
b1fc6d3c 3617 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
e9a47700
RB
3618 continue;
3619
3620 op_mode = esw_cfg[i].op_mode;
3621 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3622 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3623 esw_cfg[i].pci_func = adapter->ahw->pci_func;
e9a47700
RB
3624
3625 switch (esw_cfg[i].op_mode) {
3626 case QLCNIC_PORT_DEFAULTS:
3627 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3628 break;
8cf61f89
AKS
3629 case QLCNIC_ADD_VLAN:
3630 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3631 break;
3632 case QLCNIC_DEL_VLAN:
3633 esw_cfg[i].vlan_id = 0;
3634 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3635 break;
0325d69b 3636 }
346fe763
RB
3637 }
3638
0325d69b
RB
3639 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3640 goto out;
e9a47700 3641
346fe763
RB
3642 for (i = 0; i < count; i++) {
3643 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3644 npar = &adapter->npars[pci_func];
3645 switch (esw_cfg[i].op_mode) {
3646 case QLCNIC_PORT_DEFAULTS:
3647 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3648 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3649 npar->offload_flags = esw_cfg[i].offload_flags;
3650 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3651 npar->discard_tagged = esw_cfg[i].discard_tagged;
3652 break;
3653 case QLCNIC_ADD_VLAN:
3654 npar->pvid = esw_cfg[i].vlan_id;
3655 break;
3656 case QLCNIC_DEL_VLAN:
3657 npar->pvid = 0;
3658 break;
3659 }
346fe763 3660 }
0325d69b 3661out:
346fe763
RB
3662 return size;
3663}
3664
3665static ssize_t
3666qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3667 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3668{
3669 struct device *dev = container_of(kobj, struct device, kobj);
3670 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3671 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3672 u8 i;
346fe763
RB
3673
3674 if (size != sizeof(esw_cfg))
3675 return QL_STATUS_INVALID_PARAM;
3676
3677 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3678 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3679 continue;
4e8acb01
RB
3680 esw_cfg[i].pci_func = i;
3681 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3682 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3683 }
3684 memcpy(buf, &esw_cfg, size);
3685
3686 return size;
3687}
3688
cea8975e 3689static int
346fe763
RB
3690validate_npar_config(struct qlcnic_adapter *adapter,
3691 struct qlcnic_npar_func_cfg *np_cfg, int count)
3692{
3693 u8 pci_func, i;
3694
3695 for (i = 0; i < count; i++) {
3696 pci_func = np_cfg[i].pci_func;
3697 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3698 return QL_STATUS_INVALID_PARAM;
3699
3700 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3701 return QL_STATUS_INVALID_PARAM;
3702
d12b0d9a
RB
3703 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3704 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3705 return QL_STATUS_INVALID_PARAM;
3706 }
3707 return 0;
3708}
3709
3710static ssize_t
3711qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3712 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3713{
3714 struct device *dev = container_of(kobj, struct device, kobj);
3715 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3716 struct qlcnic_info nic_info;
3717 struct qlcnic_npar_func_cfg *np_cfg;
3718 int i, count, rem, ret;
3719 u8 pci_func;
3720
3721 count = size / sizeof(struct qlcnic_npar_func_cfg);
3722 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3723 if (rem)
3724 return QL_STATUS_INVALID_PARAM;
3725
3726 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3727 ret = validate_npar_config(adapter, np_cfg, count);
3728 if (ret)
3729 return ret;
3730
3731 for (i = 0; i < count ; i++) {
3732 pci_func = np_cfg[i].pci_func;
3733 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3734 if (ret)
3735 return ret;
3736 nic_info.pci_func = pci_func;
3737 nic_info.min_tx_bw = np_cfg[i].min_bw;
3738 nic_info.max_tx_bw = np_cfg[i].max_bw;
3739 ret = qlcnic_set_nic_info(adapter, &nic_info);
3740 if (ret)
3741 return ret;
cea8975e
AC
3742 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3743 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3744 }
3745
3746 return size;
3747
3748}
3749static ssize_t
3750qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3751 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3752{
3753 struct device *dev = container_of(kobj, struct device, kobj);
3754 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3755 struct qlcnic_info nic_info;
3756 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3757 int i, ret;
3758
3759 if (size != sizeof(np_cfg))
3760 return QL_STATUS_INVALID_PARAM;
3761
3762 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3763 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3764 continue;
3765 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3766 if (ret)
3767 return ret;
3768
3769 np_cfg[i].pci_func = i;
a1c0c459 3770 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3771 np_cfg[i].port_num = nic_info.phys_port;
3772 np_cfg[i].fw_capab = nic_info.capabilities;
3773 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3774 np_cfg[i].max_bw = nic_info.max_tx_bw;
3775 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3776 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3777 }
3778 memcpy(buf, &np_cfg, size);
3779 return size;
3780}
3781
b6021212
AKS
3782static ssize_t
3783qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3784 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3785{
3786 struct device *dev = container_of(kobj, struct device, kobj);
3787 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3788 struct qlcnic_esw_statistics port_stats;
3789 int ret;
3790
3791 if (size != sizeof(struct qlcnic_esw_statistics))
3792 return QL_STATUS_INVALID_PARAM;
3793
3794 if (offset >= QLCNIC_MAX_PCI_FUNC)
3795 return QL_STATUS_INVALID_PARAM;
3796
3797 memset(&port_stats, 0, size);
3798 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3799 &port_stats.rx);
3800 if (ret)
3801 return ret;
3802
3803 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3804 &port_stats.tx);
3805 if (ret)
3806 return ret;
3807
3808 memcpy(buf, &port_stats, size);
3809 return size;
3810}
3811
3812static ssize_t
3813qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3814 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3815{
3816 struct device *dev = container_of(kobj, struct device, kobj);
3817 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3818 struct qlcnic_esw_statistics esw_stats;
3819 int ret;
3820
3821 if (size != sizeof(struct qlcnic_esw_statistics))
3822 return QL_STATUS_INVALID_PARAM;
3823
3824 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3825 return QL_STATUS_INVALID_PARAM;
3826
3827 memset(&esw_stats, 0, size);
3828 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3829 &esw_stats.rx);
3830 if (ret)
3831 return ret;
3832
3833 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3834 &esw_stats.tx);
3835 if (ret)
3836 return ret;
3837
3838 memcpy(buf, &esw_stats, size);
3839 return size;
3840}
3841
3842static ssize_t
3843qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3844 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3845{
3846 struct device *dev = container_of(kobj, struct device, kobj);
3847 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3848 int ret;
3849
3850 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3851 return QL_STATUS_INVALID_PARAM;
3852
3853 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3854 QLCNIC_QUERY_RX_COUNTER);
3855 if (ret)
3856 return ret;
3857
3858 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3859 QLCNIC_QUERY_TX_COUNTER);
3860 if (ret)
3861 return ret;
3862
3863 return size;
3864}
3865
3866static ssize_t
3867qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3868 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3869{
3870
3871 struct device *dev = container_of(kobj, struct device, kobj);
3872 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3873 int ret;
3874
3875 if (offset >= QLCNIC_MAX_PCI_FUNC)
3876 return QL_STATUS_INVALID_PARAM;
3877
3878 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3879 QLCNIC_QUERY_RX_COUNTER);
3880 if (ret)
3881 return ret;
3882
3883 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3884 QLCNIC_QUERY_TX_COUNTER);
3885 if (ret)
3886 return ret;
3887
3888 return size;
3889}
3890
346fe763
RB
3891static ssize_t
3892qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3893 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3894{
3895 struct device *dev = container_of(kobj, struct device, kobj);
3896 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3897 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3898 struct qlcnic_pci_info *pci_info;
346fe763
RB
3899 int i, ret;
3900
3901 if (size != sizeof(pci_cfg))
3902 return QL_STATUS_INVALID_PARAM;
3903
e88db3bd
DC
3904 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3905 if (!pci_info)
3906 return -ENOMEM;
3907
346fe763 3908 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3909 if (ret) {
3910 kfree(pci_info);
346fe763 3911 return ret;
e88db3bd 3912 }
346fe763
RB
3913
3914 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3915 pci_cfg[i].pci_func = pci_info[i].id;
3916 pci_cfg[i].func_type = pci_info[i].type;
3917 pci_cfg[i].port_num = pci_info[i].default_port;
3918 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3919 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3920 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3921 }
3922 memcpy(buf, &pci_cfg, size);
e88db3bd 3923 kfree(pci_info);
346fe763 3924 return size;
346fe763
RB
3925}
3926static struct bin_attribute bin_attr_npar_config = {
3927 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3928 .size = 0,
3929 .read = qlcnic_sysfs_read_npar_config,
3930 .write = qlcnic_sysfs_write_npar_config,
3931};
3932
3933static struct bin_attribute bin_attr_pci_config = {
3934 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3935 .size = 0,
3936 .read = qlcnic_sysfs_read_pci_config,
3937 .write = NULL,
3938};
3939
b6021212
AKS
3940static struct bin_attribute bin_attr_port_stats = {
3941 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3942 .size = 0,
3943 .read = qlcnic_sysfs_get_port_stats,
3944 .write = qlcnic_sysfs_clear_port_stats,
3945};
3946
3947static struct bin_attribute bin_attr_esw_stats = {
3948 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3949 .size = 0,
3950 .read = qlcnic_sysfs_get_esw_stats,
3951 .write = qlcnic_sysfs_clear_esw_stats,
3952};
3953
346fe763
RB
3954static struct bin_attribute bin_attr_esw_config = {
3955 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3956 .size = 0,
3957 .read = qlcnic_sysfs_read_esw_config,
3958 .write = qlcnic_sysfs_write_esw_config,
3959};
3960
3961static struct bin_attribute bin_attr_pm_config = {
3962 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3963 .size = 0,
3964 .read = qlcnic_sysfs_read_pm_config,
3965 .write = qlcnic_sysfs_write_pm_config,
3966};
3967
af19b491
AKS
3968static void
3969qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3970{
3971 struct device *dev = &adapter->pdev->dev;
3972
3973 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3974 if (device_create_file(dev, &dev_attr_bridged_mode))
3975 dev_warn(dev,
3976 "failed to create bridged_mode sysfs entry\n");
3977}
3978
3979static void
3980qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3981{
3982 struct device *dev = &adapter->pdev->dev;
3983
3984 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3985 device_remove_file(dev, &dev_attr_bridged_mode);
3986}
3987
3988static void
3989qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3990{
3991 struct device *dev = &adapter->pdev->dev;
3992
b6021212
AKS
3993 if (device_create_bin_file(dev, &bin_attr_port_stats))
3994 dev_info(dev, "failed to create port stats sysfs entry");
3995
132ff00a
AC
3996 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3997 return;
af19b491
AKS
3998 if (device_create_file(dev, &dev_attr_diag_mode))
3999 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4000 if (device_create_bin_file(dev, &bin_attr_crb))
4001 dev_info(dev, "failed to create crb sysfs entry\n");
4002 if (device_create_bin_file(dev, &bin_attr_mem))
4003 dev_info(dev, "failed to create mem sysfs entry\n");
53478fef
SC
4004 if (device_create_bin_file(dev, &bin_attr_pci_config))
4005 dev_info(dev, "failed to create pci config sysfs entry");
4e8acb01
RB
4006 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4007 return;
4008 if (device_create_bin_file(dev, &bin_attr_esw_config))
4009 dev_info(dev, "failed to create esw config sysfs entry");
4010 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4011 return;
346fe763
RB
4012 if (device_create_bin_file(dev, &bin_attr_npar_config))
4013 dev_info(dev, "failed to create npar config sysfs entry");
346fe763
RB
4014 if (device_create_bin_file(dev, &bin_attr_pm_config))
4015 dev_info(dev, "failed to create pm config sysfs entry");
b6021212
AKS
4016 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4017 dev_info(dev, "failed to create eswitch stats sysfs entry");
af19b491
AKS
4018}
4019
af19b491
AKS
4020static void
4021qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4022{
4023 struct device *dev = &adapter->pdev->dev;
4024
b6021212
AKS
4025 device_remove_bin_file(dev, &bin_attr_port_stats);
4026
132ff00a
AC
4027 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4028 return;
af19b491
AKS
4029 device_remove_file(dev, &dev_attr_diag_mode);
4030 device_remove_bin_file(dev, &bin_attr_crb);
4031 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 4032 device_remove_bin_file(dev, &bin_attr_pci_config);
4e8acb01
RB
4033 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4034 return;
4035 device_remove_bin_file(dev, &bin_attr_esw_config);
4036 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4037 return;
346fe763 4038 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4039 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4040 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4041}
4042
4043#ifdef CONFIG_INET
4044
4045#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4046
af19b491 4047static void
aec1e845
AKS
4048qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4049 struct net_device *dev, unsigned long event)
af19b491
AKS
4050{
4051 struct in_device *indev;
af19b491 4052
af19b491
AKS
4053 indev = in_dev_get(dev);
4054 if (!indev)
4055 return;
4056
4057 for_ifa(indev) {
4058 switch (event) {
4059 case NETDEV_UP:
4060 qlcnic_config_ipaddr(adapter,
4061 ifa->ifa_address, QLCNIC_IP_UP);
4062 break;
4063 case NETDEV_DOWN:
4064 qlcnic_config_ipaddr(adapter,
4065 ifa->ifa_address, QLCNIC_IP_DOWN);
4066 break;
4067 default:
4068 break;
4069 }
4070 } endfor_ifa(indev);
4071
4072 in_dev_put(indev);
af19b491
AKS
4073}
4074
aec1e845
AKS
4075static void
4076qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4077{
4078 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4079 struct net_device *dev;
4080 u16 vid;
4081
4082 qlcnic_config_indev_addr(adapter, netdev, event);
4083
b9796a14
AC
4084 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4085 dev = vlan_find_dev(netdev, vid);
aec1e845
AKS
4086 if (!dev)
4087 continue;
aec1e845
AKS
4088 qlcnic_config_indev_addr(adapter, dev, event);
4089 }
4090}
4091
af19b491
AKS
4092static int qlcnic_netdev_event(struct notifier_block *this,
4093 unsigned long event, void *ptr)
4094{
4095 struct qlcnic_adapter *adapter;
4096 struct net_device *dev = (struct net_device *)ptr;
4097
4098recheck:
4099 if (dev == NULL)
4100 goto done;
4101
4102 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4103 dev = vlan_dev_real_dev(dev);
4104 goto recheck;
4105 }
4106
4107 if (!is_qlcnic_netdev(dev))
4108 goto done;
4109
4110 adapter = netdev_priv(dev);
4111
4112 if (!adapter)
4113 goto done;
4114
8a15ad1f 4115 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4116 goto done;
4117
aec1e845 4118 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4119done:
4120 return NOTIFY_DONE;
4121}
4122
4123static int
4124qlcnic_inetaddr_event(struct notifier_block *this,
4125 unsigned long event, void *ptr)
4126{
4127 struct qlcnic_adapter *adapter;
4128 struct net_device *dev;
4129
4130 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4131
4132 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4133
4134recheck:
aec1e845 4135 if (dev == NULL)
af19b491
AKS
4136 goto done;
4137
4138 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4139 dev = vlan_dev_real_dev(dev);
4140 goto recheck;
4141 }
4142
4143 if (!is_qlcnic_netdev(dev))
4144 goto done;
4145
4146 adapter = netdev_priv(dev);
4147
251a84c9 4148 if (!adapter)
af19b491
AKS
4149 goto done;
4150
8a15ad1f 4151 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4152 goto done;
4153
4154 switch (event) {
4155 case NETDEV_UP:
4156 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4157 break;
4158 case NETDEV_DOWN:
4159 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4160 break;
4161 default:
4162 break;
4163 }
4164
4165done:
4166 return NOTIFY_DONE;
4167}
4168
4169static struct notifier_block qlcnic_netdev_cb = {
4170 .notifier_call = qlcnic_netdev_event,
4171};
4172
4173static struct notifier_block qlcnic_inetaddr_cb = {
4174 .notifier_call = qlcnic_inetaddr_event,
4175};
4176#else
4177static void
aec1e845 4178qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4179{ }
4180#endif
451724c8
SC
4181static struct pci_error_handlers qlcnic_err_handler = {
4182 .error_detected = qlcnic_io_error_detected,
4183 .slot_reset = qlcnic_io_slot_reset,
4184 .resume = qlcnic_io_resume,
4185};
af19b491
AKS
4186
4187static struct pci_driver qlcnic_driver = {
4188 .name = qlcnic_driver_name,
4189 .id_table = qlcnic_pci_tbl,
4190 .probe = qlcnic_probe,
4191 .remove = __devexit_p(qlcnic_remove),
4192#ifdef CONFIG_PM
4193 .suspend = qlcnic_suspend,
4194 .resume = qlcnic_resume,
4195#endif
451724c8
SC
4196 .shutdown = qlcnic_shutdown,
4197 .err_handler = &qlcnic_err_handler
4198
af19b491
AKS
4199};
4200
4201static int __init qlcnic_init_module(void)
4202{
0cf3a14c 4203 int ret;
af19b491
AKS
4204
4205 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4206
f7ec804a
AKS
4207 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4208 if (qlcnic_wq == NULL) {
4209 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4210 return -ENOMEM;
4211 }
4212
af19b491
AKS
4213#ifdef CONFIG_INET
4214 register_netdevice_notifier(&qlcnic_netdev_cb);
4215 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4216#endif
4217
0cf3a14c
AKS
4218 ret = pci_register_driver(&qlcnic_driver);
4219 if (ret) {
4220#ifdef CONFIG_INET
4221 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4222 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4223#endif
f7ec804a 4224 destroy_workqueue(qlcnic_wq);
0cf3a14c 4225 }
af19b491 4226
0cf3a14c 4227 return ret;
af19b491
AKS
4228}
4229
4230module_init(qlcnic_init_module);
4231
4232static void __exit qlcnic_exit_module(void)
4233{
4234
4235 pci_unregister_driver(&qlcnic_driver);
4236
4237#ifdef CONFIG_INET
4238 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4239 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4240#endif
f7ec804a 4241 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4242}
4243
4244module_exit(qlcnic_exit_module);
This page took 0.418158 seconds and 5 git commands to generate.