qlcnic: Fix delay in reset path
drivers/net/qlcnic/qlcnic_main.c
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);
static void qlcnic_vlan_rx_add(struct net_device *, u16);
static void qlcnic_vlan_rx_del(struct net_device *, u16);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);

inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}

static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}

static int
qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct qlcnic_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

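/*
 * The last SDS ring is serviced by qlcnic_poll(), which also reaps TX
 * completions, so the NAPI budget is split across all rings; the remaining
 * rings use the RX-only qlcnic_rx_poll() handler.
 */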
static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

static void
qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}

static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		adapter->msix_entries[i].entry = i;
}

static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	u8 mac_addr[ETH_ALEN];
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
		return -EIO;

	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
				netdev->dev_addr);

	return 0;
}

static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
		return -EOPNOTSUPP;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	qlcnic_set_multi(adapter->netdev);

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}

static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_fix_features = qlcnic_fix_features,
	.ndo_set_features = qlcnic_set_features,
	.ndo_tx_timeout = qlcnic_tx_timeout,
	.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
	.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};

static struct qlcnic_nic_template qlcnic_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_config_led,
	.start_firmware = qlcnic_start_firmware
};

static struct qlcnic_nic_template qlcnic_vf_ops = {
	.config_bridged_mode = qlcnicvf_config_bridged_mode,
	.config_led = qlcnicvf_config_led,
	.start_firmware = qlcnicvf_start_firmware
};

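/*
 * Request 'num_msix' MSI-X vectors.  If pci_enable_msix() reports that only
 * a smaller number is available (positive return value), retry with that
 * count rounded down to a power of two; on success the SDS ring count is
 * set to the number of vectors obtained.
 */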
static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err = -1;

	adapter->max_sds_rings = 1;
	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {
 enable_msix:
		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return err;
		}
		if (err > 0) {
			num_msix = rounddown_pow_of_two(err);
			if (num_msix)
				goto enable_msix;
		}
	}
	return err;
}

static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
				msi_tgt_status[adapter->ahw->pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	legacy_intrp = &legacy_intr[adapter->ahw->pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
			ISR_INT_STATE_REG);
	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}

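/*
 * Pick the interrupt mode: try MSI-X first with one vector per online CPU,
 * rounded down to a power of two and capped at
 * QLCNIC_DEF_NUM_STS_DESC_RINGS, then fall back to MSI or legacy INTx.
 */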
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	int num_msix;

	if (adapter->msix_supported) {
		num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
				QLCNIC_DEF_NUM_STS_DESC_RINGS));
	} else
		num_msix = 1;

	if (!qlcnic_enable_msix(adapter, num_msix))
		return;

	qlcnic_enable_msi_legacy(adapter);
}

static void
qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
	if (adapter->flags & QLCNIC_MSIX_ENABLED)
		pci_disable_msix(adapter->pdev);
	if (adapter->flags & QLCNIC_MSI_ENABLED)
		pci_disable_msi(adapter->pdev);
}

static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw->pci_base0 != NULL)
		iounmap(adapter->ahw->pci_base0);
}

static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
	struct qlcnic_pci_info *pci_info;
	int i, ret = 0;
	u8 pfn;

	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
	if (!adapter->npars) {
		ret = -ENOMEM;
		goto err_pci_info;
	}

	adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
	if (!adapter->eswitch) {
		ret = -ENOMEM;
		goto err_npars;
	}

	ret = qlcnic_get_pci_info(adapter, pci_info);
	if (ret)
		goto err_eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		pfn = pci_info[i].id;
		if (pfn > QLCNIC_MAX_PCI_FUNC) {
			ret = QL_STATUS_INVALID_PARAM;
			goto err_eswitch;
		}
		adapter->npars[pfn].active = (u8)pci_info[i].active;
		adapter->npars[pfn].type = (u8)pci_info[i].type;
		adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
		adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
		adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
	}

	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;

	kfree(pci_info);
	return 0;

err_eswitch:
	kfree(adapter->eswitch);
	adapter->eswitch = NULL;
err_npars:
	kfree(adapter->npars);
	adapter->npars = NULL;
err_pci_info:
	kfree(pci_info);

	return ret;
}

static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
	u8 id;
	u32 ref_count;
	int i, ret = 1;
	u32 data = QLCNIC_MGMT_FUNC;
	void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;

	/* If other drivers are not in use set their privilege level */
	ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
	ret = qlcnic_api_lock(adapter);
	if (ret)
		goto err_lock;

	if (qlcnic_config_npars) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
			id = i;
			if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
				id == adapter->ahw->pci_func)
				continue;
			data |= (qlcnic_config_npars &
					QLC_DEV_SET_DRV(0xf, id));
		}
	} else {
		data = readl(priv_op);
		data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
			(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
			adapter->ahw->pci_func));
	}
	writel(data, priv_op);
	qlcnic_api_unlock(adapter);
err_lock:
	return ret;
}

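/*
 * Derive the PCI function number from the MSI-X table offset and read the
 * driver operating-mode register; a function without management or
 * privileged rights is switched to the restricted qlcnic_vf_ops template.
 */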
static void
qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
	void __iomem *msix_base_addr;
	void __iomem *priv_op;
	u32 func;
	u32 msix_base;
	u32 op_mode, priv_level;

	/* Determine FW API version */
	adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
					QLCNIC_FW_API);

	/* Find PCI function number */
	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
	msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
	msix_base = readl(msix_base_addr);
	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
	adapter->ahw->pci_func = func;

	/* Determine function privilege level */
	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (priv_level == QLCNIC_NON_PRIV_FUNC) {
		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
		dev_info(&adapter->pdev->dev,
			"HAL Version: %d Non Privileged function\n",
			adapter->fw_hal_version);
		adapter->nic_ops = &qlcnic_vf_ops;
	} else
		adapter->nic_ops = &qlcnic_ops;
}

static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw->pci_base0 = mem_ptr0;
	adapter->ahw->pci_len0 = pci_len0;

	qlcnic_check_vf(adapter);

	adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
			adapter->ahw->pci_func)));

	return 0;
}

static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, found = 0;

	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
		if (qlcnic_boards[i].vendor == pdev->vendor &&
			qlcnic_boards[i].device == pdev->device &&
			qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
			qlcnic_boards[i].sub_device == pdev->subsystem_device) {
			sprintf(name, "%pM: %s",
				adapter->mac_addr,
				qlcnic_boards[i].short_name);
			found = 1;
			break;
		}

	}

	if (!found)
		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}

static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	struct pci_dev *pdev = adapter->pdev;

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
			fw_major, fw_minor, fw_build);
	if (adapter->ahw->port_type == QLCNIC_XGBE) {
		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
		} else {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
		}

		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;

	} else if (adapter->ahw->port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->max_rds_rings = MAX_RDS_RINGS;
}

static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_info nic_info;

	err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
	if (err)
		return err;

	adapter->physical_port = (u8)nic_info.phys_port;
	adapter->switch_mode = nic_info.switch_mode;
	adapter->max_tx_ques = nic_info.max_tx_ques;
	adapter->max_rx_ques = nic_info.max_rx_ques;
	adapter->capabilities = nic_info.capabilities;
	adapter->max_mac_filters = nic_info.max_mac_filters;
	adapter->max_mtu = nic_info.max_mtu;

	if (adapter->capabilities & BIT_6)
		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
	else
		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;

	return err;
}

static void
qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	if (esw_cfg->discard_tagged)
		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	else
		adapter->flags |= QLCNIC_TAGGING_ENABLED;

	if (esw_cfg->vlan_id)
		adapter->pvid = esw_cfg->vlan_id;
	else
		adapter->pvid = 0;
}

static void
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	set_bit(vid, adapter->vlans);
}

static void
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
	clear_bit(vid, adapter->vlans);
}

static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
				QLCNIC_PROMISC_DISABLED);

	if (esw_cfg->mac_anti_spoof)
		adapter->flags |= QLCNIC_MACSPOOF;

	if (!esw_cfg->mac_override)
		adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;

	if (!esw_cfg->promisc_mode)
		adapter->flags |= QLCNIC_PROMISC_DISABLED;

	qlcnic_set_netdev_features(adapter, esw_cfg);
}

static int
qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
		return 0;

	esw_cfg.pci_func = adapter->ahw->pci_func;
	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
		return -EIO;
	qlcnic_set_vlan_config(adapter, &esw_cfg);
	qlcnic_set_eswitch_port_features(adapter, &esw_cfg);

	return 0;
}

static void
qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long features, vlan_features;

	features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
	vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		features |= (NETIF_F_TSO | NETIF_F_TSO6);
		vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}

	if (netdev->features & NETIF_F_LRO)
		features |= NETIF_F_LRO;

	if (esw_cfg->offload_flags & BIT_0) {
		netdev->features |= features;
		if (!(esw_cfg->offload_flags & BIT_1))
			netdev->features &= ~NETIF_F_TSO;
		if (!(esw_cfg->offload_flags & BIT_2))
			netdev->features &= ~NETIF_F_TSO6;
	} else {
		netdev->features &= ~features;
	}

	netdev->vlan_features = (features & vlan_features);
}

static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
	void __iomem *priv_op;
	u32 op_mode, priv_level;
	int err = 0;

	err = qlcnic_initialize_nic(adapter);
	if (err)
		return err;

	if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
		return 0;

	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
		if (priv_level == QLCNIC_MGMT_FUNC) {
			adapter->op_mode = QLCNIC_MGMT_FUNC;
			err = qlcnic_init_pci_info(adapter);
			if (err)
				return err;
			/* Set privilege level for other functions */
			qlcnic_set_function_modes(adapter);
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Management function\n",
				adapter->fw_hal_version);
		} else if (priv_level == QLCNIC_PRIV_FUNC) {
			adapter->op_mode = QLCNIC_PRIV_FUNC;
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Privileged function\n",
				adapter->fw_hal_version);
		}
	}

	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;

	return err;
}

static int
qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	struct qlcnic_npar_info *npar;
	u8 i;

	if (adapter->need_fw_reset)
		return 0;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
			continue;
		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
		esw_cfg.pci_func = i;
		esw_cfg.offload_flags = BIT_0;
		esw_cfg.mac_override = BIT_0;
		esw_cfg.promisc_mode = BIT_0;
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			esw_cfg.offload_flags |= (BIT_1 | BIT_2);
		if (qlcnic_config_switch_port(adapter, &esw_cfg))
			return -EIO;
		npar = &adapter->npars[i];
		npar->pvid = esw_cfg.vlan_id;
		npar->mac_override = esw_cfg.mac_override;
		npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
		npar->discard_tagged = esw_cfg.discard_tagged;
		npar->promisc_mode = esw_cfg.promisc_mode;
		npar->offload_flags = esw_cfg.offload_flags;
	}

	return 0;
}

static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
		struct qlcnic_npar_info *npar, int pci_func)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
	esw_cfg.pci_func = pci_func;
	esw_cfg.vlan_id = npar->pvid;
	esw_cfg.mac_override = npar->mac_override;
	esw_cfg.discard_tagged = npar->discard_tagged;
	esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
	esw_cfg.offload_flags = npar->offload_flags;
	esw_cfg.promisc_mode = npar->promisc_mode;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	return 0;
}

static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
	int i, err;
	struct qlcnic_npar_info *npar;
	struct qlcnic_info nic_info;

	if (!adapter->need_fw_reset)
		return 0;

	/* Set the NPAR config data after FW reset */
	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		npar = &adapter->npars[i];
		if (npar->type != QLCNIC_TYPE_NIC)
			continue;
		err = qlcnic_get_nic_info(adapter, &nic_info, i);
		if (err)
			return err;
		nic_info.min_tx_bw = npar->min_bw;
		nic_info.max_tx_bw = npar->max_bw;
		err = qlcnic_set_nic_info(adapter, &nic_info);
		if (err)
			return err;

		if (npar->enable_pm) {
			err = qlcnic_config_port_mirroring(adapter,
					npar->dest_npar, 1, i);
			if (err)
				return err;
		}
		err = qlcnic_reset_eswitch_config(adapter, npar, i);
		if (err)
			return err;
	}
	return 0;
}

static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
{
	u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
	u32 npar_state;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		return 0;

	npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
		msleep(1000);
		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	}
	if (!npar_opt_timeo) {
		dev_err(&adapter->pdev->dev,
			"Waiting for NPAR state to become operational timed out\n");
		return -EIO;
	}
	return 0;
}

static int
qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
{
	int err;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
		adapter->op_mode != QLCNIC_MGMT_FUNC)
		return 0;

	err = qlcnic_set_default_offload_settings(adapter);
	if (err)
		return err;

	err = qlcnic_reset_npar_config(adapter);
	if (err)
		return err;

	qlcnic_dev_set_npar_ready(adapter);

	return err;
}

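/*
 * Bring up the firmware: if this function is allowed to start it, load the
 * image from flash or from a file (load_fw_file), reinitialize from ROM if
 * a reset is needed, then wait for the firmware to report a good state and
 * mark the device READY.  On any failure the device state is set to FAILED.
 */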
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_can_start_firmware(adapter);
	if (err < 0)
		return err;
	else if (!err)
		goto check_fw_status;

	if (load_fw_file)
		qlcnic_request_firmware(adapter);
	else {
		err = qlcnic_check_flash_fw_ver(adapter);
		if (err)
			goto err_out;

		adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
	}

	err = qlcnic_need_fw_reset(adapter);
	if (err == 0)
		goto check_fw_status;

	err = qlcnic_pinit_from_rom(adapter);
	if (err)
		goto err_out;

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);
	QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);

check_fw_status:
	err = qlcnic_check_fw_status(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
	qlcnic_idc_debug_info(adapter, 1);

	err = qlcnic_check_eswitch_mode(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failed for eswitch\n");
		goto err_out;
	}
	err = qlcnic_set_mgmt_operations(adapter);
	if (err)
		goto err_out;

	qlcnic_check_options(adapter);
	adapter->need_fw_reset = 0;

	qlcnic_release_firmware(adapter);
	return 0;

err_out:
	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
	dev_err(&adapter->pdev->dev, "Device state set to failed\n");

	qlcnic_release_firmware(adapter);
	return err;
}

static int
qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
	irq_handler_t handler;
	struct qlcnic_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		handler = qlcnic_tmp_intr;
		if (!QLCNIC_IS_MSI_FAMILY(adapter))
			flags |= IRQF_SHARED;

	} else {
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			handler = qlcnic_msix_intr;
		else if (adapter->flags & QLCNIC_MSI_ENABLED)
			handler = qlcnic_msi_intr;
		else {
			flags |= IRQF_SHARED;
			handler = qlcnic_intr;
		}
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
			flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

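/*
 * __qlcnic_up()/__qlcnic_down() do the actual data-path bring-up and
 * teardown and expect the caller to hold the RTNL lock; qlcnic_up() and
 * qlcnic_down() are the locked wrappers used from the resume and firmware
 * recovery paths.
 */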
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
		return 0;
	if (qlcnic_set_eswitch_port_config(adapter))
		return -EIO;

	if (qlcnic_fw_create_ctx(adapter))
		return -EIO;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, rds_ring);
	}

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw->linkup = 0;

	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (netdev->features & NETIF_F_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	adapter->reset_context = 0;
	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}

/* Usage: During resume and firmware recovery module.*/

static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	smp_mb();
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	if (adapter->fhash.fnum)
		qlcnic_delete_lb_filters(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_reset_rx_buffers_list(adapter);
	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}

/* Usage: During suspend and firmware recovery module */

static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();

}

static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		goto err_out_napi_del;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_hw;
	}

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_hw:
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
err_out_napi_del:
	qlcnic_napi_del(adapter);
	return err;
}

static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}

void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;

	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx->sds_rings[ring];
			qlcnic_disable_int(sds_ring);
		}
	}

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_detach(adapter);

	adapter->diag_test = 0;
	adapter->max_sds_rings = max_sds_rings;

	if (qlcnic_attach(adapter))
		goto out;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);
out:
	netif_device_attach(netdev);
}

static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
	int err = 0;
	adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
				GFP_KERNEL);
	if (!adapter->ahw) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate hardware context for adapter\n");
		err = -ENOMEM;
		goto err_out;
	}
	adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
				GFP_KERNEL);
	if (!adapter->recv_ctx) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate recv ctx resources for adapter\n");
		kfree(adapter->ahw);
		adapter->ahw = NULL;
		err = -ENOMEM;
		goto err_out;
	}
	/* Initialize interrupt coalesce parameters */
	adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
	adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
err_out:
	return err;
}

static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
{
	kfree(adapter->recv_ctx);
	adapter->recv_ctx = NULL;

	if (adapter->ahw->fw_dump.tmpl_hdr) {
		vfree(adapter->ahw->fw_dump.tmpl_hdr);
		adapter->ahw->fw_dump.tmpl_hdr = NULL;
	}
	kfree(adapter->ahw);
	adapter->ahw = NULL;
}

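/*
 * Used by the ethtool self-tests: detach the netdev, drop to a single SDS
 * ring and re-attach in diagnostic mode so the interrupt or loopback test
 * can run; qlcnic_diag_free_res() above restores normal operation.
 */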
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_rds_ring *rds_ring;
	int ring;
	int ret;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	adapter->max_sds_rings = 1;
	adapter->diag_test = test;

	ret = qlcnic_attach(adapter);
	if (ret) {
		netif_device_attach(netdev);
		return ret;
	}

	ret = qlcnic_fw_create_ctx(adapter);
	if (ret) {
		qlcnic_detach(adapter);
		netif_device_attach(netdev);
		return ret;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, rds_ring);
	}

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx->sds_rings[ring];
			qlcnic_enable_int(sds_ring);
		}
	}

	if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
		adapter->ahw->loopback_state = 0;
		qlcnic_linkevent_request(adapter, 1);
	}

	set_bit(__QLCNIC_DEV_UP, &adapter->state);

	return 0;
}

/* Reset context in hardware only */
static int
qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	qlcnic_up(adapter, netdev);

	netif_device_attach(netdev);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				__qlcnic_up(adapter, netdev);
		}

		netif_device_attach(netdev);
	}

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}

static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev, u8 pci_using_dac)
{
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->mc_enabled = 0;
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 5*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (pci_using_dac)
		netdev->hw_features |= NETIF_F_HIGHDMA;

	netdev->vlan_features = netdev->hw_features;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->hw_features |= NETIF_F_HW_VLAN_TX;
	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->hw_features |= NETIF_F_LRO;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->irq = adapter->msix_entries[0].vector;

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		*pci_using_dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		*pci_using_dac = 0;
	else {
		dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
		return -EIO;
	}

	return 0;
}

static int
qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
{
	adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (adapter->msix_entries)
		return 0;

	dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
	return -ENOMEM;
}

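/*
 * PCI probe: map BAR 0, bring up the firmware through the per-mode
 * nic_ops->start_firmware hook, allocate MSI-X entries, register the
 * netdev and start the periodic firmware poll work.
 */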
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	uint8_t revision_id;
	uint8_t pci_using_dac;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
	if (err)
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);
	pci_enable_pcie_error_reporting(pdev);

	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	if (qlcnic_alloc_adapter_resources(adapter))
		goto err_out_free_netdev;

	adapter->dev_rst_time = jiffies;
	revision_id = pdev->revision;
	adapter->ahw->revision_id = revision_id;
	adapter->mac_learn = qlcnic_mac_learn;

	rwlock_init(&adapter->ahw->crb_lock);
	mutex_init(&adapter->ahw->mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_hw;

	/* This will be reset for mezz cards  */
	adapter->portnum = adapter->ahw->pci_func;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}

	err = qlcnic_setup_idc_param(adapter);
	if (err)
		goto err_out_iounmap;

	adapter->flags |= QLCNIC_NEED_FLR;

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
		goto err_out_decr_ref;
	}

	/* Get FW dump template and store it */
	if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
		if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
			dev_info(&pdev->dev,
				"Supports FW dump capability\n");

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, adapter->ahw->revision_id);
	}

	qlcnic_clear_stats(adapter);

	err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
	if (err)
		goto err_out_decr_ref;

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw->port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	if (adapter->mac_learn)
		qlcnic_alloc_lb_filters_mem(adapter);

	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	kfree(adapter->msix_entries);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter, 0);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_hw:
	qlcnic_free_adapter_resources(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}

static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	qlcnic_detach(adapter);

	if (adapter->npars != NULL)
		kfree(adapter->npars);
	if (adapter->eswitch != NULL)
		kfree(adapter->eswitch);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_free_lb_filters_mem(adapter);

	qlcnic_teardown_intr(adapter);
	kfree(adapter->msix_entries);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	qlcnic_free_adapter_resources(adapter);
	free_netdev(netdev);
}

static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}

static void qlcnic_shutdown(struct pci_dev *pdev)
{
	if (__qlcnic_shutdown(pdev))
		return;

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int
qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;

	retval = __qlcnic_shutdown(pdev);
	if (retval)
		return retval;

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int
qlcnic_resume(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_up(adapter, netdev);
		if (err)
			goto done;

		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}
done:
	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;
}
#endif

1820static int qlcnic_open(struct net_device *netdev)
1821{
1822 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1823 int err;
1824
c55ad8e5
AKS
1825 netif_carrier_off(netdev);
1826
af19b491
AKS
1827 err = qlcnic_attach(adapter);
1828 if (err)
1829 return err;
1830
1831 err = __qlcnic_up(adapter, netdev);
1832 if (err)
1833 goto err_out;
1834
1835 netif_start_queue(netdev);
1836
1837 return 0;
1838
1839err_out:
1840 qlcnic_detach(adapter);
1841 return err;
1842}
1843
1844/*
1845 * qlcnic_close - Disables a network interface entry point
1846 */
1847static int qlcnic_close(struct net_device *netdev)
1848{
1849 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1850
1851 __qlcnic_down(adapter, netdev);
1852 return 0;
1853}
1854
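/*
 * MAC-learning support: the driver keeps QLCNIC_LB_MAX_FILTERS hash
 * buckets of learned source MAC addresses.  Entries are added from the
 * transmit path in qlcnic_send_filter() and the list heads are guarded
 * by mac_learn_lock.
 */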
e5dcf6dc 1855void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
b5e5492c
AKS
1856{
1857 void *head;
1858 int i;
1859
e5dcf6dc 1860 if (adapter->fhash.fmax && adapter->fhash.fhead)
b5e5492c
AKS
1861 return;
1862
1863 spin_lock_init(&adapter->mac_learn_lock);
1864
1865 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1866 GFP_KERNEL);
1867 if (!head)
1868 return;
1869
1870 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
43d620c8 1871 adapter->fhash.fhead = head;
b5e5492c
AKS
1872
1873 for (i = 0; i < adapter->fhash.fmax; i++)
1874 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1875}
1876
1877static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1878{
1879 if (adapter->fhash.fmax && adapter->fhash.fhead)
1880 kfree(adapter->fhash.fhead);
1881
1882 adapter->fhash.fhead = NULL;
1883 adapter->fhash.fmax = 0;
1884}
1885
1886static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1887 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1888{
1889 struct cmd_desc_type0 *hwdesc;
1890 struct qlcnic_nic_req *req;
1891 struct qlcnic_mac_req *mac_req;
7e56cac4 1892 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1893 u32 producer;
1894 u64 word;
1895
1896 producer = tx_ring->producer;
1897 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1898
1899 req = (struct qlcnic_nic_req *)hwdesc;
1900 memset(req, 0, sizeof(struct qlcnic_nic_req));
1901 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1902
1903 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1904 req->req_hdr = cpu_to_le64(word);
1905
1906 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1907 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1908 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1909
7e56cac4
SC
1910 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1911 vlan_req->vlan_id = vlan_id;
03c5d770 1912
b5e5492c 1913 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1914 smp_mb();
b5e5492c
AKS
1915}
1916
1917#define QLCNIC_MAC_HASH(MAC)\
1918 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
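/*
 * Note on the hash: it folds the low three bits of MAC bytes 2 and 5
 * (bits 16-18 and 40-42 of the u64 copy of the address on a
 * little-endian host) into a six-bit value; callers mask the result
 * with (QLCNIC_LB_MAX_FILTERS - 1) to select a bucket.
 */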
1919
1920static void
1921qlcnic_send_filter(struct qlcnic_adapter *adapter,
1922 struct qlcnic_host_tx_ring *tx_ring,
1923 struct cmd_desc_type0 *first_desc,
1924 struct sk_buff *skb)
1925{
1926 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1927 struct qlcnic_filter *fil, *tmp_fil;
1928 struct hlist_node *tmp_hnode, *n;
1929 struct hlist_head *head;
1930 u64 src_addr = 0;
7e56cac4 1931 __le16 vlan_id = 0;
b5e5492c
AKS
1932 u8 hindex;
1933
1934 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1935 return;
1936
1937 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1938 return;
1939
03c5d770
AKS
1940 /* Only NPAR-capable devices support VLAN-based learning */
1941 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1942 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1943 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1944 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1945 head = &(adapter->fhash.fhead[hindex]);
1946
1947 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1948 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1949 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1950
1951 if (jiffies >
1952 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1953 qlcnic_change_filter(adapter, src_addr, vlan_id,
1954 tx_ring);
b5e5492c
AKS
1955 tmp_fil->ftime = jiffies;
1956 return;
1957 }
1958 }
1959
1960 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1961 if (!fil)
1962 return;
1963
03c5d770 1964 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1965
1966 fil->ftime = jiffies;
03c5d770 1967 fil->vlan_id = vlan_id;
b5e5492c
AKS
1968 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1969 spin_lock(&adapter->mac_learn_lock);
1970 hlist_add_head(&(fil->fnode), head);
1971 adapter->fhash.fnum++;
1972 spin_unlock(&adapter->mac_learn_lock);
1973}
1974
036d61f0
AC
1975static int
1976qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
1977 struct cmd_desc_type0 *first_desc,
1978 struct sk_buff *skb)
1979{
036d61f0
AC
1980 u8 opcode = 0, hdr_len = 0;
1981 u16 flags = 0, vlan_tci = 0;
1982 int copied, offset, copy_len;
af19b491
AKS
1983 struct cmd_desc_type0 *hwdesc;
1984 struct vlan_ethhdr *vh;
036d61f0
AC
1985 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1986 u16 protocol = ntohs(skb->protocol);
2e9d722d 1987 u32 producer = tx_ring->producer;
036d61f0
AC
1988
1989 if (protocol == ETH_P_8021Q) {
1990 vh = (struct vlan_ethhdr *)skb->data;
1991 flags = FLAGS_VLAN_TAGGED;
1992 vlan_tci = vh->h_vlan_TCI;
1993 } else if (vlan_tx_tag_present(skb)) {
1994 flags = FLAGS_VLAN_OOB;
1995 vlan_tci = vlan_tx_tag_get(skb);
1996 }
1997 if (unlikely(adapter->pvid)) {
1998 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1999 return -EIO;
2000 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2001 goto set_flags;
2002
2003 flags = FLAGS_VLAN_OOB;
2004 vlan_tci = adapter->pvid;
2005 }
2006set_flags:
2007 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2008 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 2009
2e9d722d
AC
2010 if (*(skb->data) & BIT_0) {
2011 flags |= BIT_0;
2012 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2013 }
036d61f0
AC
2014 opcode = TX_ETHER_PKT;
2015 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
2016 skb_shinfo(skb)->gso_size > 0) {
2017
2018 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2019
2020 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2021 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
2022
2023 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2024
2025 /* For LSO, we need to copy the MAC/IP/TCP headers into
2026 * the descriptor ring */
2027 copied = 0;
2028 offset = 2;
2029
2030 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
2031 first_desc->total_hdr_length += VLAN_HLEN;
2032 first_desc->tcp_hdr_offset = VLAN_HLEN;
2033 first_desc->ip_hdr_offset = VLAN_HLEN;
2034 /* Only in case of TSO on vlan device */
2035 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
2036
2037 /* Create a TSO vlan header template for firmware */
2038
2039 hwdesc = &tx_ring->desc_head[producer];
2040 tx_ring->cmd_buf_arr[producer].skb = NULL;
2041
2042 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2043 offset, hdr_len + VLAN_HLEN);
2044
2045 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2046 skb_copy_from_linear_data(skb, vh, 12);
2047 vh->h_vlan_proto = htons(ETH_P_8021Q);
2048 vh->h_vlan_TCI = htons(vlan_tci);
2049
2050 skb_copy_from_linear_data_offset(skb, 12,
2051 (char *)vh + 16, copy_len - 16);
2052
2053 copied = copy_len - VLAN_HLEN;
2054 offset = 0;
2055
2056 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2057 }
2058
036d61f0
AC
2059 while (copied < hdr_len) {
2060
2061 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2062 offset, (hdr_len - copied));
2063
2064 hwdesc = &tx_ring->desc_head[producer];
2065 tx_ring->cmd_buf_arr[producer].skb = NULL;
2066
2067 skb_copy_from_linear_data_offset(skb, copied,
2068 (char *) hwdesc + offset, copy_len);
2069
2070 copied += copy_len;
2071 offset = 0;
2072
2073 producer = get_next_index(producer, tx_ring->num_desc);
2074 }
2075
2076 tx_ring->producer = producer;
2077 smp_mb();
2078 adapter->stats.lso_frames++;
af19b491
AKS
2079
2080 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2081 u8 l4proto;
2082
036d61f0 2083 if (protocol == ETH_P_IP) {
2084 l4proto = ip_hdr(skb)->protocol;
2085
2086 if (l4proto == IPPROTO_TCP)
2087 opcode = TX_TCP_PKT;
2088 else if (l4proto == IPPROTO_UDP)
2089 opcode = TX_UDP_PKT;
036d61f0 2090 } else if (protocol == ETH_P_IPV6) {
2091 l4proto = ipv6_hdr(skb)->nexthdr;
2092
2093 if (l4proto == IPPROTO_TCP)
2094 opcode = TX_TCPV6_PKT;
2095 else if (l4proto == IPPROTO_UDP)
2096 opcode = TX_UDPV6_PKT;
2097 }
2098 }
af19b491
AKS
2099 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2100 first_desc->ip_hdr_offset += skb_network_offset(skb);
2101 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2102
036d61f0 2103 return 0;
af19b491
AKS
2104}
2105
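/*
 * qlcnic_map_tx_skb() DMA-maps the linear part of the skb and each page
 * fragment into pbuf->frag_array; if any mapping fails it unwinds the
 * mappings done so far and returns -ENOMEM so the caller can drop the
 * packet cleanly.
 */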
2106static int
2107qlcnic_map_tx_skb(struct pci_dev *pdev,
2108 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2109{
2110 struct qlcnic_skb_frag *nf;
2111 struct skb_frag_struct *frag;
2112 int i, nr_frags;
2113 dma_addr_t map;
2114
2115 nr_frags = skb_shinfo(skb)->nr_frags;
2116 nf = &pbuf->frag_array[0];
2117
2118 map = pci_map_single(pdev, skb->data,
2119 skb_headlen(skb), PCI_DMA_TODEVICE);
2120 if (pci_dma_mapping_error(pdev, map))
2121 goto out_err;
2122
2123 nf->dma = map;
2124 nf->length = skb_headlen(skb);
2125
2126 for (i = 0; i < nr_frags; i++) {
2127 frag = &skb_shinfo(skb)->frags[i];
2128 nf = &pbuf->frag_array[i+1];
2129
2130 map = pci_map_page(pdev, frag->page, frag->page_offset,
2131 frag->size, PCI_DMA_TODEVICE);
2132 if (pci_dma_mapping_error(pdev, map))
2133 goto unwind;
2134
2135 nf->dma = map;
2136 nf->length = frag->size;
2137 }
2138
2139 return 0;
2140
2141unwind:
2142 while (--i >= 0) {
2143 nf = &pbuf->frag_array[i+1];
2144 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2145 }
2146
2147 nf = &pbuf->frag_array[0];
2148 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2149
2150out_err:
2151 return -ENOMEM;
2152}
2153
036d61f0
AC
2154static void
2155qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2156 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2157{
036d61f0
AC
2158 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2159 int nr_frags = skb_shinfo(skb)->nr_frags;
2160 int i;
8cf61f89 2161
036d61f0
AC
2162 for (i = 0; i < nr_frags; i++) {
2163 nf = &pbuf->frag_array[i+1];
2164 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2165 }
8cf61f89 2166
036d61f0
AC
2167 nf = &pbuf->frag_array[0];
2168 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
5b446c6a 2169 pbuf->skb = NULL;
8cf61f89
AKS
2170}
2171
af19b491
AKS
2172static inline void
2173qlcnic_clear_cmddesc(u64 *desc)
2174{
2175 desc[0] = 0ULL;
2176 desc[2] = 0ULL;
8cf61f89 2177 desc[7] = 0ULL;
af19b491
AKS
2178}
2179
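/*
 * Transmit entry point.  qlcnic_xmit_frame() bails out if the device is
 * not up, optionally enforces MAC anti-spoofing, pulls excess fragments
 * into the linear area for non-TSO skbs, stops the queue when fewer than
 * TX_STOP_THRESH descriptors remain, DMA-maps the buffers, packs up to
 * four buffer addresses per command descriptor, fills in the protocol
 * fields via qlcnic_tx_pkt(), optionally learns the source MAC, and
 * finally advances the producer index to hand the frame to firmware.
 */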
cdaff185 2180netdev_tx_t
af19b491
AKS
2181qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2182{
2183 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2184 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2185 struct qlcnic_cmd_buffer *pbuf;
2186 struct qlcnic_skb_frag *buffrag;
2187 struct cmd_desc_type0 *hwdesc, *first_desc;
2188 struct pci_dev *pdev;
dcb50aff 2189 struct ethhdr *phdr;
91a403ca 2190 int delta = 0;
af19b491
AKS
2191 int i, k;
2192
2193 u32 producer;
036d61f0 2194 int frag_count;
af19b491
AKS
2195 u32 num_txd = tx_ring->num_desc;
2196
780ab790
AKS
2197 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2198 netif_stop_queue(netdev);
2199 return NETDEV_TX_BUSY;
2200 }
2201
fe4d434d 2202 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2203 phdr = (struct ethhdr *)skb->data;
2204 if (compare_ether_addr(phdr->h_source,
2205 adapter->mac_addr))
2206 goto drop_packet;
2207 }
2208
af19b491 2209 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2210 /* Up to 14 frags are supported for a normal packet and 32 for a
2211 * TSO packet; pull any excess into the linear area so the
2212 * fragment count fits. */
2213 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2214
2215 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2216 delta += skb_shinfo(skb)->frags[i].size;
2217
2218 if (!__pskb_pull_tail(skb, delta))
2219 goto drop_packet;
2220
2221 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2222 }
af19b491 2223
ef71ff83 2224 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2225 netif_stop_queue(netdev);
2226 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2227 netif_start_queue(netdev);
2228 else {
2229 adapter->stats.xmit_off++;
2230 return NETDEV_TX_BUSY;
2231 }
af19b491
AKS
2232 }
2233
2234 producer = tx_ring->producer;
2235 pbuf = &tx_ring->cmd_buf_arr[producer];
2236
2237 pdev = adapter->pdev;
2238
8cf61f89
AKS
2239 first_desc = hwdesc = &tx_ring->desc_head[producer];
2240 qlcnic_clear_cmddesc((u64 *)hwdesc);
2241
8ae6df97
AKS
2242 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2243 adapter->stats.tx_dma_map_error++;
af19b491 2244 goto drop_packet;
8ae6df97 2245 }
af19b491
AKS
2246
2247 pbuf->skb = skb;
2248 pbuf->frag_count = frag_count;
2249
af19b491
AKS
2250 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2251 qlcnic_set_tx_port(first_desc, adapter->portnum);
2252
2253 for (i = 0; i < frag_count; i++) {
2254
2255 k = i % 4;
2256
2257 if ((k == 0) && (i > 0)) {
2258 /* move to the next descriptor */
2259 producer = get_next_index(producer, num_txd);
2260 hwdesc = &tx_ring->desc_head[producer];
2261 qlcnic_clear_cmddesc((u64 *)hwdesc);
2262 tx_ring->cmd_buf_arr[producer].skb = NULL;
2263 }
2264
2265 buffrag = &pbuf->frag_array[i];
2266
2267 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2268 switch (k) {
2269 case 0:
2270 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2271 break;
2272 case 1:
2273 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2274 break;
2275 case 2:
2276 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2277 break;
2278 case 3:
2279 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2280 break;
2281 }
2282 }
2283
2284 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2285 smp_mb();
af19b491 2286
036d61f0
AC
2287 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2288 goto unwind_buff;
af19b491 2289
e5dcf6dc 2290 if (adapter->mac_learn)
2291 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2292
af19b491
AKS
2293 adapter->stats.txbytes += skb->len;
2294 adapter->stats.xmitcalled++;
2295
f127f472
SC
2296 qlcnic_update_cmd_producer(adapter, tx_ring);
2297
af19b491
AKS
2298 return NETDEV_TX_OK;
2299
036d61f0
AC
2300unwind_buff:
2301 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2302drop_packet:
2303 adapter->stats.txdropped++;
2304 dev_kfree_skb_any(skb);
2305 return NETDEV_TX_OK;
2306}
2307
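/*
 * CRB_TEMP_STATE packs both a temperature state and a value in degrees C.
 * qlcnic_check_temp() returns 1 (forcing a detach) once the state reaches
 * QLCNIC_TEMP_PANIC and only logs warnings for QLCNIC_TEMP_WARN.
 */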
2308static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2309{
2310 struct net_device *netdev = adapter->netdev;
2311 u32 temp, temp_state, temp_val;
2312 int rv = 0;
2313
2314 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2315
2316 temp_state = qlcnic_get_temp_state(temp);
2317 temp_val = qlcnic_get_temp_val(temp);
2318
2319 if (temp_state == QLCNIC_TEMP_PANIC) {
2320 dev_err(&netdev->dev,
2321 "Device temperature %d degrees C exceeds"
2322 " maximum allowed. Hardware has been shut down.\n",
2323 temp_val);
2324 rv = 1;
2325 } else if (temp_state == QLCNIC_TEMP_WARN) {
2326 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2327 dev_err(&netdev->dev,
2328 "Device temperature %d degrees C "
2329 "exceeds operating range."
2330 " Immediate action needed.\n",
2331 temp_val);
2332 }
2333 } else {
2334 if (adapter->temp == QLCNIC_TEMP_WARN) {
2335 dev_info(&netdev->dev,
2336 "Device temperature is now %d degrees C"
2337 " in normal range.\n", temp_val);
2338 }
2339 }
2340 adapter->temp = temp_state;
2341 return rv;
2342}
2343
2344void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2345{
2346 struct net_device *netdev = adapter->netdev;
2347
b1fc6d3c 2348 if (adapter->ahw->linkup && !linkup) {
69324275 2349 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2350 adapter->ahw->linkup = 0;
af19b491
AKS
2351 if (netif_running(netdev)) {
2352 netif_carrier_off(netdev);
2353 netif_stop_queue(netdev);
2354 }
b1fc6d3c 2355 } else if (!adapter->ahw->linkup && linkup) {
69324275 2356 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2357 adapter->ahw->linkup = 1;
af19b491
AKS
2358 if (netif_running(netdev)) {
2359 netif_carrier_on(netdev);
2360 netif_wake_queue(netdev);
2361 }
2362 }
2363}
2364
2365static void qlcnic_tx_timeout(struct net_device *netdev)
2366{
2367 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2368
2369 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2370 return;
2371
2372 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2373
2374 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2375 adapter->need_fw_reset = 1;
2376 else
2377 adapter->reset_context = 1;
af19b491
AKS
2378}
2379
2380static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2381{
2382 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2383 struct net_device_stats *stats = &netdev->stats;
2384
af19b491
AKS
2385 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2386 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2387 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2388 stats->tx_bytes = adapter->stats.txbytes;
2389 stats->rx_dropped = adapter->stats.rxdropped;
2390 stats->tx_dropped = adapter->stats.txdropped;
2391
2392 return stats;
2393}
2394
7eb9855d 2395static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2396{
af19b491
AKS
2397 u32 status;
2398
2399 status = readl(adapter->isr_int_vec);
2400
2401 if (!(status & adapter->int_vec_bit))
2402 return IRQ_NONE;
2403
2404 /* check interrupt state machine, to be sure */
2405 status = readl(adapter->crb_int_state_reg);
2406 if (!ISR_LEGACY_INT_TRIGGERED(status))
2407 return IRQ_NONE;
2408
2409 writel(0xffffffff, adapter->tgt_status_reg);
2410 /* read twice to ensure write is flushed */
2411 readl(adapter->isr_int_vec);
2412 readl(adapter->isr_int_vec);
2413
7eb9855d
AKS
2414 return IRQ_HANDLED;
2415}
2416
2417static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2418{
2419 struct qlcnic_host_sds_ring *sds_ring = data;
2420 struct qlcnic_adapter *adapter = sds_ring->adapter;
2421
2422 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2423 goto done;
2424 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2425 writel(0xffffffff, adapter->tgt_status_reg);
2426 goto done;
2427 }
2428
2429 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2430 return IRQ_NONE;
2431
2432done:
2433 adapter->diag_cnt++;
2434 qlcnic_enable_int(sds_ring);
2435 return IRQ_HANDLED;
2436}
2437
2438static irqreturn_t qlcnic_intr(int irq, void *data)
2439{
2440 struct qlcnic_host_sds_ring *sds_ring = data;
2441 struct qlcnic_adapter *adapter = sds_ring->adapter;
2442
2443 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2444 return IRQ_NONE;
2445
af19b491
AKS
2446 napi_schedule(&sds_ring->napi);
2447
2448 return IRQ_HANDLED;
2449}
2450
2451static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2452{
2453 struct qlcnic_host_sds_ring *sds_ring = data;
2454 struct qlcnic_adapter *adapter = sds_ring->adapter;
2455
2456 /* clear interrupt */
2457 writel(0xffffffff, adapter->tgt_status_reg);
2458
2459 napi_schedule(&sds_ring->napi);
2460 return IRQ_HANDLED;
2461}
2462
2463static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2464{
2465 struct qlcnic_host_sds_ring *sds_ring = data;
2466
2467 napi_schedule(&sds_ring->napi);
2468 return IRQ_HANDLED;
2469}
2470
2471static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2472{
2473 u32 sw_consumer, hw_consumer;
2474 int count = 0, i;
2475 struct qlcnic_cmd_buffer *buffer;
2476 struct pci_dev *pdev = adapter->pdev;
2477 struct net_device *netdev = adapter->netdev;
2478 struct qlcnic_skb_frag *frag;
2479 int done;
2480 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2481
2482 if (!spin_trylock(&adapter->tx_clean_lock))
2483 return 1;
2484
2485 sw_consumer = tx_ring->sw_consumer;
2486 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2487
2488 while (sw_consumer != hw_consumer) {
2489 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2490 if (buffer->skb) {
2491 frag = &buffer->frag_array[0];
2492 pci_unmap_single(pdev, frag->dma, frag->length,
2493 PCI_DMA_TODEVICE);
2494 frag->dma = 0ULL;
2495 for (i = 1; i < buffer->frag_count; i++) {
2496 frag++;
2497 pci_unmap_page(pdev, frag->dma, frag->length,
2498 PCI_DMA_TODEVICE);
2499 frag->dma = 0ULL;
2500 }
2501
2502 adapter->stats.xmitfinished++;
2503 dev_kfree_skb_any(buffer->skb);
2504 buffer->skb = NULL;
2505 }
2506
2507 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2508 if (++count >= MAX_STATUS_HANDLE)
2509 break;
2510 }
2511
2512 if (count && netif_running(netdev)) {
2513 tx_ring->sw_consumer = sw_consumer;
2514
2515 smp_mb();
2516
2517 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2518 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2519 netif_wake_queue(netdev);
8bfe8b91 2520 adapter->stats.xmit_on++;
af19b491 2521 }
af19b491 2522 }
ef71ff83 2523 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2524 }
2525 /*
2526 * If everything has been freed up to the consumer, check whether the
2527 * ring is full. If it is, more work may still need to be freed, so
2528 * schedule the callback again.
2529 *
2530 * This matters when there are 2 CPUs: one could be freeing while the
2531 * other is filling the ring. If the ring is full when we leave here
2532 * and the card has already interrupted the host, the host can miss
2533 * that interrupt.
2534 *
2535 * There is still a possible race in which the host misses an
2536 * interrupt; the card has to take care of this.
2537 */
2538 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2539 done = (sw_consumer == hw_consumer);
2540 spin_unlock(&adapter->tx_clean_lock);
2541
2542 return done;
2543}
2544
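/*
 * NAPI handlers: qlcnic_poll() first reclaims completed TX descriptors
 * and then processes up to "budget" receive completions; the ring
 * interrupt is re-enabled only after both are drained and the device is
 * still up.  qlcnic_rx_poll() is the RX-only variant and does not touch
 * the TX ring.
 */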
2545static int qlcnic_poll(struct napi_struct *napi, int budget)
2546{
2547 struct qlcnic_host_sds_ring *sds_ring =
2548 container_of(napi, struct qlcnic_host_sds_ring, napi);
2549
2550 struct qlcnic_adapter *adapter = sds_ring->adapter;
2551
2552 int tx_complete;
2553 int work_done;
2554
2555 tx_complete = qlcnic_process_cmd_ring(adapter);
2556
2557 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2558
2559 if ((work_done < budget) && tx_complete) {
2560 napi_complete(&sds_ring->napi);
2561 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2562 qlcnic_enable_int(sds_ring);
2563 }
2564
2565 return work_done;
2566}
2567
8f891387 2568static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2569{
2570 struct qlcnic_host_sds_ring *sds_ring =
2571 container_of(napi, struct qlcnic_host_sds_ring, napi);
2572
2573 struct qlcnic_adapter *adapter = sds_ring->adapter;
2574 int work_done;
2575
2576 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2577
2578 if (work_done < budget) {
2579 napi_complete(&sds_ring->napi);
2580 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2581 qlcnic_enable_int(sds_ring);
2582 }
2583
2584 return work_done;
2585}
2586
af19b491
AKS
2587#ifdef CONFIG_NET_POLL_CONTROLLER
2588static void qlcnic_poll_controller(struct net_device *netdev)
2589{
bf82791e
YL
2590 int ring;
2591 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2592 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2593 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2594
af19b491 2595 disable_irq(adapter->irq);
bf82791e
YL
2596 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2597 sds_ring = &recv_ctx->sds_rings[ring];
2598 qlcnic_intr(adapter->irq, sds_ring);
2599 }
af19b491
AKS
2600 enable_irq(adapter->irq);
2601}
2602#endif
2603
6df900e9
SC
2604static void
2605qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2606{
2607 u32 val;
2608
2609 val = adapter->portnum & 0xf;
2610 val |= encoding << 7;
2611 val |= (jiffies - adapter->dev_rst_time) << 8;
2612
2613 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2614 adapter->dev_rst_time = jiffies;
2615}
2616
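/*
 * IDC bookkeeping: QLCNIC_CRB_DRV_ACTIVE and QLCNIC_CRB_DRV_STATE hold
 * one nibble per PCI function.  qlcnic_set_drv_state() acks a
 * NEED_RESET/NEED_QUISCENT request by setting this function's ready bit
 * under the api lock, and qlcnic_clr_drv_state() clears it again once
 * recovery completes.
 */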
ade91f8e
AKS
2617static int
2618qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2619{
2620 u32 val;
2621
2622 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2623 state != QLCNIC_DEV_NEED_QUISCENT);
2624
2625 if (qlcnic_api_lock(adapter))
ade91f8e 2626 return -EIO;
af19b491
AKS
2627
2628 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2629
2630 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2631 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2632 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2633 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2634
2635 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2636
2637 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2638
2639 return 0;
af19b491
AKS
2640}
2641
1b95a839
AKS
2642static int
2643qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2644{
2645 u32 val;
2646
2647 if (qlcnic_api_lock(adapter))
2648 return -EBUSY;
2649
2650 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2651 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2652 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2653
2654 qlcnic_api_unlock(adapter);
2655
2656 return 0;
2657}
2658
af19b491 2659static void
21854f02 2660qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2661{
2662 u32 val;
2663
2664 if (qlcnic_api_lock(adapter))
2665 goto err;
2666
31018e06 2667 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2668 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2669 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2670
21854f02
AKS
2671 if (failed) {
2672 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2673 dev_info(&adapter->pdev->dev,
2674 "Device state set to Failed. Please Reboot\n");
2675 } else if (!(val & 0x11111111))
af19b491
AKS
2676 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2677
2678 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2679 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2680 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2681
2682 qlcnic_api_unlock(adapter);
2683err:
2684 adapter->fw_fail_cnt = 0;
032a13c7 2685 adapter->flags &= ~QLCNIC_FW_HANG;
af19b491
AKS
2686 clear_bit(__QLCNIC_START_FW, &adapter->state);
2687 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2688}
2689
f73dfc50 2690/* Grab api lock, before checking state */
af19b491
AKS
2691static int
2692qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2693{
602ca6f0 2694 int act, state, active_mask;
af19b491
AKS
2695
2696 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2697 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491 2698
602ca6f0
SV
2699 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2700 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2701 act = act & active_mask;
2702 }
2703
af19b491
AKS
2704 if (((state & 0x11111111) == (act & 0x11111111)) ||
2705 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2706 return 0;
2707 else
2708 return 1;
2709}
2710
96f8118c
SC
2711static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2712{
2713 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2714
2715 if (val != QLCNIC_DRV_IDC_VER) {
2716 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2717 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2718 }
2719
2720 return 0;
2721}
2722
af19b491
AKS
2723static int
2724qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2725{
2726 u32 val, prev_state;
aa5e18c0 2727 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2728 u8 portnum = adapter->portnum;
96f8118c 2729 u8 ret;
af19b491 2730
f73dfc50
AKS
2731 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2732 return 1;
2733
af19b491
AKS
2734 if (qlcnic_api_lock(adapter))
2735 return -1;
2736
31018e06 2737 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2738 if (!(val & (1 << (portnum * 4)))) {
2739 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2740 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2741 }
2742
2743 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2744 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2745
2746 switch (prev_state) {
2747 case QLCNIC_DEV_COLD:
bbd8c6a4 2748 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2749 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2750 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2751 qlcnic_api_unlock(adapter);
2752 return 1;
2753
2754 case QLCNIC_DEV_READY:
96f8118c 2755 ret = qlcnic_check_idc_ver(adapter);
af19b491 2756 qlcnic_api_unlock(adapter);
96f8118c 2757 return ret;
af19b491
AKS
2758
2759 case QLCNIC_DEV_NEED_RESET:
2760 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2761 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2762 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2763 break;
2764
2765 case QLCNIC_DEV_NEED_QUISCENT:
2766 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2767 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2768 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2769 break;
2770
2771 case QLCNIC_DEV_FAILED:
a7fc948f 2772 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2773 qlcnic_api_unlock(adapter);
2774 return -1;
bbd8c6a4
AKS
2775
2776 case QLCNIC_DEV_INITIALIZING:
2777 case QLCNIC_DEV_QUISCENT:
2778 break;
af19b491
AKS
2779 }
2780
2781 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2782
2783 do {
af19b491 2784 msleep(1000);
a5e463d0
SC
2785 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2786
2787 if (prev_state == QLCNIC_DEV_QUISCENT)
2788 continue;
2789 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2790
65b5b420
AKS
2791 if (!dev_init_timeo) {
2792 dev_err(&adapter->pdev->dev,
2793 "Waiting for device to initialize timeout\n");
af19b491 2794 return -1;
65b5b420 2795 }
af19b491
AKS
2796
2797 if (qlcnic_api_lock(adapter))
2798 return -1;
2799
2800 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2801 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2802 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2803
96f8118c 2804 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2805 qlcnic_api_unlock(adapter);
2806
96f8118c 2807 return ret;
af19b491
AKS
2808}
2809
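/*
 * qlcnic_fwinit_work() drives firmware recovery.  The function that owns
 * the reset (QLCNIC_FW_RESET_OWNER) waits for the other functions to ack
 * NEED_RESET (bounded by reset_ack_timeo), optionally captures a
 * firmware dump, and then restarts the firmware; non-privileged
 * functions simply poll the device state until it returns to READY and
 * then re-attach.
 */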
2810static void
2811qlcnic_fwinit_work(struct work_struct *work)
2812{
2813 struct qlcnic_adapter *adapter = container_of(work,
2814 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2815 u32 dev_state = 0xf;
7b749ff4 2816 u32 val;
af19b491 2817
f73dfc50
AKS
2818 if (qlcnic_api_lock(adapter))
2819 goto err_ret;
af19b491 2820
a5e463d0 2821 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2822 if (dev_state == QLCNIC_DEV_QUISCENT ||
2823 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2824 qlcnic_api_unlock(adapter);
2825 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2826 FW_POLL_DELAY * 2);
2827 return;
2828 }
2829
9f26f547 2830 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2831 qlcnic_api_unlock(adapter);
2832 goto wait_npar;
9f26f547
AC
2833 }
2834
f73dfc50
AKS
2835 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2836 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2837 adapter->reset_ack_timeo);
2838 goto skip_ack_check;
2839 }
2840
2841 if (!qlcnic_check_drv_state(adapter)) {
2842skip_ack_check:
2843 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2844
f73dfc50
AKS
2845 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2846 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2847 QLCNIC_DEV_INITIALIZING);
2848 set_bit(__QLCNIC_START_FW, &adapter->state);
2849 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2850 qlcnic_idc_debug_info(adapter, 0);
7b749ff4
SV
2851 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2852 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2853 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
af19b491
AKS
2854 }
2855
f73dfc50
AKS
2856 qlcnic_api_unlock(adapter);
2857
287e38aa 2858 rtnl_lock();
7b749ff4
SV
2859 if (adapter->ahw->fw_dump.enable &&
2860 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2861 QLCDB(adapter, DRV, "Take FW dump\n");
2862 qlcnic_dump_fw(adapter);
032a13c7 2863 adapter->flags |= QLCNIC_FW_HANG;
9d6a6440 2864 }
287e38aa 2865 rtnl_unlock();
7b749ff4
SV
2866
2867 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
9f26f547 2868 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2869 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2870 adapter->fw_wait_cnt = 0;
af19b491
AKS
2871 return;
2872 }
af19b491
AKS
2873 goto err_ret;
2874 }
2875
f73dfc50 2876 qlcnic_api_unlock(adapter);
aa5e18c0 2877
9f26f547 2878wait_npar:
af19b491 2879 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2880 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2881
af19b491 2882 switch (dev_state) {
3c4b23b1 2883 case QLCNIC_DEV_READY:
9f26f547 2884 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2885 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2886 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2887 return;
2888 }
3c4b23b1
AKS
2889 case QLCNIC_DEV_FAILED:
2890 break;
2891 default:
2892 qlcnic_schedule_work(adapter,
2893 qlcnic_fwinit_work, FW_POLL_DELAY);
2894 return;
af19b491
AKS
2895 }
2896
2897err_ret:
f73dfc50
AKS
2898 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2899 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2900 netif_device_attach(adapter->netdev);
21854f02 2901 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2902}
2903
2904static void
2905qlcnic_detach_work(struct work_struct *work)
2906{
2907 struct qlcnic_adapter *adapter = container_of(work,
2908 struct qlcnic_adapter, fw_work.work);
2909 struct net_device *netdev = adapter->netdev;
2910 u32 status;
2911
2912 netif_device_detach(netdev);
2913
b8c17620
AKS
2914 /* Don't grab the rtnl lock during quiescent mode */
2915 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2916 if (netif_running(netdev))
2917 __qlcnic_down(adapter, netdev);
2918 } else
2919 qlcnic_down(adapter, netdev);
af19b491 2920
af19b491
AKS
2921 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2922
2923 if (status & QLCNIC_RCODE_FATAL_ERROR)
2924 goto err_ret;
2925
2926 if (adapter->temp == QLCNIC_TEMP_PANIC)
2927 goto err_ret;
602ca6f0
SV
2928 /* Don't ack if this instance is the reset owner */
2929 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2930 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2931 goto err_ret;
2932 }
af19b491
AKS
2933
2934 adapter->fw_wait_cnt = 0;
2935
2936 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2937
2938 return;
2939
2940err_ret:
65b5b420
AKS
2941 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2942 status, adapter->temp);
34ce3626 2943 netif_device_attach(netdev);
21854f02 2944 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2945}
2946
3c4b23b1
AKS
2947 /* Transition NPAR state to NON operational */
2948static void
2949qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2950{
2951 u32 state;
2952
2953 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2954 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2955 return;
2956
2957 if (qlcnic_api_lock(adapter))
2958 return;
2959 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2960 qlcnic_api_unlock(adapter);
2961}
2962
f73dfc50 2963/*Transit to RESET state from READY state only */
18f2f616 2964void
af19b491
AKS
2965qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2966{
2967 u32 state;
2968
cea8975e 2969 adapter->need_fw_reset = 1;
af19b491
AKS
2970 if (qlcnic_api_lock(adapter))
2971 return;
2972
2973 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2974
f73dfc50 2975 if (state == QLCNIC_DEV_READY) {
af19b491 2976 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
602ca6f0 2977 adapter->flags |= QLCNIC_FW_RESET_OWNER;
65b5b420 2978 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2979 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2980 }
2981
3c4b23b1 2982 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2983 qlcnic_api_unlock(adapter);
2984}
2985
9f26f547
AC
2986 /* Transition to NPAR READY state from NPAR NOT READY state */
2987static void
2988qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2989{
9f26f547
AC
2990 if (qlcnic_api_lock(adapter))
2991 return;
2992
3c4b23b1
AKS
2993 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2994 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2995
2996 qlcnic_api_unlock(adapter);
2997}
2998
af19b491
AKS
2999static void
3000qlcnic_schedule_work(struct qlcnic_adapter *adapter,
3001 work_func_t func, int delay)
3002{
451724c8
SC
3003 if (test_bit(__QLCNIC_AER, &adapter->state))
3004 return;
3005
af19b491 3006 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
3007 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
3008 round_jiffies_relative(delay));
af19b491
AKS
3009}
3010
3011static void
3012qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3013{
3014 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3015 msleep(10);
3016
3017 cancel_delayed_work_sync(&adapter->fw_work);
3018}
3019
3020static void
3021qlcnic_attach_work(struct work_struct *work)
3022{
3023 struct qlcnic_adapter *adapter = container_of(work,
3024 struct qlcnic_adapter, fw_work.work);
3025 struct net_device *netdev = adapter->netdev;
b18971d1 3026 u32 npar_state;
af19b491 3027
b18971d1
AKS
3028 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
3029 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3030 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3031 qlcnic_clr_all_drv_state(adapter, 0);
3032 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
3033 qlcnic_schedule_work(adapter, qlcnic_attach_work,
3034 FW_POLL_DELAY);
3035 else
3036 goto attach;
3037 QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
3038 return;
3039 }
3040attach:
af19b491 3041 if (netif_running(netdev)) {
52486a3a 3042 if (qlcnic_up(adapter, netdev))
af19b491 3043 goto done;
af19b491 3044
aec1e845 3045 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
3046 }
3047
af19b491 3048done:
34ce3626 3049 netif_device_attach(netdev);
af19b491 3050 adapter->fw_fail_cnt = 0;
032a13c7 3051 adapter->flags &= ~QLCNIC_FW_HANG;
af19b491 3052 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
3053
3054 if (!qlcnic_clr_drv_state(adapter))
3055 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3056 FW_POLL_DELAY);
af19b491
AKS
3057}
3058
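/*
 * Periodic health check, run from qlcnic_fw_poll_work() every
 * FW_POLL_DELAY.  It monitors the temperature, the IDC device state and
 * the firmware heartbeat counter (QLCNIC_PEG_ALIVE_COUNTER); once
 * FW_FAIL_THRESH polls pass without the heartbeat advancing, the
 * firmware is declared hung, a reset is requested and, if auto_fw_reset
 * allows it, qlcnic_detach_work() is scheduled.
 */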
3059static int
3060qlcnic_check_health(struct qlcnic_adapter *adapter)
3061{
4e70812b 3062 u32 state = 0, heartbeat;
af19b491
AKS
3063 struct net_device *netdev = adapter->netdev;
3064
3065 if (qlcnic_check_temp(adapter))
3066 goto detach;
3067
2372a5f1 3068 if (adapter->need_fw_reset)
af19b491 3069 qlcnic_dev_request_reset(adapter);
af19b491
AKS
3070
3071 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 3072 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3073 qlcnic_set_npar_non_operational(adapter);
af19b491 3074 adapter->need_fw_reset = 1;
3075 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3076 goto detach;
af19b491 3077
4e70812b
SC
3078 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3079 if (heartbeat != adapter->heartbeat) {
3080 adapter->heartbeat = heartbeat;
af19b491
AKS
3081 adapter->fw_fail_cnt = 0;
3082 if (adapter->need_fw_reset)
3083 goto detach;
68bf1c68 3084
9ce13ca8 3085 if (adapter->reset_context && auto_fw_reset) {
3086 qlcnic_reset_hw_context(adapter);
3087 adapter->netdev->trans_start = jiffies;
3088 }
3089
af19b491
AKS
3090 return 0;
3091 }
3092
3093 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3094 return 0;
3095
032a13c7
SV
3096 adapter->flags |= QLCNIC_FW_HANG;
3097
af19b491
AKS
3098 qlcnic_dev_request_reset(adapter);
3099
9ce13ca8 3100 if (auto_fw_reset)
0df170b6 3101 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3102
3103 dev_info(&netdev->dev, "firmware hang detected\n");
3104
3105detach:
3106 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3107 QLCNIC_DEV_NEED_RESET;
3108
9ce13ca8 3109 if (auto_fw_reset &&
3110 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3111
af19b491 3112 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3113 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3114 }
af19b491
AKS
3115
3116 return 1;
3117}
3118
3119static void
3120qlcnic_fw_poll_work(struct work_struct *work)
3121{
3122 struct qlcnic_adapter *adapter = container_of(work,
3123 struct qlcnic_adapter, fw_work.work);
3124
3125 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3126 goto reschedule;
3127
3128
3129 if (qlcnic_check_health(adapter))
3130 return;
3131
b5e5492c
AKS
3132 if (adapter->fhash.fnum)
3133 qlcnic_prune_lb_filters(adapter);
3134
af19b491
AKS
3135reschedule:
3136 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3137}
3138
451724c8
SC
3139static int qlcnic_is_first_func(struct pci_dev *pdev)
3140{
3141 struct pci_dev *oth_pdev;
3142 int val = pdev->devfn;
3143
3144 while (val-- > 0) {
3145 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3146 (pdev->bus), pdev->bus->number,
3147 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3148 if (!oth_pdev)
3149 continue;
451724c8 3150
bfc978fa
AKS
3151 if (oth_pdev->current_state != PCI_D3cold) {
3152 pci_dev_put(oth_pdev);
451724c8 3153 return 0;
bfc978fa
AKS
3154 }
3155 pci_dev_put(oth_pdev);
451724c8
SC
3156 }
3157 return 1;
3158}
3159
3160static int qlcnic_attach_func(struct pci_dev *pdev)
3161{
3162 int err, first_func;
3163 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3164 struct net_device *netdev = adapter->netdev;
3165
3166 pdev->error_state = pci_channel_io_normal;
3167
3168 err = pci_enable_device(pdev);
3169 if (err)
3170 return err;
3171
3172 pci_set_power_state(pdev, PCI_D0);
3173 pci_set_master(pdev);
3174 pci_restore_state(pdev);
3175
3176 first_func = qlcnic_is_first_func(pdev);
3177
3178 if (qlcnic_api_lock(adapter))
3179 return -EINVAL;
3180
933fce12 3181 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
3182 adapter->need_fw_reset = 1;
3183 set_bit(__QLCNIC_START_FW, &adapter->state);
3184 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3185 QLCDB(adapter, DRV, "Restarting fw\n");
3186 }
3187 qlcnic_api_unlock(adapter);
3188
3189 err = adapter->nic_ops->start_firmware(adapter);
3190 if (err)
3191 return err;
3192
3193 qlcnic_clr_drv_state(adapter);
3194 qlcnic_setup_intr(adapter);
3195
3196 if (netif_running(netdev)) {
3197 err = qlcnic_attach(adapter);
3198 if (err) {
21854f02 3199 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3200 clear_bit(__QLCNIC_AER, &adapter->state);
3201 netif_device_attach(netdev);
3202 return err;
3203 }
3204
3205 err = qlcnic_up(adapter, netdev);
3206 if (err)
3207 goto done;
3208
aec1e845 3209 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3210 }
3211 done:
3212 netif_device_attach(netdev);
3213 return err;
3214}
3215
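/*
 * PCI AER recovery: qlcnic_io_error_detected() detaches the interface,
 * tears down interrupts and disables the device, asking the PCI core for
 * a slot reset.  qlcnic_io_slot_reset() then re-enables the function via
 * qlcnic_attach_func() (the first active function also restarts the
 * firmware), and qlcnic_io_resume() clears the AER status and restarts
 * the FW poll work once the device is READY again.
 */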
3216static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3217 pci_channel_state_t state)
3218{
3219 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3220 struct net_device *netdev = adapter->netdev;
3221
3222 if (state == pci_channel_io_perm_failure)
3223 return PCI_ERS_RESULT_DISCONNECT;
3224
3225 if (state == pci_channel_io_normal)
3226 return PCI_ERS_RESULT_RECOVERED;
3227
3228 set_bit(__QLCNIC_AER, &adapter->state);
3229 netif_device_detach(netdev);
3230
3231 cancel_delayed_work_sync(&adapter->fw_work);
3232
3233 if (netif_running(netdev))
3234 qlcnic_down(adapter, netdev);
3235
3236 qlcnic_detach(adapter);
3237 qlcnic_teardown_intr(adapter);
3238
3239 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3240
3241 pci_save_state(pdev);
3242 pci_disable_device(pdev);
3243
3244 return PCI_ERS_RESULT_NEED_RESET;
3245}
3246
3247static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3248{
3249 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3250 PCI_ERS_RESULT_RECOVERED;
3251}
3252
3253static void qlcnic_io_resume(struct pci_dev *pdev)
3254{
3255 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3256
3257 pci_cleanup_aer_uncorrect_error_status(pdev);
3258
3259 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3260 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3261 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3262 FW_POLL_DELAY);
3263}
3264
87eb743b
AC
3265static int
3266qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3267{
3268 int err;
3269
3270 err = qlcnic_can_start_firmware(adapter);
3271 if (err)
3272 return err;
3273
78f84e1a
AKS
3274 err = qlcnic_check_npar_opertional(adapter);
3275 if (err)
3276 return err;
3c4b23b1 3277
174240a8
RB
3278 err = qlcnic_initialize_nic(adapter);
3279 if (err)
3280 return err;
3281
87eb743b
AC
3282 qlcnic_check_options(adapter);
3283
7373373d
RB
3284 err = qlcnic_set_eswitch_port_config(adapter);
3285 if (err)
3286 return err;
3287
87eb743b
AC
3288 adapter->need_fw_reset = 0;
3289
3290 return err;
3291}
3292
3293static int
3294qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3295{
3296 return -EOPNOTSUPP;
3297}
3298
3299static int
3300qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3301{
3302 return -EOPNOTSUPP;
3303}
3304
af19b491
AKS
3305static ssize_t
3306qlcnic_store_bridged_mode(struct device *dev,
3307 struct device_attribute *attr, const char *buf, size_t len)
3308{
3309 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3310 unsigned long new;
3311 int ret = -EINVAL;
3312
3313 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3314 goto err_out;
3315
8a15ad1f 3316 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3317 goto err_out;
3318
3319 if (strict_strtoul(buf, 2, &new))
3320 goto err_out;
3321
2e9d722d 3322 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3323 ret = len;
3324
3325err_out:
3326 return ret;
3327}
3328
3329static ssize_t
3330qlcnic_show_bridged_mode(struct device *dev,
3331 struct device_attribute *attr, char *buf)
3332{
3333 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3334 int bridged_mode = 0;
3335
3336 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3337 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3338
3339 return sprintf(buf, "%d\n", bridged_mode);
3340}
3341
3342static struct device_attribute dev_attr_bridged_mode = {
3343 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3344 .show = qlcnic_show_bridged_mode,
3345 .store = qlcnic_store_bridged_mode,
3346};
3347
3348static ssize_t
3349qlcnic_store_diag_mode(struct device *dev,
3350 struct device_attribute *attr, const char *buf, size_t len)
3351{
3352 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3353 unsigned long new;
3354
3355 if (strict_strtoul(buf, 2, &new))
3356 return -EINVAL;
3357
3358 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3359 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3360
3361 return len;
3362}
3363
3364static ssize_t
3365qlcnic_show_diag_mode(struct device *dev,
3366 struct device_attribute *attr, char *buf)
3367{
3368 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3369
3370 return sprintf(buf, "%d\n",
3371 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3372}
3373
3374static struct device_attribute dev_attr_diag_mode = {
3375 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3376 .show = qlcnic_show_diag_mode,
3377 .store = qlcnic_store_diag_mode,
3378};
3379
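/*
 * Runtime RSS reconfiguration: the requested ring count must be a power
 * of two between 2 and the hardware maximum and requires MSI or MSI-X.
 * qlcnic_set_max_rss() brings the interface down, re-provisions the
 * MSI-X vectors (falling back to MSI/legacy interrupts on failure) and
 * attaches again with the new ring count.
 */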
f94bc1e7
SC
3380int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3381{
3382 if (!use_msi_x && !use_msi) {
3383 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3384 return -EINVAL;
3385 }
3386
3387 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3388 netdev_info(netdev, "rss_ring must be a power of 2 in the "
3389 "range [2 - %x]\n", max_hw);
3390 return -EINVAL;
3391 }
3392 return 0;
3393
3394}
3395
3396int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3397{
3398 struct net_device *netdev = adapter->netdev;
3399 int err = 0;
3400
3401 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3402 return -EBUSY;
3403
3404 netif_device_detach(netdev);
3405 if (netif_running(netdev))
3406 __qlcnic_down(adapter, netdev);
3407 qlcnic_detach(adapter);
3408 qlcnic_teardown_intr(adapter);
3409
3410 if (qlcnic_enable_msix(adapter, data)) {
3411 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3412 qlcnic_enable_msi_legacy(adapter);
3413 }
3414
3415 if (netif_running(netdev)) {
3416 err = qlcnic_attach(adapter);
3417 if (err)
3418 goto done;
3419 err = __qlcnic_up(adapter, netdev);
3420 if (err)
3421 goto done;
3422 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3423 }
3424 done:
3425 netif_device_attach(netdev);
3426 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3427 return err;
3428}
3429
af19b491
AKS
3430static int
3431qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3432 loff_t offset, size_t size)
3433{
897e8c7c
DP
3434 size_t crb_size = 4;
3435
af19b491
AKS
3436 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3437 return -EIO;
3438
897e8c7c
DP
3439 if (offset < QLCNIC_PCI_CRBSPACE) {
3440 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3441 QLCNIC_PCI_CAMQM_END))
3442 crb_size = 8;
3443 else
3444 return -EINVAL;
3445 }
af19b491 3446
897e8c7c
DP
3447 if ((size != crb_size) || (offset & (crb_size-1)))
3448 return -EINVAL;
af19b491
AKS
3449
3450 return 0;
3451}
3452
3453static ssize_t
2c3c8bea
CW
3454qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3455 struct bin_attribute *attr,
af19b491
AKS
3456 char *buf, loff_t offset, size_t size)
3457{
3458 struct device *dev = container_of(kobj, struct device, kobj);
3459 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3460 u32 data;
897e8c7c 3461 u64 qmdata;
af19b491
AKS
3462 int ret;
3463
3464 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3465 if (ret != 0)
3466 return ret;
3467
897e8c7c
DP
3468 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3469 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3470 memcpy(buf, &qmdata, size);
3471 } else {
3472 data = QLCRD32(adapter, offset);
3473 memcpy(buf, &data, size);
3474 }
af19b491
AKS
3475 return size;
3476}
3477
3478static ssize_t
2c3c8bea
CW
3479qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3480 struct bin_attribute *attr,
af19b491
AKS
3481 char *buf, loff_t offset, size_t size)
3482{
3483 struct device *dev = container_of(kobj, struct device, kobj);
3484 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3485 u32 data;
897e8c7c 3486 u64 qmdata;
af19b491
AKS
3487 int ret;
3488
3489 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3490 if (ret != 0)
3491 return ret;
3492
897e8c7c
DP
3493 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3494 memcpy(&qmdata, buf, size);
3495 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3496 } else {
3497 memcpy(&data, buf, size);
3498 QLCWR32(adapter, offset, data);
3499 }
af19b491
AKS
3500 return size;
3501}
3502
3503static int
3504qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3505 loff_t offset, size_t size)
3506{
3507 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3508 return -EIO;
3509
3510 if ((size != 8) || (offset & 0x7))
3511 return -EIO;
3512
3513 return 0;
3514}
3515
3516static ssize_t
2c3c8bea
CW
3517qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3518 struct bin_attribute *attr,
af19b491
AKS
3519 char *buf, loff_t offset, size_t size)
3520{
3521 struct device *dev = container_of(kobj, struct device, kobj);
3522 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3523 u64 data;
3524 int ret;
3525
3526 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3527 if (ret != 0)
3528 return ret;
3529
3530 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3531 return -EIO;
3532
3533 memcpy(buf, &data, size);
3534
3535 return size;
3536}
3537
3538static ssize_t
2c3c8bea
CW
3539qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3540 struct bin_attribute *attr,
af19b491
AKS
3541 char *buf, loff_t offset, size_t size)
3542{
3543 struct device *dev = container_of(kobj, struct device, kobj);
3544 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3545 u64 data;
3546 int ret;
3547
3548 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3549 if (ret != 0)
3550 return ret;
3551
3552 memcpy(&data, buf, size);
3553
3554 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3555 return -EIO;
3556
3557 return size;
3558}
3559
af19b491
AKS
3560static struct bin_attribute bin_attr_crb = {
3561 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3562 .size = 0,
3563 .read = qlcnic_sysfs_read_crb,
3564 .write = qlcnic_sysfs_write_crb,
3565};
3566
3567static struct bin_attribute bin_attr_mem = {
3568 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3569 .size = 0,
3570 .read = qlcnic_sysfs_read_mem,
3571 .write = qlcnic_sysfs_write_mem,
3572};
3573
cea8975e 3574static int
346fe763
RB
3575validate_pm_config(struct qlcnic_adapter *adapter,
3576 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3577{
3578
3579 u8 src_pci_func, s_esw_id, d_esw_id;
3580 u8 dest_pci_func;
3581 int i;
3582
3583 for (i = 0; i < count; i++) {
3584 src_pci_func = pm_cfg[i].pci_func;
3585 dest_pci_func = pm_cfg[i].dest_npar;
3586 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3587 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3588 return QL_STATUS_INVALID_PARAM;
3589
3590 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3591 return QL_STATUS_INVALID_PARAM;
3592
3593 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3594 return QL_STATUS_INVALID_PARAM;
3595
346fe763
RB
3596 s_esw_id = adapter->npars[src_pci_func].phy_port;
3597 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3598
3599 if (s_esw_id != d_esw_id)
3600 return QL_STATUS_INVALID_PARAM;
3601
3602 }
3603 return 0;
3604
3605}
3606
3607static ssize_t
3608qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3609 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3610{
3611 struct device *dev = container_of(kobj, struct device, kobj);
3612 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3613 struct qlcnic_pm_func_cfg *pm_cfg;
3614 u32 id, action, pci_func;
3615 int count, rem, i, ret;
3616
3617 count = size / sizeof(struct qlcnic_pm_func_cfg);
3618 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3619 if (rem)
3620 return QL_STATUS_INVALID_PARAM;
3621
3622 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3623
3624 ret = validate_pm_config(adapter, pm_cfg, count);
3625 if (ret)
3626 return ret;
3627 for (i = 0; i < count; i++) {
3628 pci_func = pm_cfg[i].pci_func;
4e8acb01 3629 action = !!pm_cfg[i].action;
346fe763
RB
3630 id = adapter->npars[pci_func].phy_port;
3631 ret = qlcnic_config_port_mirroring(adapter, id,
3632 action, pci_func);
3633 if (ret)
3634 return ret;
3635 }
3636
3637 for (i = 0; i < count; i++) {
3638 pci_func = pm_cfg[i].pci_func;
3639 id = adapter->npars[pci_func].phy_port;
4e8acb01 3640 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3641 adapter->npars[pci_func].dest_npar = id;
3642 }
3643 return size;
3644}
3645
3646static ssize_t
3647qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3648 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3649{
3650 struct device *dev = container_of(kobj, struct device, kobj);
3651 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3652 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3653 int i;
3654
3655 if (size != sizeof(pm_cfg))
3656 return QL_STATUS_INVALID_PARAM;
3657
3658 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3659 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3660 continue;
3661 pm_cfg[i].action = adapter->npars[i].enable_pm;
3662 pm_cfg[i].dest_npar = 0;
3663 pm_cfg[i].pci_func = i;
3664 }
3665 memcpy(buf, &pm_cfg, size);
3666
3667 return size;
3668}
3669
cea8975e 3670static int
346fe763 3671validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3672 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3673{
7613c87b 3674 u32 op_mode;
346fe763
RB
3675 u8 pci_func;
3676 int i;
7613c87b 3677
b1fc6d3c 3678 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3679
346fe763
RB
3680 for (i = 0; i < count; i++) {
3681 pci_func = esw_cfg[i].pci_func;
3682 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3683 return QL_STATUS_INVALID_PARAM;
3684
4e8acb01
RB
3685 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3686 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3687 return QL_STATUS_INVALID_PARAM;
346fe763 3688
4e8acb01
RB
3689 switch (esw_cfg[i].op_mode) {
3690 case QLCNIC_PORT_DEFAULTS:
7613c87b 3691 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3692 QLCNIC_NON_PRIV_FUNC) {
3693 if (esw_cfg[i].mac_anti_spoof != 0)
3694 return QL_STATUS_INVALID_PARAM;
3695 if (esw_cfg[i].mac_override != 1)
3696 return QL_STATUS_INVALID_PARAM;
3697 if (esw_cfg[i].promisc_mode != 1)
3698 return QL_STATUS_INVALID_PARAM;
7373373d 3699 }
4e8acb01
RB
3700 break;
3701 case QLCNIC_ADD_VLAN:
346fe763
RB
3702 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3703 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3704 if (!esw_cfg[i].op_type)
3705 return QL_STATUS_INVALID_PARAM;
3706 break;
3707 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3708 if (!esw_cfg[i].op_type)
3709 return QL_STATUS_INVALID_PARAM;
3710 break;
3711 default:
346fe763 3712 return QL_STATUS_INVALID_PARAM;
4e8acb01 3713 }
346fe763 3714 }
346fe763
RB
3715 return 0;
3716}
3717
3718static ssize_t
3719qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3720 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3721{
3722 struct device *dev = container_of(kobj, struct device, kobj);
3723 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3724 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3725 struct qlcnic_npar_info *npar;
346fe763 3726 int count, rem, i, ret;
0325d69b 3727 u8 pci_func, op_mode = 0;
346fe763
RB
3728
3729 count = size / sizeof(struct qlcnic_esw_func_cfg);
3730 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3731 if (rem)
3732 return QL_STATUS_INVALID_PARAM;
3733
3734 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3735 ret = validate_esw_config(adapter, esw_cfg, count);
3736 if (ret)
3737 return ret;
3738
3739 for (i = 0; i < count; i++) {
0325d69b
RB
3740 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3741 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3742 return QL_STATUS_INVALID_PARAM;
e9a47700 3743
b1fc6d3c 3744 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
e9a47700
RB
3745 continue;
3746
3747 op_mode = esw_cfg[i].op_mode;
3748 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3749 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3750 esw_cfg[i].pci_func = adapter->ahw->pci_func;
e9a47700
RB
3751
3752 switch (esw_cfg[i].op_mode) {
3753 case QLCNIC_PORT_DEFAULTS:
3754 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3755 break;
8cf61f89
AKS
3756 case QLCNIC_ADD_VLAN:
3757 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3758 break;
3759 case QLCNIC_DEL_VLAN:
3760 esw_cfg[i].vlan_id = 0;
3761 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3762 break;
0325d69b 3763 }
346fe763
RB
3764 }
3765
0325d69b
RB
3766 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3767 goto out;
e9a47700 3768
346fe763
RB
3769 for (i = 0; i < count; i++) {
3770 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3771 npar = &adapter->npars[pci_func];
3772 switch (esw_cfg[i].op_mode) {
3773 case QLCNIC_PORT_DEFAULTS:
3774 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3775 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3776 npar->offload_flags = esw_cfg[i].offload_flags;
3777 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3778 npar->discard_tagged = esw_cfg[i].discard_tagged;
3779 break;
3780 case QLCNIC_ADD_VLAN:
3781 npar->pvid = esw_cfg[i].vlan_id;
3782 break;
3783 case QLCNIC_DEL_VLAN:
3784 npar->pvid = 0;
3785 break;
3786 }
346fe763 3787 }
0325d69b 3788out:
346fe763
RB
3789 return size;
3790}
3791
3792static ssize_t
3793qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3794 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3795{
3796 struct device *dev = container_of(kobj, struct device, kobj);
3797 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3798 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3799 u8 i;
346fe763
RB
3800
3801 if (size != sizeof(esw_cfg))
3802 return QL_STATUS_INVALID_PARAM;
3803
	memset(&esw_cfg, 0, sizeof(esw_cfg));	/* entries for non-NIC functions are skipped below */
3804 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3805 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3806 continue;
4e8acb01
RB
3807 esw_cfg[i].pci_func = i;
3808 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3809 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3810 }
3811 memcpy(buf, &esw_cfg, size);
3812
3813 return size;
3814}
3815
cea8975e 3816static int
346fe763
RB
3817validate_npar_config(struct qlcnic_adapter *adapter,
3818 struct qlcnic_npar_func_cfg *np_cfg, int count)
3819{
3820 u8 pci_func, i;
3821
3822 for (i = 0; i < count; i++) {
3823 pci_func = np_cfg[i].pci_func;
3824 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3825 return QL_STATUS_INVALID_PARAM;
3826
3827 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3828 return QL_STATUS_INVALID_PARAM;
3829
d12b0d9a
RB
3830 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3831 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3832 return QL_STATUS_INVALID_PARAM;
3833 }
3834 return 0;
3835}
3836
3837static ssize_t
3838qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3839 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3840{
3841 struct device *dev = container_of(kobj, struct device, kobj);
3842 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3843 struct qlcnic_info nic_info;
3844 struct qlcnic_npar_func_cfg *np_cfg;
3845 int i, count, rem, ret;
3846 u8 pci_func;
3847
3848 count = size / sizeof(struct qlcnic_npar_func_cfg);
3849 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3850 if (rem)
3851 return QL_STATUS_INVALID_PARAM;
3852
3853 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3854 ret = validate_npar_config(adapter, np_cfg, count);
3855 if (ret)
3856 return ret;
3857
 3858 for (i = 0; i < count; i++) {
3859 pci_func = np_cfg[i].pci_func;
3860 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3861 if (ret)
3862 return ret;
3863 nic_info.pci_func = pci_func;
3864 nic_info.min_tx_bw = np_cfg[i].min_bw;
3865 nic_info.max_tx_bw = np_cfg[i].max_bw;
3866 ret = qlcnic_set_nic_info(adapter, &nic_info);
3867 if (ret)
3868 return ret;
cea8975e
AC
 3869 adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
 3870 adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
346fe763
RB
3871 }
3872
3873 return size;
 3874}
 3875
 3876static ssize_t
3877qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3878 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3879{
3880 struct device *dev = container_of(kobj, struct device, kobj);
3881 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3882 struct qlcnic_info nic_info;
3883 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3884 int i, ret;
3885
3886 if (size != sizeof(np_cfg))
3887 return QL_STATUS_INVALID_PARAM;
3888
	memset(&np_cfg, 0, sizeof(np_cfg));	/* entries for non-NIC functions are skipped below */
 3889 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3890 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3891 continue;
3892 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3893 if (ret)
3894 return ret;
3895
3896 np_cfg[i].pci_func = i;
a1c0c459 3897 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3898 np_cfg[i].port_num = nic_info.phys_port;
3899 np_cfg[i].fw_capab = nic_info.capabilities;
 3900 np_cfg[i].min_bw = nic_info.min_tx_bw;
3901 np_cfg[i].max_bw = nic_info.max_tx_bw;
3902 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3903 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3904 }
3905 memcpy(buf, &np_cfg, size);
3906 return size;
3907}
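
The npar_config pair works the same way: a read must supply a buffer for the whole table (one record per possible PCI function), while a write may carry any whole number of records, each updating that function's minimum and maximum TX bandwidth through qlcnic_set_nic_info(). Below is a hypothetical userspace dump of the bandwidth columns; the record layout and table size are assumptions mirrored for illustration and must be verified against qlcnic.h, because the read is rejected unless the buffer size matches exactly.

/* Hypothetical userspace sketch: print per-function TX bandwidth limits
 * from npar_config.  The struct is an ASSUMED mirror of struct
 * qlcnic_npar_func_cfg and MAX_PCI_FUNC an assumed value of
 * QLCNIC_MAX_PCI_FUNC; both come from qlcnic.h.  The path is an example.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_PCI_FUNC	8	/* assumed QLCNIC_MAX_PCI_FUNC */

struct npar_func_cfg {		/* assumed field order and widths */
	uint32_t fw_capab;
	uint32_t port_num;
	uint32_t min_bw;
	uint32_t max_bw;
	uint32_t max_tx_queues;
	uint32_t max_rx_queues;
	uint8_t  pci_func;
	uint8_t  op_mode;
	uint8_t  reserved[2];
};

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/npar_config";
	struct npar_func_cfg cfg[MAX_PCI_FUNC];
	int fd = open(path, O_RDONLY);
	int i;

	if (fd < 0) {
		perror("open npar_config");
		return 1;
	}
	if (read(fd, cfg, sizeof(cfg)) != (ssize_t)sizeof(cfg)) {
		perror("read npar_config");
		close(fd);
		return 1;
	}
	for (i = 0; i < MAX_PCI_FUNC; i++)
		printf("func %u: min_bw %u max_bw %u\n",
		       cfg[i].pci_func, cfg[i].min_bw, cfg[i].max_bw);
	close(fd);
	return 0;
}
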
3908
b6021212
AKS
3909static ssize_t
3910qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3911 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3912{
3913 struct device *dev = container_of(kobj, struct device, kobj);
3914 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3915 struct qlcnic_esw_statistics port_stats;
3916 int ret;
3917
3918 if (size != sizeof(struct qlcnic_esw_statistics))
3919 return QL_STATUS_INVALID_PARAM;
3920
3921 if (offset >= QLCNIC_MAX_PCI_FUNC)
3922 return QL_STATUS_INVALID_PARAM;
3923
3924 memset(&port_stats, 0, size);
3925 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3926 &port_stats.rx);
3927 if (ret)
3928 return ret;
3929
3930 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3931 &port_stats.tx);
3932 if (ret)
3933 return ret;
3934
3935 memcpy(buf, &port_stats, size);
3936 return size;
3937}
3938
3939static ssize_t
3940qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3941 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3942{
3943 struct device *dev = container_of(kobj, struct device, kobj);
3944 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3945 struct qlcnic_esw_statistics esw_stats;
3946 int ret;
3947
3948 if (size != sizeof(struct qlcnic_esw_statistics))
3949 return QL_STATUS_INVALID_PARAM;
3950
3951 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3952 return QL_STATUS_INVALID_PARAM;
3953
3954 memset(&esw_stats, 0, size);
3955 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3956 &esw_stats.rx);
3957 if (ret)
3958 return ret;
3959
3960 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3961 &esw_stats.tx);
3962 if (ret)
3963 return ret;
3964
3965 memcpy(buf, &esw_stats, size);
3966 return size;
3967}
3968
3969static ssize_t
3970qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3971 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3972{
3973 struct device *dev = container_of(kobj, struct device, kobj);
3974 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3975 int ret;
3976
3977 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3978 return QL_STATUS_INVALID_PARAM;
3979
3980 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3981 QLCNIC_QUERY_RX_COUNTER);
3982 if (ret)
3983 return ret;
3984
3985 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3986 QLCNIC_QUERY_TX_COUNTER);
3987 if (ret)
3988 return ret;
3989
3990 return size;
3991}
3992
3993static ssize_t
3994qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3995 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3996{
3997
3998 struct device *dev = container_of(kobj, struct device, kobj);
3999 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4000 int ret;
4001
4002 if (offset >= QLCNIC_MAX_PCI_FUNC)
4003 return QL_STATUS_INVALID_PARAM;
4004
4005 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4006 QLCNIC_QUERY_RX_COUNTER);
4007 if (ret)
4008 return ret;
4009
4010 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4011 QLCNIC_QUERY_TX_COUNTER);
4012 if (ret)
4013 return ret;
4014
4015 return size;
4016}
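
Note that the statistics attributes use the file offset as an index rather than a byte offset: the PCI function number for port_stats and the eSwitch id for esw_stats. A read must supply exactly sizeof(struct qlcnic_esw_statistics) bytes, while a write of any length at that offset clears both the RX and TX counters. A hypothetical userspace sketch of the clear operation follows; the device path is an example only.

/* Hypothetical userspace sketch: clear the port statistics of PCI
 * function 2.  The offset passed to pwrite() is the function index,
 * not a byte position in the file.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/port_stats";
	char dummy = 0;
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open port_stats");
		return 1;
	}
	/* any write at offset <pci_func> clears that function's RX and TX counters */
	if (pwrite(fd, &dummy, 1, 2) != 1)
		perror("clear port_stats");
	close(fd);
	return 0;
}
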
4017
346fe763
RB
4018static ssize_t
4019qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4020 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4021{
4022 struct device *dev = container_of(kobj, struct device, kobj);
4023 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4024 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 4025 struct qlcnic_pci_info *pci_info;
346fe763
RB
4026 int i, ret;
4027
4028 if (size != sizeof(pci_cfg))
4029 return QL_STATUS_INVALID_PARAM;
4030
e88db3bd
DC
4031 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4032 if (!pci_info)
4033 return -ENOMEM;
4034
346fe763 4035 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
4036 if (ret) {
4037 kfree(pci_info);
346fe763 4038 return ret;
e88db3bd 4039 }
346fe763
RB
4040
4041 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
4042 pci_cfg[i].pci_func = pci_info[i].id;
4043 pci_cfg[i].func_type = pci_info[i].type;
4044 pci_cfg[i].port_num = pci_info[i].default_port;
4045 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4046 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4047 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4048 }
4049 memcpy(buf, &pci_cfg, size);
e88db3bd 4050 kfree(pci_info);
346fe763 4051 return size;
346fe763
RB
4052}
4053static struct bin_attribute bin_attr_npar_config = {
4054 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4055 .size = 0,
4056 .read = qlcnic_sysfs_read_npar_config,
4057 .write = qlcnic_sysfs_write_npar_config,
4058};
4059
4060static struct bin_attribute bin_attr_pci_config = {
4061 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4062 .size = 0,
4063 .read = qlcnic_sysfs_read_pci_config,
4064 .write = NULL,
4065};
4066
b6021212
AKS
4067static struct bin_attribute bin_attr_port_stats = {
4068 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4069 .size = 0,
4070 .read = qlcnic_sysfs_get_port_stats,
4071 .write = qlcnic_sysfs_clear_port_stats,
4072};
4073
4074static struct bin_attribute bin_attr_esw_stats = {
4075 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4076 .size = 0,
4077 .read = qlcnic_sysfs_get_esw_stats,
4078 .write = qlcnic_sysfs_clear_esw_stats,
4079};
4080
346fe763
RB
4081static struct bin_attribute bin_attr_esw_config = {
4082 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4083 .size = 0,
4084 .read = qlcnic_sysfs_read_esw_config,
4085 .write = qlcnic_sysfs_write_esw_config,
4086};
4087
4088static struct bin_attribute bin_attr_pm_config = {
4089 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4090 .size = 0,
4091 .read = qlcnic_sysfs_read_pm_config,
4092 .write = qlcnic_sysfs_write_pm_config,
4093};
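
Each of these attributes follows the same pattern: a struct bin_attribute with .size = 0 (the handlers validate the length themselves), read/write callbacks with the kobject-based prototype used throughout this file, and registration through device_create_bin_file() in the code below. A hypothetical additional attribute, shown only to illustrate the pattern as it would appear in this file, might look like this:

/* Hypothetical sketch of one more attribute following the same pattern.
 * The name "foo_config" and its handler are illustrative; they are not
 * part of this driver.
 */
static ssize_t
qlcnic_sysfs_read_foo_config(struct file *file, struct kobject *kobj,
	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
{
	/* validate size/offset, fill buf, return the number of bytes copied */
	return 0;
}

static struct bin_attribute bin_attr_foo_config = {
	.attr = {.name = "foo_config", .mode = S_IRUGO},
	.size = 0,		/* handlers check the length themselves */
	.read = qlcnic_sysfs_read_foo_config,
	.write = NULL,		/* read-only attribute */
};

/* Registration would go in qlcnic_create_diag_entries():
 *	if (device_create_bin_file(dev, &bin_attr_foo_config))
 *		dev_info(dev, "failed to create foo config sysfs entry\n");
 * and removal in qlcnic_remove_diag_entries():
 *	device_remove_bin_file(dev, &bin_attr_foo_config);
 */
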
4094
af19b491
AKS
4095static void
4096qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4097{
4098 struct device *dev = &adapter->pdev->dev;
4099
4100 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4101 if (device_create_file(dev, &dev_attr_bridged_mode))
4102 dev_warn(dev,
4103 "failed to create bridged_mode sysfs entry\n");
4104}
4105
4106static void
4107qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4108{
4109 struct device *dev = &adapter->pdev->dev;
4110
4111 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4112 device_remove_file(dev, &dev_attr_bridged_mode);
4113}
4114
4115static void
4116qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4117{
4118 struct device *dev = &adapter->pdev->dev;
4119
b6021212
AKS
4120 if (device_create_bin_file(dev, &bin_attr_port_stats))
 4121 dev_info(dev, "failed to create port stats sysfs entry\n");
4122
132ff00a
AC
4123 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4124 return;
af19b491
AKS
4125 if (device_create_file(dev, &dev_attr_diag_mode))
4126 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4127 if (device_create_bin_file(dev, &bin_attr_crb))
4128 dev_info(dev, "failed to create crb sysfs entry\n");
4129 if (device_create_bin_file(dev, &bin_attr_mem))
4130 dev_info(dev, "failed to create mem sysfs entry\n");
53478fef
SC
4131 if (device_create_bin_file(dev, &bin_attr_pci_config))
 4132 dev_info(dev, "failed to create pci config sysfs entry\n");
4e8acb01
RB
4133 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4134 return;
4135 if (device_create_bin_file(dev, &bin_attr_esw_config))
 4136 dev_info(dev, "failed to create esw config sysfs entry\n");
4137 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4138 return;
346fe763
RB
4139 if (device_create_bin_file(dev, &bin_attr_npar_config))
 4140 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
4141 if (device_create_bin_file(dev, &bin_attr_pm_config))
 4142 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
4143 if (device_create_bin_file(dev, &bin_attr_esw_stats))
 4144 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
4145}
4146
af19b491
AKS
4147static void
4148qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4149{
4150 struct device *dev = &adapter->pdev->dev;
4151
b6021212
AKS
4152 device_remove_bin_file(dev, &bin_attr_port_stats);
4153
132ff00a
AC
4154 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4155 return;
af19b491
AKS
4156 device_remove_file(dev, &dev_attr_diag_mode);
4157 device_remove_bin_file(dev, &bin_attr_crb);
4158 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 4159 device_remove_bin_file(dev, &bin_attr_pci_config);
4e8acb01
RB
4160 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4161 return;
4162 device_remove_bin_file(dev, &bin_attr_esw_config);
4163 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4164 return;
346fe763 4165 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4166 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4167 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4168}
4169
4170#ifdef CONFIG_INET
4171
4172#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4173
af19b491 4174static void
aec1e845
AKS
4175qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4176 struct net_device *dev, unsigned long event)
af19b491
AKS
4177{
4178 struct in_device *indev;
af19b491 4179
af19b491
AKS
4180 indev = in_dev_get(dev);
4181 if (!indev)
4182 return;
4183
4184 for_ifa(indev) {
4185 switch (event) {
4186 case NETDEV_UP:
4187 qlcnic_config_ipaddr(adapter,
4188 ifa->ifa_address, QLCNIC_IP_UP);
4189 break;
4190 case NETDEV_DOWN:
4191 qlcnic_config_ipaddr(adapter,
4192 ifa->ifa_address, QLCNIC_IP_DOWN);
4193 break;
4194 default:
4195 break;
4196 }
4197 } endfor_ifa(indev);
4198
4199 in_dev_put(indev);
af19b491
AKS
4200}
4201
aec1e845
AKS
4202static void
4203qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4204{
4205 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4206 struct net_device *dev;
4207 u16 vid;
4208
4209 qlcnic_config_indev_addr(adapter, netdev, event);
4210
b9796a14 4211 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
223bb15e 4212 dev = __vlan_find_dev_deep(netdev, vid);
aec1e845
AKS
4213 if (!dev)
4214 continue;
aec1e845
AKS
4215 qlcnic_config_indev_addr(adapter, dev, event);
4216 }
4217}
4218
af19b491
AKS
4219static int qlcnic_netdev_event(struct notifier_block *this,
4220 unsigned long event, void *ptr)
4221{
4222 struct qlcnic_adapter *adapter;
4223 struct net_device *dev = (struct net_device *)ptr;
4224
4225recheck:
4226 if (dev == NULL)
4227 goto done;
4228
4229 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4230 dev = vlan_dev_real_dev(dev);
4231 goto recheck;
4232 }
4233
4234 if (!is_qlcnic_netdev(dev))
4235 goto done;
4236
4237 adapter = netdev_priv(dev);
4238
4239 if (!adapter)
4240 goto done;
4241
8a15ad1f 4242 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4243 goto done;
4244
aec1e845 4245 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4246done:
4247 return NOTIFY_DONE;
4248}
4249
4250static int
4251qlcnic_inetaddr_event(struct notifier_block *this,
4252 unsigned long event, void *ptr)
4253{
4254 struct qlcnic_adapter *adapter;
4255 struct net_device *dev;
4256
4257 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4258
4259 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4260
4261recheck:
aec1e845 4262 if (dev == NULL)
af19b491
AKS
4263 goto done;
4264
4265 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4266 dev = vlan_dev_real_dev(dev);
4267 goto recheck;
4268 }
4269
4270 if (!is_qlcnic_netdev(dev))
4271 goto done;
4272
4273 adapter = netdev_priv(dev);
4274
251a84c9 4275 if (!adapter)
af19b491
AKS
4276 goto done;
4277
8a15ad1f 4278 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4279 goto done;
4280
4281 switch (event) {
4282 case NETDEV_UP:
4283 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4284 break;
4285 case NETDEV_DOWN:
4286 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4287 break;
4288 default:
4289 break;
4290 }
4291
4292done:
4293 return NOTIFY_DONE;
4294}
4295
4296static struct notifier_block qlcnic_netdev_cb = {
4297 .notifier_call = qlcnic_netdev_event,
4298};
4299
4300static struct notifier_block qlcnic_inetaddr_cb = {
4301 .notifier_call = qlcnic_inetaddr_event,
4302};
4303#else
4304static void
aec1e845 4305qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4306{ }
4307#endif
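
The CONFIG_INET block above hooks the driver into the netdevice and inetaddr notifier chains so that IPv4 addresses can be reprogrammed into the firmware as interfaces and addresses come and go. Reduced to a self-contained module, the same register/handle/unregister pattern looks roughly like the sketch below; this is a hypothetical illustration, not part of this driver.

/* Hypothetical stand-alone sketch of the inetaddr notifier pattern used
 * above: register a callback, inspect NETDEV_UP/NETDEV_DOWN events, and
 * unregister on module exit.
 */
#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>

static int demo_inetaddr_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		pr_info("demo: %pI4 added on %s\n", &ifa->ifa_address, dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("demo: %pI4 removed from %s\n", &ifa->ifa_address, dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_inetaddr_cb = {
	.notifier_call = demo_inetaddr_event,
};

static int __init demo_init(void)
{
	return register_inetaddr_notifier(&demo_inetaddr_cb);
}

static void __exit demo_exit(void)
{
	unregister_inetaddr_notifier(&demo_inetaddr_cb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
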
451724c8
SC
4308static struct pci_error_handlers qlcnic_err_handler = {
4309 .error_detected = qlcnic_io_error_detected,
4310 .slot_reset = qlcnic_io_slot_reset,
4311 .resume = qlcnic_io_resume,
4312};
af19b491
AKS
4313
4314static struct pci_driver qlcnic_driver = {
4315 .name = qlcnic_driver_name,
4316 .id_table = qlcnic_pci_tbl,
4317 .probe = qlcnic_probe,
4318 .remove = __devexit_p(qlcnic_remove),
4319#ifdef CONFIG_PM
4320 .suspend = qlcnic_suspend,
4321 .resume = qlcnic_resume,
4322#endif
451724c8
SC
4323 .shutdown = qlcnic_shutdown,
4324 .err_handler = &qlcnic_err_handler
4325
af19b491
AKS
4326};
4327
4328static int __init qlcnic_init_module(void)
4329{
0cf3a14c 4330 int ret;
af19b491
AKS
4331
4332 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4333
f7ec804a
AKS
4334 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4335 if (qlcnic_wq == NULL) {
4336 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4337 return -ENOMEM;
4338 }
4339
af19b491
AKS
4340#ifdef CONFIG_INET
4341 register_netdevice_notifier(&qlcnic_netdev_cb);
4342 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4343#endif
4344
0cf3a14c
AKS
4345 ret = pci_register_driver(&qlcnic_driver);
4346 if (ret) {
4347#ifdef CONFIG_INET
4348 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4349 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4350#endif
f7ec804a 4351 destroy_workqueue(qlcnic_wq);
0cf3a14c 4352 }
af19b491 4353
0cf3a14c 4354 return ret;
af19b491
AKS
4355}
4356
4357module_init(qlcnic_init_module);
4358
4359static void __exit qlcnic_exit_module(void)
4360{
4361
4362 pci_unregister_driver(&qlcnic_driver);
4363
4364#ifdef CONFIG_INET
4365 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4366 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4367#endif
f7ec804a 4368 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4369}
4370
4371module_exit(qlcnic_exit_module);