[deliverable/linux.git] / drivers / net / qlcnic / qlcnic_main.c
1 /*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28
29 #include "qlcnic.h"
30
31 #include <linux/dma-mapping.h>
32 #include <linux/if_vlan.h>
33 #include <net/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/inetdevice.h>
36 #include <linux/sysfs.h>
37 #include <linux/aer.h>
38
39 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42 MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44 char qlcnic_driver_name[] = "qlcnic";
45 static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48 static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50 /* Default to restricted 1G auto-neg mode */
51 static int wol_port_mode = 5;
52
53 static int use_msi = 1;
54 module_param(use_msi, int, 0644);
55 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57 static int use_msi_x = 1;
58 module_param(use_msi_x, int, 0644);
59 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61 static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62 module_param(auto_fw_reset, int, 0644);
63 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
65 static int load_fw_file;
66 module_param(load_fw_file, int, 0644);
67 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
69 static int qlcnic_config_npars;
70 module_param(qlcnic_config_npars, int, 0644);
71 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
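/*
 * Illustrative use of the module parameters above (the values shown are
 * examples, not recommendations): force MSI/legacy interrupts and load
 * firmware from the filesystem instead of flash with, e.g.,
 *
 *   modprobe qlcnic use_msi_x=0 load_fw_file=1
 */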
73 static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75 static void __devexit qlcnic_remove(struct pci_dev *pdev);
76 static int qlcnic_open(struct net_device *netdev);
77 static int qlcnic_close(struct net_device *netdev);
78 static void qlcnic_tx_timeout(struct net_device *netdev);
79 static void qlcnic_attach_work(struct work_struct *work);
80 static void qlcnic_fwinit_work(struct work_struct *work);
81 static void qlcnic_fw_poll_work(struct work_struct *work);
82 static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85 static int qlcnic_poll(struct napi_struct *napi, int budget);
86 static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87 #ifdef CONFIG_NET_POLL_CONTROLLER
88 static void qlcnic_poll_controller(struct net_device *netdev);
89 #endif
90
91 static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92 static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93 static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94 static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
96 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97 static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
98 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
100 static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101 static irqreturn_t qlcnic_intr(int irq, void *data);
102 static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103 static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106 static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107 static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110 static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111 static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
113 /* PCI Device ID Table */
114 #define ENTRY(device) \
115 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
116 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
117
118 #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
119
120 static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
121 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
122 {0,}
123 };
124
125 MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
126
127
128 void
129 qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
130 struct qlcnic_host_tx_ring *tx_ring)
131 {
132 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
133 }
134
135 static const u32 msi_tgt_status[8] = {
136 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
137 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
138 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
139 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
140 };
141
142 static const
143 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
144
145 static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
146 {
147 writel(0, sds_ring->crb_intr_mask);
148 }
149
150 static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
151 {
152 struct qlcnic_adapter *adapter = sds_ring->adapter;
153
154 writel(0x1, sds_ring->crb_intr_mask);
155
156 if (!QLCNIC_IS_MSI_FAMILY(adapter))
157 writel(0xfbff, adapter->tgt_mask_reg);
158 }
159
160 static int
161 qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
162 {
163 int size = sizeof(struct qlcnic_host_sds_ring) * count;
164
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166
167 return (recv_ctx->sds_rings == NULL);
168 }
169
170 static void
171 qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
172 {
173 if (recv_ctx->sds_rings != NULL)
174 kfree(recv_ctx->sds_rings);
175
176 recv_ctx->sds_rings = NULL;
177 }
178
179 static int
180 qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
181 {
182 int ring;
183 struct qlcnic_host_sds_ring *sds_ring;
184 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
185
186 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
187 return -ENOMEM;
188
189 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
190 sds_ring = &recv_ctx->sds_rings[ring];
191
192 if (ring == adapter->max_sds_rings - 1)
193 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
194 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
195 else
196 netif_napi_add(netdev, &sds_ring->napi,
197 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
198 }
199
200 return 0;
201 }
202
203 static void
204 qlcnic_napi_del(struct qlcnic_adapter *adapter)
205 {
206 int ring;
207 struct qlcnic_host_sds_ring *sds_ring;
208 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
209
210 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
211 sds_ring = &recv_ctx->sds_rings[ring];
212 netif_napi_del(&sds_ring->napi);
213 }
214
215 qlcnic_free_sds_rings(&adapter->recv_ctx);
216 }
217
218 static void
219 qlcnic_napi_enable(struct qlcnic_adapter *adapter)
220 {
221 int ring;
222 struct qlcnic_host_sds_ring *sds_ring;
223 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
224
225 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
226 return;
227
228 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
229 sds_ring = &recv_ctx->sds_rings[ring];
230 napi_enable(&sds_ring->napi);
231 qlcnic_enable_int(sds_ring);
232 }
233 }
234
235 static void
236 qlcnic_napi_disable(struct qlcnic_adapter *adapter)
237 {
238 int ring;
239 struct qlcnic_host_sds_ring *sds_ring;
240 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
241
242 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
243 return;
244
245 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
246 sds_ring = &recv_ctx->sds_rings[ring];
247 qlcnic_disable_int(sds_ring);
248 napi_synchronize(&sds_ring->napi);
249 napi_disable(&sds_ring->napi);
250 }
251 }
252
253 static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
254 {
255 memset(&adapter->stats, 0, sizeof(adapter->stats));
256 }
257
258 static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
259 {
260 u32 val, data;
261
262 val = adapter->ahw.board_type;
263 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
264 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
265 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
266 data = QLCNIC_PORT_MODE_802_3_AP;
267 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
268 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
269 data = QLCNIC_PORT_MODE_XG;
270 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
271 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
272 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
273 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
274 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
275 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
276 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
277 } else {
278 data = QLCNIC_PORT_MODE_AUTO_NEG;
279 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
280 }
281
282 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
283 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
284 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
286 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
287 }
288 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
289 }
290 }
291
292 static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
293 {
294 u32 control;
295 int pos;
296
297 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
298 if (pos) {
299 pci_read_config_dword(pdev, pos, &control);
300 if (enable)
301 control |= PCI_MSIX_FLAGS_ENABLE;
302 else
303 control = 0;
304 pci_write_config_dword(pdev, pos, control);
305 }
306 }
307
308 static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
309 {
310 int i;
311
312 for (i = 0; i < count; i++)
313 adapter->msix_entries[i].entry = i;
314 }
315
316 static int
317 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
318 {
319 u8 mac_addr[ETH_ALEN];
320 struct net_device *netdev = adapter->netdev;
321 struct pci_dev *pdev = adapter->pdev;
322
323 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
324 return -EIO;
325
326 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
327 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
328 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
329
330 /* set station address */
331
332 if (!is_valid_ether_addr(netdev->perm_addr))
333 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
334 netdev->dev_addr);
335
336 return 0;
337 }
338
339 static int qlcnic_set_mac(struct net_device *netdev, void *p)
340 {
341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
342 struct sockaddr *addr = p;
343
344 if (!is_valid_ether_addr(addr->sa_data))
345 return -EINVAL;
346
347 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
348 netif_device_detach(netdev);
349 qlcnic_napi_disable(adapter);
350 }
351
352 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
353 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
354 qlcnic_set_multi(adapter->netdev);
355
356 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
357 netif_device_attach(netdev);
358 qlcnic_napi_enable(adapter);
359 }
360 return 0;
361 }
362
363 static const struct net_device_ops qlcnic_netdev_ops = {
364 .ndo_open = qlcnic_open,
365 .ndo_stop = qlcnic_close,
366 .ndo_start_xmit = qlcnic_xmit_frame,
367 .ndo_get_stats = qlcnic_get_stats,
368 .ndo_validate_addr = eth_validate_addr,
369 .ndo_set_multicast_list = qlcnic_set_multi,
370 .ndo_set_mac_address = qlcnic_set_mac,
371 .ndo_change_mtu = qlcnic_change_mtu,
372 .ndo_tx_timeout = qlcnic_tx_timeout,
373 #ifdef CONFIG_NET_POLL_CONTROLLER
374 .ndo_poll_controller = qlcnic_poll_controller,
375 #endif
376 };
377
378 static struct qlcnic_nic_template qlcnic_ops = {
379 .get_mac_addr = qlcnic_get_mac_address,
380 .config_bridged_mode = qlcnic_config_bridged_mode,
381 .config_led = qlcnic_config_led,
382 .start_firmware = qlcnic_start_firmware
383 };
384
385 static struct qlcnic_nic_template qlcnic_vf_ops = {
386 .get_mac_addr = qlcnic_get_mac_address,
387 .config_bridged_mode = qlcnicvf_config_bridged_mode,
388 .config_led = qlcnicvf_config_led,
389 .start_firmware = qlcnicvf_start_firmware
390 };
391
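/*
 * qlcnic_setup_intr() - pick the interrupt scheme for this function.
 * Prefer MSI-X (one vector per SDS ring when RSS is supported), fall
 * back to MSI, and finally to the legacy INTx registers described by
 * legacy_intr[] if neither can be enabled.
 */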
392 static void
393 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
394 {
395 const struct qlcnic_legacy_intr_set *legacy_intrp;
396 struct pci_dev *pdev = adapter->pdev;
397 int err, num_msix;
398
399 if (adapter->rss_supported) {
400 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
401 MSIX_ENTRIES_PER_ADAPTER : 2;
402 } else
403 num_msix = 1;
404
405 adapter->max_sds_rings = 1;
406
407 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
408
409 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
410
411 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
412 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
413 legacy_intrp->tgt_status_reg);
414 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
415 legacy_intrp->tgt_mask_reg);
416 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
417
418 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
419 ISR_INT_STATE_REG);
420
421 qlcnic_set_msix_bit(pdev, 0);
422
423 if (adapter->msix_supported) {
424
425 qlcnic_init_msix_entries(adapter, num_msix);
426 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
427 if (err == 0) {
428 adapter->flags |= QLCNIC_MSIX_ENABLED;
429 qlcnic_set_msix_bit(pdev, 1);
430
431 if (adapter->rss_supported)
432 adapter->max_sds_rings = num_msix;
433
434 dev_info(&pdev->dev, "using msi-x interrupts\n");
435 return;
436 }
437
438 if (err > 0)
439 pci_disable_msix(pdev);
440
441 /* fall through for msi */
442 }
443
444 if (use_msi && !pci_enable_msi(pdev)) {
445 adapter->flags |= QLCNIC_MSI_ENABLED;
446 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
447 msi_tgt_status[adapter->ahw.pci_func]);
448 dev_info(&pdev->dev, "using msi interrupts\n");
449 adapter->msix_entries[0].vector = pdev->irq;
450 return;
451 }
452
453 dev_info(&pdev->dev, "using legacy interrupts\n");
454 adapter->msix_entries[0].vector = pdev->irq;
455 }
456
457 static void
458 qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
459 {
460 if (adapter->flags & QLCNIC_MSIX_ENABLED)
461 pci_disable_msix(adapter->pdev);
462 if (adapter->flags & QLCNIC_MSI_ENABLED)
463 pci_disable_msi(adapter->pdev);
464 }
465
466 static void
467 qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
468 {
469 if (adapter->ahw.pci_base0 != NULL)
470 iounmap(adapter->ahw.pci_base0);
471 }
472
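/*
 * qlcnic_init_pci_info() - cache per-PCI-function (NPAR) information.
 * Allocates the npars[] and eswitch[] arrays, fills them from the
 * firmware's PCI info and marks every eswitch port enabled; on failure
 * the arrays are freed again.
 */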
473 static int
474 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
475 {
476 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
477 int i, ret = 0, err;
478 u8 pfn;
479
480 if (!adapter->npars)
481 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
482 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
483 if (!adapter->npars)
484 return -ENOMEM;
485
486 if (!adapter->eswitch)
487 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
488 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
489 if (!adapter->eswitch) {
490 err = -ENOMEM;
491 goto err_eswitch;
492 }
493
494 ret = qlcnic_get_pci_info(adapter, pci_info);
495 if (!ret) {
496 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
497 pfn = pci_info[i].id;
498 if (pfn >= QLCNIC_MAX_PCI_FUNC)
499 return QL_STATUS_INVALID_PARAM;
500 adapter->npars[pfn].active = pci_info[i].active;
501 adapter->npars[pfn].type = pci_info[i].type;
502 adapter->npars[pfn].phy_port = pci_info[i].default_port;
503 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
504 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
505 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
506 }
507
508 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
509 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
510
511 return ret;
512 }
513
514 kfree(adapter->eswitch);
515 adapter->eswitch = NULL;
516 err_eswitch:
517 kfree(adapter->npars);
518
519 return ret;
520 }
521
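/*
 * qlcnic_set_function_modes() - program the privilege level of the PCI
 * functions in the QLCNIC_DRV_OP_MODE register, either from the
 * qlcnic_config_npars module parameter or by claiming management mode
 * for this function only.  Runs under the firmware API lock and is
 * skipped while other driver instances hold a reference.
 */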
522 static int
523 qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
524 {
525 u8 id;
526 u32 ref_count;
527 int i, ret = 1;
528 u32 data = QLCNIC_MGMT_FUNC;
529 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
530
531 /* If other drivers are not in use, set their privilege level */
532 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
533 ret = qlcnic_api_lock(adapter);
534 if (ret)
535 goto err_lock;
536 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
537 goto err_npar;
538
539 if (qlcnic_config_npars) {
540 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
541 id = i;
542 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
543 id == adapter->ahw.pci_func)
544 continue;
545 data |= (qlcnic_config_npars &
546 QLC_DEV_SET_DRV(0xf, id));
547 }
548 } else {
549 data = readl(priv_op);
550 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
551 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
552 adapter->ahw.pci_func));
553 }
554 writel(data, priv_op);
555 err_npar:
556 qlcnic_api_unlock(adapter);
557 err_lock:
558 return ret;
559 }
560
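/*
 * qlcnic_get_driver_mode() - read the firmware HAL version, derive the
 * PCI function number from the MSI-X table offset, and choose between
 * the bare-metal (qlcnic_ops) and virtual-function (qlcnic_vf_ops)
 * templates based on the function's privilege level.  Returns the HAL
 * version, or 0 on an unknown privilege level.
 */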
561 static u32
562 qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
563 {
564 void __iomem *msix_base_addr;
565 void __iomem *priv_op;
566 struct qlcnic_info nic_info;
567 u32 func;
568 u32 msix_base;
569 u32 op_mode, priv_level;
570
571 /* Determine FW API version */
572 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
573
574 /* Find PCI function number */
575 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
576 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
577 msix_base = readl(msix_base_addr);
578 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
579 adapter->ahw.pci_func = func;
580
581 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
582 adapter->capabilities = nic_info.capabilities;
583
584 if (adapter->capabilities & BIT_6)
585 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
586 else
587 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
588 }
589
590 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
591 adapter->nic_ops = &qlcnic_ops;
592 return adapter->fw_hal_version;
593 }
594
595 /* Determine function privilege level */
596 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
597 op_mode = readl(priv_op);
598 if (op_mode == QLC_DEV_DRV_DEFAULT)
599 priv_level = QLCNIC_MGMT_FUNC;
600 else
601 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
602
603 switch (priv_level) {
604 case QLCNIC_MGMT_FUNC:
605 adapter->op_mode = QLCNIC_MGMT_FUNC;
606 adapter->nic_ops = &qlcnic_ops;
607 qlcnic_init_pci_info(adapter);
608 /* Set privilege level for other functions */
609 qlcnic_set_function_modes(adapter);
610 dev_info(&adapter->pdev->dev,
611 "HAL Version: %d, Management function\n",
612 adapter->fw_hal_version);
613 break;
614 case QLCNIC_PRIV_FUNC:
615 adapter->op_mode = QLCNIC_PRIV_FUNC;
616 dev_info(&adapter->pdev->dev,
617 "HAL Version: %d, Privileged function\n",
618 adapter->fw_hal_version);
619 adapter->nic_ops = &qlcnic_ops;
620 break;
621 case QLCNIC_NON_PRIV_FUNC:
622 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
623 dev_info(&adapter->pdev->dev,
624 "HAL Version: %d Non Privileged function\n",
625 adapter->fw_hal_version);
626 adapter->nic_ops = &qlcnic_vf_ops;
627 break;
628 default:
629 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
630 priv_level);
631 return 0;
632 }
633 return adapter->fw_hal_version;
634 }
635
636 static int
637 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
638 {
639 void __iomem *mem_ptr0 = NULL;
640 resource_size_t mem_base;
641 unsigned long mem_len, pci_len0 = 0;
642
643 struct pci_dev *pdev = adapter->pdev;
644
645 /* remap phys address */
646 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
647 mem_len = pci_resource_len(pdev, 0);
648
649 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
650
651 mem_ptr0 = pci_ioremap_bar(pdev, 0);
652 if (mem_ptr0 == NULL) {
653 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
654 return -EIO;
655 }
656 pci_len0 = mem_len;
657 } else {
658 return -EIO;
659 }
660
661 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
662
663 adapter->ahw.pci_base0 = mem_ptr0;
664 adapter->ahw.pci_len0 = pci_len0;
665
666 if (!qlcnic_get_driver_mode(adapter)) {
667 iounmap(adapter->ahw.pci_base0);
668 return -EIO;
669 }
670
671 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
672 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
673
674 return 0;
675 }
676
677 static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
678 {
679 struct pci_dev *pdev = adapter->pdev;
680 int i, found = 0;
681
682 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
683 if (qlcnic_boards[i].vendor == pdev->vendor &&
684 qlcnic_boards[i].device == pdev->device &&
685 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
686 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
687 sprintf(name, "%pM: %s",
688 adapter->mac_addr,
689 qlcnic_boards[i].short_name);
690 found = 1;
691 break;
692 }
693
694 }
695
696 if (!found)
697 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
698 }
699
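/*
 * qlcnic_check_options() - read the board serial number and firmware
 * version, size the RX descriptor rings for the port type, and pull
 * capability limits (queues, MAC filters, MTU) from the firmware.
 */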
700 static void
701 qlcnic_check_options(struct qlcnic_adapter *adapter)
702 {
703 u32 fw_major, fw_minor, fw_build;
704 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
705 char serial_num[32];
706 int i, offset, val;
707 int *ptr32;
708 struct pci_dev *pdev = adapter->pdev;
709 struct qlcnic_info nic_info;
710 adapter->driver_mismatch = 0;
711
712 ptr32 = (int *)&serial_num;
713 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
714 for (i = 0; i < 8; i++) {
715 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
716 dev_err(&pdev->dev, "error reading board info\n");
717 adapter->driver_mismatch = 1;
718 return;
719 }
720 ptr32[i] = cpu_to_le32(val);
721 offset += sizeof(u32);
722 }
723
724 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
725 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
726 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
727
728 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
729
730 if (adapter->portnum == 0) {
731 get_brd_name(adapter, brd_name);
732
733 pr_info("%s: %s Board Chip rev 0x%x\n",
734 module_name(THIS_MODULE),
735 brd_name, adapter->ahw.revision_id);
736 }
737
738 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
739 fw_major, fw_minor, fw_build);
740
741 adapter->flags &= ~QLCNIC_LRO_ENABLED;
742
743 if (adapter->ahw.port_type == QLCNIC_XGBE) {
744 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
745 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
746 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
747 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
748 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
749 }
750
751 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
752 adapter->physical_port = nic_info.phys_port;
753 adapter->switch_mode = nic_info.switch_mode;
754 adapter->max_tx_ques = nic_info.max_tx_ques;
755 adapter->max_rx_ques = nic_info.max_rx_ques;
756 adapter->capabilities = nic_info.capabilities;
757 adapter->max_mac_filters = nic_info.max_mac_filters;
758 adapter->max_mtu = nic_info.max_mtu;
759 }
760
761 adapter->msix_supported = !!use_msi_x;
762 adapter->rss_supported = !!use_msi_x;
763
764 adapter->num_txd = MAX_CMD_DESCRIPTORS;
765
766 adapter->max_rds_rings = 2;
767 }
768
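/*
 * qlcnic_reset_npar_config() - after a firmware reset on an
 * eswitch-enabled management function, reprogram each NIC NPAR's
 * bandwidth limits and port mirroring, and reset its per-port defaults.
 */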
769 static int
770 qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
771 {
772 int i, err = 0;
773 struct qlcnic_npar_info *npar;
774 struct qlcnic_info nic_info;
775
776 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
777 !adapter->need_fw_reset)
778 return 0;
779
780 if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
781 /* Set the NPAR config data after FW reset */
782 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
783 npar = &adapter->npars[i];
784 if (npar->type != QLCNIC_TYPE_NIC)
785 continue;
786 err = qlcnic_get_nic_info(adapter, &nic_info, i);
787 if (err)
788 goto err_out;
789 nic_info.min_tx_bw = npar->min_bw;
790 nic_info.max_tx_bw = npar->max_bw;
791 err = qlcnic_set_nic_info(adapter, &nic_info);
792 if (err)
793 goto err_out;
794
795 if (npar->enable_pm) {
796 err = qlcnic_config_port_mirroring(adapter,
797 npar->dest_npar, 1, i);
798 if (err)
799 goto err_out;
800
801 }
802 npar->mac_learning = DEFAULT_MAC_LEARN;
803 npar->host_vlan_tag = 0;
804 npar->promisc_mode = 0;
805 npar->discard_tagged = 0;
806 npar->vlan_id = 0;
807 }
808 }
809 err_out:
810 return err;
811 }
812
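/*
 * qlcnic_start_firmware() - bring the firmware up if this function is
 * allowed to: load the image from flash or from a firmware file,
 * reinitialize the pegs when this is not the first boot after power-up,
 * then handshake with the card and mark the device READY.  On failure
 * the device state is set to FAILED.
 */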
813 static int
814 qlcnic_start_firmware(struct qlcnic_adapter *adapter)
815 {
816 int val, err, first_boot;
817
818 err = qlcnic_can_start_firmware(adapter);
819 if (err < 0)
820 return err;
821 else if (!err)
822 goto wait_init;
823
824 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
825 if (first_boot == 0x55555555)
826 /* This is the first boot after power up */
827 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
828
829 if (load_fw_file)
830 qlcnic_request_firmware(adapter);
831 else {
832 if (qlcnic_check_flash_fw_ver(adapter))
833 goto err_out;
834
835 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
836 }
837
838 err = qlcnic_need_fw_reset(adapter);
839 if (err < 0)
840 goto err_out;
841 if (err == 0)
842 goto wait_init;
843
844 if (first_boot != 0x55555555) {
845 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
846 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
847 qlcnic_pinit_from_rom(adapter);
848 msleep(1);
849 }
850
851 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
852 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
853
854 qlcnic_set_port_mode(adapter);
855
856 err = qlcnic_load_firmware(adapter);
857 if (err)
858 goto err_out;
859
860 qlcnic_release_firmware(adapter);
861
862 val = (_QLCNIC_LINUX_MAJOR << 16)
863 | (_QLCNIC_LINUX_MINOR << 8)
864 | (_QLCNIC_LINUX_SUBVERSION);
865 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
866
867 wait_init:
868 /* Handshake with the card before we register the devices. */
869 err = qlcnic_init_firmware(adapter);
870 if (err)
871 goto err_out;
872
873 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
874 qlcnic_idc_debug_info(adapter, 1);
875
876 qlcnic_check_options(adapter);
877 if (qlcnic_reset_npar_config(adapter))
878 goto err_out;
879 qlcnic_dev_set_npar_ready(adapter);
880
881 adapter->need_fw_reset = 0;
882
883 qlcnic_release_firmware(adapter);
884 return 0;
885
886 err_out:
887 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
888 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
889 qlcnic_release_firmware(adapter);
890 return err;
891 }
892
893 static int
894 qlcnic_request_irq(struct qlcnic_adapter *adapter)
895 {
896 irq_handler_t handler;
897 struct qlcnic_host_sds_ring *sds_ring;
898 int err, ring;
899
900 unsigned long flags = 0;
901 struct net_device *netdev = adapter->netdev;
902 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
903
904 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
905 handler = qlcnic_tmp_intr;
906 if (!QLCNIC_IS_MSI_FAMILY(adapter))
907 flags |= IRQF_SHARED;
908
909 } else {
910 if (adapter->flags & QLCNIC_MSIX_ENABLED)
911 handler = qlcnic_msix_intr;
912 else if (adapter->flags & QLCNIC_MSI_ENABLED)
913 handler = qlcnic_msi_intr;
914 else {
915 flags |= IRQF_SHARED;
916 handler = qlcnic_intr;
917 }
918 }
919 adapter->irq = netdev->irq;
920
921 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
922 sds_ring = &recv_ctx->sds_rings[ring];
923 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
924 err = request_irq(sds_ring->irq, handler,
925 flags, sds_ring->name, sds_ring);
926 if (err)
927 return err;
928 }
929
930 return 0;
931 }
932
933 static void
934 qlcnic_free_irq(struct qlcnic_adapter *adapter)
935 {
936 int ring;
937 struct qlcnic_host_sds_ring *sds_ring;
938
939 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
940
941 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
942 sds_ring = &recv_ctx->sds_rings[ring];
943 free_irq(sds_ring->irq, sds_ring);
944 }
945 }
946
947 static void
948 qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
949 {
950 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
951 adapter->coal.normal.data.rx_time_us =
952 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
953 adapter->coal.normal.data.rx_packets =
954 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
955 adapter->coal.normal.data.tx_time_us =
956 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
957 adapter->coal.normal.data.tx_packets =
958 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
959 }
960
961 static int
962 __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
963 {
964 int ring;
965 struct qlcnic_host_rds_ring *rds_ring;
966
967 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
968 return -EIO;
969
970 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
971 return 0;
972
973 if (qlcnic_fw_create_ctx(adapter))
974 return -EIO;
975
976 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
977 rds_ring = &adapter->recv_ctx.rds_rings[ring];
978 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
979 }
980
981 qlcnic_set_multi(netdev);
982 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
983
984 adapter->ahw.linkup = 0;
985
986 if (adapter->max_sds_rings > 1)
987 qlcnic_config_rss(adapter, 1);
988
989 qlcnic_config_intr_coalesce(adapter);
990
991 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
992 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
993
994 qlcnic_napi_enable(adapter);
995
996 qlcnic_linkevent_request(adapter, 1);
997
998 adapter->reset_context = 0;
999 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1000 return 0;
1001 }
1002
1003 /* Used during resume and by the firmware recovery path. */
1004
1005 static int
1006 qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1007 {
1008 int err = 0;
1009
1010 rtnl_lock();
1011 if (netif_running(netdev))
1012 err = __qlcnic_up(adapter, netdev);
1013 rtnl_unlock();
1014
1015 return err;
1016 }
1017
1018 static void
1019 __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1020 {
1021 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1022 return;
1023
1024 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1025 return;
1026
1027 smp_mb();
1028 spin_lock(&adapter->tx_clean_lock);
1029 netif_carrier_off(netdev);
1030 netif_tx_disable(netdev);
1031
1032 qlcnic_free_mac_list(adapter);
1033
1034 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1035
1036 qlcnic_napi_disable(adapter);
1037
1038 qlcnic_fw_destroy_ctx(adapter);
1039
1040 qlcnic_reset_rx_buffers_list(adapter);
1041 qlcnic_release_tx_buffers(adapter);
1042 spin_unlock(&adapter->tx_clean_lock);
1043 }
1044
1045 /* Used during suspend and by the firmware recovery path. */
1046
1047 static void
1048 qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1049 {
1050 rtnl_lock();
1051 if (netif_running(netdev))
1052 __qlcnic_down(adapter, netdev);
1053 rtnl_unlock();
1054
1055 }
1056
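/*
 * qlcnic_attach() - allocate everything needed to carry traffic: NAPI
 * contexts, software and hardware ring resources, IRQs, default
 * interrupt coalescing and the sysfs entries.  Balanced by
 * qlcnic_detach().
 */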
1057 static int
1058 qlcnic_attach(struct qlcnic_adapter *adapter)
1059 {
1060 struct net_device *netdev = adapter->netdev;
1061 struct pci_dev *pdev = adapter->pdev;
1062 int err;
1063
1064 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1065 return 0;
1066
1067 err = qlcnic_napi_add(adapter, netdev);
1068 if (err)
1069 return err;
1070
1071 err = qlcnic_alloc_sw_resources(adapter);
1072 if (err) {
1073 dev_err(&pdev->dev, "Failed to allocate sw resources\n");
1074 goto err_out_napi_del;
1075 }
1076
1077 err = qlcnic_alloc_hw_resources(adapter);
1078 if (err) {
1079 dev_err(&pdev->dev, "Failed to allocate hw resources\n");
1080 goto err_out_free_sw;
1081 }
1082
1083 err = qlcnic_request_irq(adapter);
1084 if (err) {
1085 dev_err(&pdev->dev, "failed to setup interrupt\n");
1086 goto err_out_free_hw;
1087 }
1088
1089 qlcnic_init_coalesce_defaults(adapter);
1090
1091 qlcnic_create_sysfs_entries(adapter);
1092
1093 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1094 return 0;
1095
1096 err_out_free_hw:
1097 qlcnic_free_hw_resources(adapter);
1098 err_out_free_sw:
1099 qlcnic_free_sw_resources(adapter);
1100 err_out_napi_del:
1101 qlcnic_napi_del(adapter);
1102 return err;
1103 }
1104
1105 static void
1106 qlcnic_detach(struct qlcnic_adapter *adapter)
1107 {
1108 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1109 return;
1110
1111 qlcnic_remove_sysfs_entries(adapter);
1112
1113 qlcnic_free_hw_resources(adapter);
1114 qlcnic_release_rx_buffers(adapter);
1115 qlcnic_free_irq(adapter);
1116 qlcnic_napi_del(adapter);
1117 qlcnic_free_sw_resources(adapter);
1118
1119 adapter->is_up = 0;
1120 }
1121
1122 void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1123 {
1124 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1125 struct qlcnic_host_sds_ring *sds_ring;
1126 int ring;
1127
1128 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1129 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1130 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1131 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1132 qlcnic_disable_int(sds_ring);
1133 }
1134 }
1135
1136 qlcnic_fw_destroy_ctx(adapter);
1137
1138 qlcnic_detach(adapter);
1139
1140 adapter->diag_test = 0;
1141 adapter->max_sds_rings = max_sds_rings;
1142
1143 if (qlcnic_attach(adapter))
1144 goto out;
1145
1146 if (netif_running(netdev))
1147 __qlcnic_up(adapter, netdev);
1148 out:
1149 netif_device_attach(netdev);
1150 }
1151
1152 int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1153 {
1154 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1155 struct qlcnic_host_sds_ring *sds_ring;
1156 struct qlcnic_host_rds_ring *rds_ring;
1157 int ring;
1158 int ret;
1159
1160 netif_device_detach(netdev);
1161
1162 if (netif_running(netdev))
1163 __qlcnic_down(adapter, netdev);
1164
1165 qlcnic_detach(adapter);
1166
1167 adapter->max_sds_rings = 1;
1168 adapter->diag_test = test;
1169
1170 ret = qlcnic_attach(adapter);
1171 if (ret) {
1172 netif_device_attach(netdev);
1173 return ret;
1174 }
1175
1176 ret = qlcnic_fw_create_ctx(adapter);
1177 if (ret) {
1178 qlcnic_detach(adapter);
1179 netif_device_attach(netdev);
1180 return ret;
1181 }
1182
1183 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1184 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1185 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1186 }
1187
1188 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1189 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1190 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1191 qlcnic_enable_int(sds_ring);
1192 }
1193 }
1194 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1195
1196 return 0;
1197 }
1198
1199 /* Reset context in hardware only */
1200 static int
1201 qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1202 {
1203 struct net_device *netdev = adapter->netdev;
1204
1205 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1206 return -EBUSY;
1207
1208 netif_device_detach(netdev);
1209
1210 qlcnic_down(adapter, netdev);
1211
1212 qlcnic_up(adapter, netdev);
1213
1214 netif_device_attach(netdev);
1215
1216 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1217 return 0;
1218 }
1219
1220 int
1221 qlcnic_reset_context(struct qlcnic_adapter *adapter)
1222 {
1223 int err = 0;
1224 struct net_device *netdev = adapter->netdev;
1225
1226 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1227 return -EBUSY;
1228
1229 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1230
1231 netif_device_detach(netdev);
1232
1233 if (netif_running(netdev))
1234 __qlcnic_down(adapter, netdev);
1235
1236 qlcnic_detach(adapter);
1237
1238 if (netif_running(netdev)) {
1239 err = qlcnic_attach(adapter);
1240 if (!err)
1241 __qlcnic_up(adapter, netdev);
1242 }
1243
1244 netif_device_attach(netdev);
1245 }
1246
1247 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1248 return err;
1249 }
1250
1251 static int
1252 qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1253 struct net_device *netdev, u8 pci_using_dac)
1254 {
1255 int err;
1256 struct pci_dev *pdev = adapter->pdev;
1257
1258 adapter->rx_csum = 1;
1259 adapter->mc_enabled = 0;
1260 adapter->max_mc_count = 38;
1261
1262 netdev->netdev_ops = &qlcnic_netdev_ops;
1263 netdev->watchdog_timeo = 5*HZ;
1264
1265 qlcnic_change_mtu(netdev, netdev->mtu);
1266
1267 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1268
1269 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1270 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1271 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1272 NETIF_F_IPV6_CSUM);
1273
1274 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1275 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1276 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1277 }
1278
1279 if (pci_using_dac) {
1280 netdev->features |= NETIF_F_HIGHDMA;
1281 netdev->vlan_features |= NETIF_F_HIGHDMA;
1282 }
1283
1284 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1285 netdev->features |= (NETIF_F_HW_VLAN_TX);
1286
1287 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1288 netdev->features |= NETIF_F_LRO;
1289
1290 netdev->irq = adapter->msix_entries[0].vector;
1291
1292 if (qlcnic_read_mac_addr(adapter))
1293 dev_warn(&pdev->dev, "failed to read mac addr\n");
1294
1295 netif_carrier_off(netdev);
1296 netif_stop_queue(netdev);
1297
1298 err = register_netdev(netdev);
1299 if (err) {
1300 dev_err(&pdev->dev, "failed to register net device\n");
1301 return err;
1302 }
1303
1304 return 0;
1305 }
1306
1307 static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1308 {
1309 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1310 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1311 *pci_using_dac = 1;
1312 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1313 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1314 *pci_using_dac = 0;
1315 else {
1316 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1317 return -EIO;
1318 }
1319
1320 return 0;
1321 }
1322
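/*
 * qlcnic_probe() - PCI probe entry point: map BAR 0, determine the
 * driver mode, start the firmware, set up interrupts, register the
 * net_device and kick off the periodic firmware poll work.
 */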
1323 static int __devinit
1324 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1325 {
1326 struct net_device *netdev = NULL;
1327 struct qlcnic_adapter *adapter = NULL;
1328 int err;
1329 uint8_t revision_id;
1330 uint8_t pci_using_dac;
1331
1332 err = pci_enable_device(pdev);
1333 if (err)
1334 return err;
1335
1336 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1337 err = -ENODEV;
1338 goto err_out_disable_pdev;
1339 }
1340
1341 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1342 if (err)
1343 goto err_out_disable_pdev;
1344
1345 err = pci_request_regions(pdev, qlcnic_driver_name);
1346 if (err)
1347 goto err_out_disable_pdev;
1348
1349 pci_set_master(pdev);
1350 pci_enable_pcie_error_reporting(pdev);
1351
1352 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1353 if (!netdev) {
1354 dev_err(&pdev->dev, "failed to allocate net_device\n");
1355 err = -ENOMEM;
1356 goto err_out_free_res;
1357 }
1358
1359 SET_NETDEV_DEV(netdev, &pdev->dev);
1360
1361 adapter = netdev_priv(netdev);
1362 adapter->netdev = netdev;
1363 adapter->pdev = pdev;
1364 adapter->dev_rst_time = jiffies;
1365
1366 revision_id = pdev->revision;
1367 adapter->ahw.revision_id = revision_id;
1368
1369 rwlock_init(&adapter->ahw.crb_lock);
1370 mutex_init(&adapter->ahw.mem_lock);
1371
1372 spin_lock_init(&adapter->tx_clean_lock);
1373 INIT_LIST_HEAD(&adapter->mac_list);
1374
1375 err = qlcnic_setup_pci_map(adapter);
1376 if (err)
1377 goto err_out_free_netdev;
1378
1379 /* This will be reset for mezz cards */
1380 adapter->portnum = adapter->ahw.pci_func;
1381
1382 err = qlcnic_get_board_info(adapter);
1383 if (err) {
1384 dev_err(&pdev->dev, "Error getting board config info.\n");
1385 goto err_out_iounmap;
1386 }
1387
1388 if (qlcnic_read_mac_addr(adapter))
1389 dev_warn(&pdev->dev, "failed to read mac addr\n");
1390
1391 if (qlcnic_setup_idc_param(adapter))
1392 goto err_out_iounmap;
1393
1394 err = adapter->nic_ops->start_firmware(adapter);
1395 if (err) {
1396 dev_err(&pdev->dev, "Loading firmware failed. Please reboot.\n");
1397 goto err_out_decr_ref;
1398 }
1399
1400 qlcnic_clear_stats(adapter);
1401
1402 qlcnic_setup_intr(adapter);
1403
1404 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1405 if (err)
1406 goto err_out_disable_msi;
1407
1408 pci_set_drvdata(pdev, adapter);
1409
1410 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1411
1412 switch (adapter->ahw.port_type) {
1413 case QLCNIC_GBE:
1414 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1415 adapter->netdev->name);
1416 break;
1417 case QLCNIC_XGBE:
1418 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1419 adapter->netdev->name);
1420 break;
1421 }
1422
1423 qlcnic_create_diag_entries(adapter);
1424
1425 return 0;
1426
1427 err_out_disable_msi:
1428 qlcnic_teardown_intr(adapter);
1429
1430 err_out_decr_ref:
1431 qlcnic_clr_all_drv_state(adapter);
1432
1433 err_out_iounmap:
1434 qlcnic_cleanup_pci_map(adapter);
1435
1436 err_out_free_netdev:
1437 free_netdev(netdev);
1438
1439 err_out_free_res:
1440 pci_release_regions(pdev);
1441
1442 err_out_disable_pdev:
1443 pci_set_drvdata(pdev, NULL);
1444 pci_disable_device(pdev);
1445 return err;
1446 }
1447
1448 static void __devexit qlcnic_remove(struct pci_dev *pdev)
1449 {
1450 struct qlcnic_adapter *adapter;
1451 struct net_device *netdev;
1452
1453 adapter = pci_get_drvdata(pdev);
1454 if (adapter == NULL)
1455 return;
1456
1457 netdev = adapter->netdev;
1458
1459 qlcnic_cancel_fw_work(adapter);
1460
1461 unregister_netdev(netdev);
1462
1463 qlcnic_detach(adapter);
1464
1465 if (adapter->npars != NULL)
1466 kfree(adapter->npars);
1467 if (adapter->eswitch != NULL)
1468 kfree(adapter->eswitch);
1469
1470 qlcnic_clr_all_drv_state(adapter);
1471
1472 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1473
1474 qlcnic_teardown_intr(adapter);
1475
1476 qlcnic_remove_diag_entries(adapter);
1477
1478 qlcnic_cleanup_pci_map(adapter);
1479
1480 qlcnic_release_firmware(adapter);
1481
1482 pci_disable_pcie_error_reporting(pdev);
1483 pci_release_regions(pdev);
1484 pci_disable_device(pdev);
1485 pci_set_drvdata(pdev, NULL);
1486
1487 free_netdev(netdev);
1488 }
1489 static int __qlcnic_shutdown(struct pci_dev *pdev)
1490 {
1491 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1492 struct net_device *netdev = adapter->netdev;
1493 int retval;
1494
1495 netif_device_detach(netdev);
1496
1497 qlcnic_cancel_fw_work(adapter);
1498
1499 if (netif_running(netdev))
1500 qlcnic_down(adapter, netdev);
1501
1502 qlcnic_clr_all_drv_state(adapter);
1503
1504 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1505
1506 retval = pci_save_state(pdev);
1507 if (retval)
1508 return retval;
1509
1510 if (qlcnic_wol_supported(adapter)) {
1511 pci_enable_wake(pdev, PCI_D3cold, 1);
1512 pci_enable_wake(pdev, PCI_D3hot, 1);
1513 }
1514
1515 return 0;
1516 }
1517
1518 static void qlcnic_shutdown(struct pci_dev *pdev)
1519 {
1520 if (__qlcnic_shutdown(pdev))
1521 return;
1522
1523 pci_disable_device(pdev);
1524 }
1525
1526 #ifdef CONFIG_PM
1527 static int
1528 qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1529 {
1530 int retval;
1531
1532 retval = __qlcnic_shutdown(pdev);
1533 if (retval)
1534 return retval;
1535
1536 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1537 return 0;
1538 }
1539
1540 static int
1541 qlcnic_resume(struct pci_dev *pdev)
1542 {
1543 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1544 struct net_device *netdev = adapter->netdev;
1545 int err;
1546
1547 err = pci_enable_device(pdev);
1548 if (err)
1549 return err;
1550
1551 pci_set_power_state(pdev, PCI_D0);
1552 pci_set_master(pdev);
1553 pci_restore_state(pdev);
1554
1555 err = adapter->nic_ops->start_firmware(adapter);
1556 if (err) {
1557 dev_err(&pdev->dev, "failed to start firmware\n");
1558 return err;
1559 }
1560
1561 if (netif_running(netdev)) {
1562 err = qlcnic_up(adapter, netdev);
1563 if (err)
1564 goto done;
1565
1566 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1567 }
1568 done:
1569 netif_device_attach(netdev);
1570 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1571 return 0;
1572 }
1573 #endif
1574
1575 static int qlcnic_open(struct net_device *netdev)
1576 {
1577 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1578 int err;
1579
1580 if (adapter->driver_mismatch)
1581 return -EIO;
1582
1583 err = qlcnic_attach(adapter);
1584 if (err)
1585 return err;
1586
1587 err = __qlcnic_up(adapter, netdev);
1588 if (err)
1589 goto err_out;
1590
1591 netif_start_queue(netdev);
1592
1593 return 0;
1594
1595 err_out:
1596 qlcnic_detach(adapter);
1597 return err;
1598 }
1599
1600 /*
1601 * qlcnic_close - disable the network interface (ndo_stop entry point)
1602 */
1603 static int qlcnic_close(struct net_device *netdev)
1604 {
1605 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1606
1607 __qlcnic_down(adapter, netdev);
1608 return 0;
1609 }
1610
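/*
 * qlcnic_tso_check() - finish the first TX descriptor: select the
 * opcode (plain, checksum offload or LSO), handle VLAN tagging, and for
 * LSO copy the MAC/IP/TCP headers (plus a synthesized VLAN header when
 * the tag is out of band) into the descriptor ring for the firmware.
 */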
1611 static void
1612 qlcnic_tso_check(struct net_device *netdev,
1613 struct qlcnic_host_tx_ring *tx_ring,
1614 struct cmd_desc_type0 *first_desc,
1615 struct sk_buff *skb)
1616 {
1617 u8 opcode = TX_ETHER_PKT;
1618 __be16 protocol = skb->protocol;
1619 u16 flags = 0, vid = 0;
1620 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1621 struct cmd_desc_type0 *hwdesc;
1622 struct vlan_ethhdr *vh;
1623 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1624 u32 producer = tx_ring->producer;
1625
1626 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1627
1628 vh = (struct vlan_ethhdr *)skb->data;
1629 protocol = vh->h_vlan_encapsulated_proto;
1630 flags = FLAGS_VLAN_TAGGED;
1631
1632 } else if (vlan_tx_tag_present(skb)) {
1633
1634 flags = FLAGS_VLAN_OOB;
1635 vid = vlan_tx_tag_get(skb);
1636 qlcnic_set_tx_vlan_tci(first_desc, vid);
1637 vlan_oob = 1;
1638 }
1639
1640 if (*(skb->data) & BIT_0) {
1641 flags |= BIT_0;
1642 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1643 }
1644
1645 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1646 skb_shinfo(skb)->gso_size > 0) {
1647
1648 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1649
1650 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1651 first_desc->total_hdr_length = hdr_len;
1652 if (vlan_oob) {
1653 first_desc->total_hdr_length += VLAN_HLEN;
1654 first_desc->tcp_hdr_offset = VLAN_HLEN;
1655 first_desc->ip_hdr_offset = VLAN_HLEN;
1656 /* Only in case of TSO on vlan device */
1657 flags |= FLAGS_VLAN_TAGGED;
1658 }
1659
1660 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1661 TX_TCP_LSO6 : TX_TCP_LSO;
1662 tso = 1;
1663
1664 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1665 u8 l4proto;
1666
1667 if (protocol == cpu_to_be16(ETH_P_IP)) {
1668 l4proto = ip_hdr(skb)->protocol;
1669
1670 if (l4proto == IPPROTO_TCP)
1671 opcode = TX_TCP_PKT;
1672 else if (l4proto == IPPROTO_UDP)
1673 opcode = TX_UDP_PKT;
1674 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1675 l4proto = ipv6_hdr(skb)->nexthdr;
1676
1677 if (l4proto == IPPROTO_TCP)
1678 opcode = TX_TCPV6_PKT;
1679 else if (l4proto == IPPROTO_UDP)
1680 opcode = TX_UDPV6_PKT;
1681 }
1682 }
1683
1684 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1685 first_desc->ip_hdr_offset += skb_network_offset(skb);
1686 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1687
1688 if (!tso)
1689 return;
1690
1691 /* For LSO, we need to copy the MAC/IP/TCP headers into
1692 * the descriptor ring
1693 */
1694 copied = 0;
1695 offset = 2;
1696
1697 if (vlan_oob) {
1698 /* Create a TSO vlan header template for firmware */
1699
1700 hwdesc = &tx_ring->desc_head[producer];
1701 tx_ring->cmd_buf_arr[producer].skb = NULL;
1702
1703 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1704 hdr_len + VLAN_HLEN);
1705
1706 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1707 skb_copy_from_linear_data(skb, vh, 12);
1708 vh->h_vlan_proto = htons(ETH_P_8021Q);
1709 vh->h_vlan_TCI = htons(vid);
1710 skb_copy_from_linear_data_offset(skb, 12,
1711 (char *)vh + 16, copy_len - 16);
1712
1713 copied = copy_len - VLAN_HLEN;
1714 offset = 0;
1715
1716 producer = get_next_index(producer, tx_ring->num_desc);
1717 }
1718
1719 while (copied < hdr_len) {
1720
1721 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1722 (hdr_len - copied));
1723
1724 hwdesc = &tx_ring->desc_head[producer];
1725 tx_ring->cmd_buf_arr[producer].skb = NULL;
1726
1727 skb_copy_from_linear_data_offset(skb, copied,
1728 (char *)hwdesc + offset, copy_len);
1729
1730 copied += copy_len;
1731 offset = 0;
1732
1733 producer = get_next_index(producer, tx_ring->num_desc);
1734 }
1735
1736 tx_ring->producer = producer;
1737 barrier();
1738 adapter->stats.lso_frames++;
1739 }
1740
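/*
 * qlcnic_map_tx_skb() - DMA-map the skb head and all of its fragments
 * into pbuf->frag_array[], unwinding the mappings already made if any
 * single mapping fails.
 */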
1741 static int
1742 qlcnic_map_tx_skb(struct pci_dev *pdev,
1743 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1744 {
1745 struct qlcnic_skb_frag *nf;
1746 struct skb_frag_struct *frag;
1747 int i, nr_frags;
1748 dma_addr_t map;
1749
1750 nr_frags = skb_shinfo(skb)->nr_frags;
1751 nf = &pbuf->frag_array[0];
1752
1753 map = pci_map_single(pdev, skb->data,
1754 skb_headlen(skb), PCI_DMA_TODEVICE);
1755 if (pci_dma_mapping_error(pdev, map))
1756 goto out_err;
1757
1758 nf->dma = map;
1759 nf->length = skb_headlen(skb);
1760
1761 for (i = 0; i < nr_frags; i++) {
1762 frag = &skb_shinfo(skb)->frags[i];
1763 nf = &pbuf->frag_array[i+1];
1764
1765 map = pci_map_page(pdev, frag->page, frag->page_offset,
1766 frag->size, PCI_DMA_TODEVICE);
1767 if (pci_dma_mapping_error(pdev, map))
1768 goto unwind;
1769
1770 nf->dma = map;
1771 nf->length = frag->size;
1772 }
1773
1774 return 0;
1775
1776 unwind:
1777 while (--i >= 0) {
1778 nf = &pbuf->frag_array[i+1];
1779 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1780 }
1781
1782 nf = &pbuf->frag_array[0];
1783 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1784
1785 out_err:
1786 return -ENOMEM;
1787 }
1788
1789 static inline void
1790 qlcnic_clear_cmddesc(u64 *desc)
1791 {
1792 desc[0] = 0ULL;
1793 desc[2] = 0ULL;
1794 }
1795
1796 netdev_tx_t
1797 qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1798 {
1799 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1800 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1801 struct qlcnic_cmd_buffer *pbuf;
1802 struct qlcnic_skb_frag *buffrag;
1803 struct cmd_desc_type0 *hwdesc, *first_desc;
1804 struct pci_dev *pdev;
1805 int i, k;
1806
1807 u32 producer;
1808 int frag_count, no_of_desc;
1809 u32 num_txd = tx_ring->num_desc;
1810
1811 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1812 netif_stop_queue(netdev);
1813 return NETDEV_TX_BUSY;
1814 }
1815
1816 frag_count = skb_shinfo(skb)->nr_frags + 1;
1817
1818 /* 4 fragments per cmd descriptor */
1819 no_of_desc = (frag_count + 3) >> 2;
1820
1821 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1822 netif_stop_queue(netdev);
1823 smp_mb();
1824 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1825 netif_start_queue(netdev);
1826 else {
1827 adapter->stats.xmit_off++;
1828 return NETDEV_TX_BUSY;
1829 }
1830 }
1831
1832 producer = tx_ring->producer;
1833 pbuf = &tx_ring->cmd_buf_arr[producer];
1834
1835 pdev = adapter->pdev;
1836
1837 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1838 adapter->stats.tx_dma_map_error++;
1839 goto drop_packet;
1840 }
1841
1842 pbuf->skb = skb;
1843 pbuf->frag_count = frag_count;
1844
1845 first_desc = hwdesc = &tx_ring->desc_head[producer];
1846 qlcnic_clear_cmddesc((u64 *)hwdesc);
1847
1848 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1849 qlcnic_set_tx_port(first_desc, adapter->portnum);
1850
1851 for (i = 0; i < frag_count; i++) {
1852
1853 k = i % 4;
1854
1855 if ((k == 0) && (i > 0)) {
1856 /* move to next desc. */
1857 producer = get_next_index(producer, num_txd);
1858 hwdesc = &tx_ring->desc_head[producer];
1859 qlcnic_clear_cmddesc((u64 *)hwdesc);
1860 tx_ring->cmd_buf_arr[producer].skb = NULL;
1861 }
1862
1863 buffrag = &pbuf->frag_array[i];
1864
1865 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1866 switch (k) {
1867 case 0:
1868 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1869 break;
1870 case 1:
1871 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1872 break;
1873 case 2:
1874 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1875 break;
1876 case 3:
1877 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1878 break;
1879 }
1880 }
1881
1882 tx_ring->producer = get_next_index(producer, num_txd);
1883
1884 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1885
1886 qlcnic_update_cmd_producer(adapter, tx_ring);
1887
1888 adapter->stats.txbytes += skb->len;
1889 adapter->stats.xmitcalled++;
1890
1891 return NETDEV_TX_OK;
1892
1893 drop_packet:
1894 adapter->stats.txdropped++;
1895 dev_kfree_skb_any(skb);
1896 return NETDEV_TX_OK;
1897 }
1898
1899 static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1900 {
1901 struct net_device *netdev = adapter->netdev;
1902 u32 temp, temp_state, temp_val;
1903 int rv = 0;
1904
1905 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1906
1907 temp_state = qlcnic_get_temp_state(temp);
1908 temp_val = qlcnic_get_temp_val(temp);
1909
1910 if (temp_state == QLCNIC_TEMP_PANIC) {
1911 dev_err(&netdev->dev,
1912 "Device temperature %d degrees C exceeds"
1913 " maximum allowed. Hardware has been shut down.\n",
1914 temp_val);
1915 rv = 1;
1916 } else if (temp_state == QLCNIC_TEMP_WARN) {
1917 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1918 dev_err(&netdev->dev,
1919 "Device temperature %d degrees C "
1920 "exceeds operating range."
1921 " Immediate action needed.\n",
1922 temp_val);
1923 }
1924 } else {
1925 if (adapter->temp == QLCNIC_TEMP_WARN) {
1926 dev_info(&netdev->dev,
1927 "Device temperature is now %d degrees C"
1928 " in normal range.\n", temp_val);
1929 }
1930 }
1931 adapter->temp = temp_state;
1932 return rv;
1933 }
1934
1935 void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1936 {
1937 struct net_device *netdev = adapter->netdev;
1938
1939 if (adapter->ahw.linkup && !linkup) {
1940 dev_info(&netdev->dev, "NIC Link is down\n");
1941 adapter->ahw.linkup = 0;
1942 if (netif_running(netdev)) {
1943 netif_carrier_off(netdev);
1944 netif_stop_queue(netdev);
1945 }
1946 } else if (!adapter->ahw.linkup && linkup) {
1947 dev_info(&netdev->dev, "NIC Link is up\n");
1948 adapter->ahw.linkup = 1;
1949 if (netif_running(netdev)) {
1950 netif_carrier_on(netdev);
1951 netif_wake_queue(netdev);
1952 }
1953 }
1954 }
1955
1956 static void qlcnic_tx_timeout(struct net_device *netdev)
1957 {
1958 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1959
1960 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1961 return;
1962
1963 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1964
1965 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1966 adapter->need_fw_reset = 1;
1967 else
1968 adapter->reset_context = 1;
1969 }
1970
1971 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1972 {
1973 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1974 struct net_device_stats *stats = &netdev->stats;
1975
1976 memset(stats, 0, sizeof(*stats));
1977
1978 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1979 stats->tx_packets = adapter->stats.xmitfinished;
1980 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1981 stats->tx_bytes = adapter->stats.txbytes;
1982 stats->rx_dropped = adapter->stats.rxdropped;
1983 stats->tx_dropped = adapter->stats.txdropped;
1984
1985 return stats;
1986 }
1987
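/*
 * qlcnic_clear_legacy_intr() - check whether the legacy INTx interrupt
 * belongs to this function and, if so, acknowledge it and flush the
 * write.  Returns IRQ_NONE when the interrupt is not ours.
 */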
1988 static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1989 {
1990 u32 status;
1991
1992 status = readl(adapter->isr_int_vec);
1993
1994 if (!(status & adapter->int_vec_bit))
1995 return IRQ_NONE;
1996
1997 /* check interrupt state machine, to be sure */
1998 status = readl(adapter->crb_int_state_reg);
1999 if (!ISR_LEGACY_INT_TRIGGERED(status))
2000 return IRQ_NONE;
2001
2002 writel(0xffffffff, adapter->tgt_status_reg);
2003 /* read twice to ensure write is flushed */
2004 readl(adapter->isr_int_vec);
2005 readl(adapter->isr_int_vec);
2006
2007 return IRQ_HANDLED;
2008 }
2009
2010 static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2011 {
2012 struct qlcnic_host_sds_ring *sds_ring = data;
2013 struct qlcnic_adapter *adapter = sds_ring->adapter;
2014
2015 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2016 goto done;
2017 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2018 writel(0xffffffff, adapter->tgt_status_reg);
2019 goto done;
2020 }
2021
2022 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2023 return IRQ_NONE;
2024
2025 done:
2026 adapter->diag_cnt++;
2027 qlcnic_enable_int(sds_ring);
2028 return IRQ_HANDLED;
2029 }
2030
2031 static irqreturn_t qlcnic_intr(int irq, void *data)
2032 {
2033 struct qlcnic_host_sds_ring *sds_ring = data;
2034 struct qlcnic_adapter *adapter = sds_ring->adapter;
2035
2036 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2037 return IRQ_NONE;
2038
2039 napi_schedule(&sds_ring->napi);
2040
2041 return IRQ_HANDLED;
2042 }
2043
2044 static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2045 {
2046 struct qlcnic_host_sds_ring *sds_ring = data;
2047 struct qlcnic_adapter *adapter = sds_ring->adapter;
2048
2049 /* clear interrupt */
2050 writel(0xffffffff, adapter->tgt_status_reg);
2051
2052 napi_schedule(&sds_ring->napi);
2053 return IRQ_HANDLED;
2054 }
2055
2056 static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2057 {
2058 struct qlcnic_host_sds_ring *sds_ring = data;
2059
2060 napi_schedule(&sds_ring->napi);
2061 return IRQ_HANDLED;
2062 }
2063
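/*
 * Reclaim completed TX descriptors: unmap the DMA buffers, free the
 * skbs and advance the software consumer towards the hardware
 * consumer, waking the TX queue once enough space is available.
 * Returns non-zero when the ring is fully drained (or when the clean
 * lock could not be taken).
 */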
2064 static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2065 {
2066 u32 sw_consumer, hw_consumer;
2067 int count = 0, i;
2068 struct qlcnic_cmd_buffer *buffer;
2069 struct pci_dev *pdev = adapter->pdev;
2070 struct net_device *netdev = adapter->netdev;
2071 struct qlcnic_skb_frag *frag;
2072 int done;
2073 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2074
2075 if (!spin_trylock(&adapter->tx_clean_lock))
2076 return 1;
2077
2078 sw_consumer = tx_ring->sw_consumer;
2079 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2080
2081 while (sw_consumer != hw_consumer) {
2082 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2083 if (buffer->skb) {
2084 frag = &buffer->frag_array[0];
2085 pci_unmap_single(pdev, frag->dma, frag->length,
2086 PCI_DMA_TODEVICE);
2087 frag->dma = 0ULL;
2088 for (i = 1; i < buffer->frag_count; i++) {
2089 frag++;
2090 pci_unmap_page(pdev, frag->dma, frag->length,
2091 PCI_DMA_TODEVICE);
2092 frag->dma = 0ULL;
2093 }
2094
2095 adapter->stats.xmitfinished++;
2096 dev_kfree_skb_any(buffer->skb);
2097 buffer->skb = NULL;
2098 }
2099
2100 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2101 if (++count >= MAX_STATUS_HANDLE)
2102 break;
2103 }
2104
2105 if (count && netif_running(netdev)) {
2106 tx_ring->sw_consumer = sw_consumer;
2107
2108 smp_mb();
2109
2110 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2111 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2112 netif_wake_queue(netdev);
2113 adapter->stats.xmit_on++;
2114 }
2115 }
2116 adapter->tx_timeo_cnt = 0;
2117 }
2118 /*
2119 * If everything is freed up to the consumer, check whether the ring
2120 * is full. If it is, more entries may still need to be freed, so
2121 * schedule the callback again.
2122 *
2123 * This happens when there are 2 CPUs: one could be freeing while the
2124 * other is filling the ring. If the ring is full when we get out of
2125 * here and the card has already interrupted the host, the host can
2126 * miss the interrupt.
2127 *
2128 * There is still a possible race condition and the host could miss an
2129 * interrupt. The card has to take care of this.
2130 */
2131 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2132 done = (sw_consumer == hw_consumer);
2133 spin_unlock(&adapter->tx_clean_lock);
2134
2135 return done;
2136 }
2137
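/*
 * NAPI poll handler that also services TX completions; the ring
 * interrupt is re-enabled only once the RX work fits in the budget,
 * the TX ring is clean and the device is still marked up.
 */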
2138 static int qlcnic_poll(struct napi_struct *napi, int budget)
2139 {
2140 struct qlcnic_host_sds_ring *sds_ring =
2141 container_of(napi, struct qlcnic_host_sds_ring, napi);
2142
2143 struct qlcnic_adapter *adapter = sds_ring->adapter;
2144
2145 int tx_complete;
2146 int work_done;
2147
2148 tx_complete = qlcnic_process_cmd_ring(adapter);
2149
2150 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2151
2152 if ((work_done < budget) && tx_complete) {
2153 napi_complete(&sds_ring->napi);
2154 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2155 qlcnic_enable_int(sds_ring);
2156 }
2157
2158 return work_done;
2159 }
2160
2161 static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2162 {
2163 struct qlcnic_host_sds_ring *sds_ring =
2164 container_of(napi, struct qlcnic_host_sds_ring, napi);
2165
2166 struct qlcnic_adapter *adapter = sds_ring->adapter;
2167 int work_done;
2168
2169 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2170
2171 if (work_done < budget) {
2172 napi_complete(&sds_ring->napi);
2173 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2174 qlcnic_enable_int(sds_ring);
2175 }
2176
2177 return work_done;
2178 }
2179
2180 #ifdef CONFIG_NET_POLL_CONTROLLER
2181 static void qlcnic_poll_controller(struct net_device *netdev)
2182 {
2183 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2184 disable_irq(adapter->irq);
2185 qlcnic_intr(adapter->irq, adapter);
2186 enable_irq(adapter->irq);
2187 }
2188 #endif
2189
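/*
 * Record the port number, an event encoding and the time since the
 * last reset in the IDC scratch register, to help with debugging
 * recovery transitions.
 */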
2190 static void
2191 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2192 {
2193 u32 val;
2194
2195 val = adapter->portnum & 0xf;
2196 val |= encoding << 7;
2197 val |= (jiffies - adapter->dev_rst_time) << 8;
2198
2199 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2200 adapter->dev_rst_time = jiffies;
2201 }
2202
2203 static int
2204 qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2205 {
2206 u32 val;
2207
2208 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2209 state != QLCNIC_DEV_NEED_QUISCENT);
2210
2211 if (qlcnic_api_lock(adapter))
2212 return -EIO;
2213
2214 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2215
2216 if (state == QLCNIC_DEV_NEED_RESET)
2217 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2218 else if (state == QLCNIC_DEV_NEED_QUISCENT)
2219 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2220
2221 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2222
2223 qlcnic_api_unlock(adapter);
2224
2225 return 0;
2226 }
2227
2228 static int
2229 qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2230 {
2231 u32 val;
2232
2233 if (qlcnic_api_lock(adapter))
2234 return -EBUSY;
2235
2236 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2237 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2238 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2239
2240 qlcnic_api_unlock(adapter);
2241
2242 return 0;
2243 }
2244
2245 static void
2246 qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2247 {
2248 u32 val;
2249
2250 if (qlcnic_api_lock(adapter))
2251 goto err;
2252
2253 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2254 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2255 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2256
2257 if (!(val & 0x11111111))
2258 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2259
2260 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2261 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2262 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2263
2264 qlcnic_api_unlock(adapter);
2265 err:
2266 adapter->fw_fail_cnt = 0;
2267 clear_bit(__QLCNIC_START_FW, &adapter->state);
2268 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2269 }
2270
2271 /* Caller must hold the API lock while checking the driver state */
2272 static int
2273 qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2274 {
2275 int act, state;
2276
2277 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2278 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2279
2280 if (((state & 0x11111111) == (act & 0x11111111)) ||
2281 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2282 return 0;
2283 else
2284 return 1;
2285 }
2286
2287 static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2288 {
2289 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2290
2291 if (val != QLCNIC_DRV_IDC_VER) {
2292 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2293 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2294 }
2295
2296 return 0;
2297 }
2298
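/*
 * IDC handshake that decides whether this PCI function should load
 * the firmware: returns 1 when this function must start it, 0 when
 * another function has already brought the device to READY, and -1
 * on failure or timeout.
 */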
2299 static int
2300 qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2301 {
2302 u32 val, prev_state;
2303 u8 dev_init_timeo = adapter->dev_init_timeo;
2304 u8 portnum = adapter->portnum;
2305 u8 ret;
2306
2307 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2308 return 1;
2309
2310 if (qlcnic_api_lock(adapter))
2311 return -1;
2312
2313 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2314 if (!(val & (1 << (portnum * 4)))) {
2315 QLC_DEV_SET_REF_CNT(val, portnum);
2316 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2317 }
2318
2319 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2320 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2321
2322 switch (prev_state) {
2323 case QLCNIC_DEV_COLD:
2324 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2325 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2326 qlcnic_idc_debug_info(adapter, 0);
2327 qlcnic_api_unlock(adapter);
2328 return 1;
2329
2330 case QLCNIC_DEV_READY:
2331 ret = qlcnic_check_idc_ver(adapter);
2332 qlcnic_api_unlock(adapter);
2333 return ret;
2334
2335 case QLCNIC_DEV_NEED_RESET:
2336 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2337 QLC_DEV_SET_RST_RDY(val, portnum);
2338 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2339 break;
2340
2341 case QLCNIC_DEV_NEED_QUISCENT:
2342 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2343 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2344 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2345 break;
2346
2347 case QLCNIC_DEV_FAILED:
2348 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2349 qlcnic_api_unlock(adapter);
2350 return -1;
2351
2352 case QLCNIC_DEV_INITIALIZING:
2353 case QLCNIC_DEV_QUISCENT:
2354 break;
2355 }
2356
2357 qlcnic_api_unlock(adapter);
2358
2359 do {
2360 msleep(1000);
2361 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2362
2363 if (prev_state == QLCNIC_DEV_QUISCENT)
2364 continue;
2365 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2366
2367 if (!dev_init_timeo) {
2368 dev_err(&adapter->pdev->dev,
2369 "Waiting for device to initialize timeout\n");
2370 return -1;
2371 }
2372
2373 if (qlcnic_api_lock(adapter))
2374 return -1;
2375
2376 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2377 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2378 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2379
2380 ret = qlcnic_check_idc_ver(adapter);
2381 qlcnic_api_unlock(adapter);
2382
2383 return ret;
2384 }
2385
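/*
 * Delayed recovery work: waits for all functions to ack the
 * reset/quiescent request, steps the device through the IDC states,
 * restarts the firmware and schedules qlcnic_attach_work on success;
 * on failure the driver state is cleared and the netdev reattached.
 */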
2386 static void
2387 qlcnic_fwinit_work(struct work_struct *work)
2388 {
2389 struct qlcnic_adapter *adapter = container_of(work,
2390 struct qlcnic_adapter, fw_work.work);
2391 u32 dev_state = 0xf, npar_state;
2392
2393 if (qlcnic_api_lock(adapter))
2394 goto err_ret;
2395
2396 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2397 if (dev_state == QLCNIC_DEV_QUISCENT) {
2398 qlcnic_api_unlock(adapter);
2399 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2400 FW_POLL_DELAY * 2);
2401 return;
2402 }
2403
2404 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2405 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2406 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2407 qlcnic_api_unlock(adapter);
2408 goto wait_npar;
2409 } else {
2410 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2411 FW_POLL_DELAY);
2412 qlcnic_api_unlock(adapter);
2413 return;
2414 }
2415 }
2416
2417 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2418 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2419 adapter->reset_ack_timeo);
2420 goto skip_ack_check;
2421 }
2422
2423 if (!qlcnic_check_drv_state(adapter)) {
2424 skip_ack_check:
2425 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2426
2427 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2428 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2429 QLCNIC_DEV_QUISCENT);
2430 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2431 FW_POLL_DELAY * 2);
2432 QLCDB(adapter, DRV, "Quiescing the driver\n");
2433 qlcnic_idc_debug_info(adapter, 0);
2434
2435 qlcnic_api_unlock(adapter);
2436 return;
2437 }
2438
2439 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2440 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2441 QLCNIC_DEV_INITIALIZING);
2442 set_bit(__QLCNIC_START_FW, &adapter->state);
2443 QLCDB(adapter, DRV, "Restarting fw\n");
2444 qlcnic_idc_debug_info(adapter, 0);
2445 }
2446
2447 qlcnic_api_unlock(adapter);
2448
2449 if (!adapter->nic_ops->start_firmware(adapter)) {
2450 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2451 return;
2452 }
2453 goto err_ret;
2454 }
2455
2456 qlcnic_api_unlock(adapter);
2457
2458 wait_npar:
2459 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2460 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2461
2462 switch (dev_state) {
2463 case QLCNIC_DEV_QUISCENT:
2464 case QLCNIC_DEV_NEED_QUISCENT:
2465 case QLCNIC_DEV_NEED_RESET:
2466 qlcnic_schedule_work(adapter,
2467 qlcnic_fwinit_work, FW_POLL_DELAY);
2468 return;
2469 case QLCNIC_DEV_FAILED:
2470 break;
2471
2472 default:
2473 if (!adapter->nic_ops->start_firmware(adapter)) {
2474 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2475 return;
2476 }
2477 }
2478
2479 err_ret:
2480 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2481 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2482 netif_device_attach(adapter->netdev);
2483 qlcnic_clr_all_drv_state(adapter);
2484 }
2485
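/*
 * Delayed work that starts a recovery cycle: take the interface down,
 * bail out on fatal firmware status or a temperature panic, publish
 * this function's ack in the driver state register and hand over to
 * qlcnic_fwinit_work.
 */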
2486 static void
2487 qlcnic_detach_work(struct work_struct *work)
2488 {
2489 struct qlcnic_adapter *adapter = container_of(work,
2490 struct qlcnic_adapter, fw_work.work);
2491 struct net_device *netdev = adapter->netdev;
2492 u32 status;
2493
2494 netif_device_detach(netdev);
2495
2496 qlcnic_down(adapter, netdev);
2497
2498 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2499
2500 if (status & QLCNIC_RCODE_FATAL_ERROR)
2501 goto err_ret;
2502
2503 if (adapter->temp == QLCNIC_TEMP_PANIC)
2504 goto err_ret;
2505
2506 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2507 goto err_ret;
2508
2509 adapter->fw_wait_cnt = 0;
2510
2511 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2512
2513 return;
2514
2515 err_ret:
2516 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2517 status, adapter->temp);
2518 netif_device_attach(netdev);
2519 qlcnic_clr_all_drv_state(adapter);
2520
2521 }
2522
2523 /* Transition to RESET state from READY state only */
2524 static void
2525 qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2526 {
2527 u32 state;
2528
2529 adapter->need_fw_reset = 1;
2530 if (qlcnic_api_lock(adapter))
2531 return;
2532
2533 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2534
2535 if (state == QLCNIC_DEV_READY) {
2536 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2537 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2538 qlcnic_idc_debug_info(adapter, 0);
2539 }
2540
2541 qlcnic_api_unlock(adapter);
2542 }
2543
2544 /* Transition to NPAR READY state from NPAR NOT READY state */
2545 static void
2546 qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2547 {
2548 u32 state;
2549
2550 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2551 adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
2552 return;
2553 if (qlcnic_api_lock(adapter))
2554 return;
2555
2556 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2557
2558 if (state != QLCNIC_DEV_NPAR_RDY) {
2559 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2560 QLCNIC_DEV_NPAR_RDY);
2561 QLCDB(adapter, DRV, "NPAR READY state set\n");
2562 }
2563
2564 qlcnic_api_unlock(adapter);
2565 }
2566
2567 static void
2568 qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2569 work_func_t func, int delay)
2570 {
2571 if (test_bit(__QLCNIC_AER, &adapter->state))
2572 return;
2573
2574 INIT_DELAYED_WORK(&adapter->fw_work, func);
2575 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2576 }
2577
2578 static void
2579 qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2580 {
2581 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2582 msleep(10);
2583
2584 cancel_delayed_work_sync(&adapter->fw_work);
2585 }
2586
2587 static void
2588 qlcnic_attach_work(struct work_struct *work)
2589 {
2590 struct qlcnic_adapter *adapter = container_of(work,
2591 struct qlcnic_adapter, fw_work.work);
2592 struct net_device *netdev = adapter->netdev;
2593
2594 if (netif_running(netdev)) {
2595 if (qlcnic_up(adapter, netdev))
2596 goto done;
2597
2598 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2599 }
2600
2601 done:
2602 netif_device_attach(netdev);
2603 adapter->fw_fail_cnt = 0;
2604 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2605
2606 if (!qlcnic_clr_drv_state(adapter))
2607 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2608 FW_POLL_DELAY);
2609 }
2610
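/*
 * Periodic health check run from the firmware poll work: detach on
 * over-temperature or a pending reset request, and treat a
 * QLCNIC_PEG_ALIVE_COUNTER heartbeat that stops advancing for
 * FW_FAIL_THRESH polls as a firmware hang; recovery work is scheduled
 * only when auto_fw_reset is enabled.
 */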
2611 static int
2612 qlcnic_check_health(struct qlcnic_adapter *adapter)
2613 {
2614 u32 state = 0, heartbit;
2615 struct net_device *netdev = adapter->netdev;
2616
2617 if (qlcnic_check_temp(adapter))
2618 goto detach;
2619
2620 if (adapter->need_fw_reset)
2621 qlcnic_dev_request_reset(adapter);
2622
2623 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2624 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2625 adapter->need_fw_reset = 1;
2626
2627 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2628 if (heartbit != adapter->heartbit) {
2629 adapter->heartbit = heartbit;
2630 adapter->fw_fail_cnt = 0;
2631 if (adapter->need_fw_reset)
2632 goto detach;
2633
2634 if (adapter->reset_context &&
2635 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2636 qlcnic_reset_hw_context(adapter);
2637 adapter->netdev->trans_start = jiffies;
2638 }
2639
2640 return 0;
2641 }
2642
2643 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2644 return 0;
2645
2646 qlcnic_dev_request_reset(adapter);
2647
2648 if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
2649 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2650
2651 dev_info(&netdev->dev, "firmware hang detected\n");
2652
2653 detach:
2654 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2655 QLCNIC_DEV_NEED_RESET;
2656
2657 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2658 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2659
2660 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2661 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2662 }
2663
2664 return 1;
2665 }
2666
2667 static void
2668 qlcnic_fw_poll_work(struct work_struct *work)
2669 {
2670 struct qlcnic_adapter *adapter = container_of(work,
2671 struct qlcnic_adapter, fw_work.work);
2672
2673 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2674 goto reschedule;
2675
2676
2677 if (qlcnic_check_health(adapter))
2678 return;
2679
2680 reschedule:
2681 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2682 }
2683
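/*
 * Return 1 when no lower-numbered function on this device is powered
 * up (all are in D3cold); qlcnic_attach_func uses this to decide
 * whether it has to restart the firmware itself.
 */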
2684 static int qlcnic_is_first_func(struct pci_dev *pdev)
2685 {
2686 struct pci_dev *oth_pdev;
2687 int val = pdev->devfn;
2688
2689 while (val-- > 0) {
2690 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2691 (pdev->bus), pdev->bus->number,
2692 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2693 if (!oth_pdev)
2694 continue;
2695
2696 if (oth_pdev->current_state != PCI_D3cold) {
2697 pci_dev_put(oth_pdev);
2698 return 0;
2699 }
2700 pci_dev_put(oth_pdev);
2701 }
2702 return 1;
2703 }
2704
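/*
 * Bring a function back after an AER slot reset: re-enable the PCI
 * device, restart the firmware if this is the first function back,
 * re-create the interrupt setup and reattach the interface if it was
 * running.
 */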
2705 static int qlcnic_attach_func(struct pci_dev *pdev)
2706 {
2707 int err, first_func;
2708 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2709 struct net_device *netdev = adapter->netdev;
2710
2711 pdev->error_state = pci_channel_io_normal;
2712
2713 err = pci_enable_device(pdev);
2714 if (err)
2715 return err;
2716
2717 pci_set_power_state(pdev, PCI_D0);
2718 pci_set_master(pdev);
2719 pci_restore_state(pdev);
2720
2721 first_func = qlcnic_is_first_func(pdev);
2722
2723 if (qlcnic_api_lock(adapter))
2724 return -EINVAL;
2725
2726 if (first_func) {
2727 adapter->need_fw_reset = 1;
2728 set_bit(__QLCNIC_START_FW, &adapter->state);
2729 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2730 QLCDB(adapter, DRV, "Restarting fw\n");
2731 }
2732 qlcnic_api_unlock(adapter);
2733
2734 err = adapter->nic_ops->start_firmware(adapter);
2735 if (err)
2736 return err;
2737
2738 qlcnic_clr_drv_state(adapter);
2739 qlcnic_setup_intr(adapter);
2740
2741 if (netif_running(netdev)) {
2742 err = qlcnic_attach(adapter);
2743 if (err) {
2744 qlcnic_clr_all_drv_state(adapter);
2745 clear_bit(__QLCNIC_AER, &adapter->state);
2746 netif_device_attach(netdev);
2747 return err;
2748 }
2749
2750 err = qlcnic_up(adapter, netdev);
2751 if (err)
2752 goto done;
2753
2754 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2755 }
2756 done:
2757 netif_device_attach(netdev);
2758 return err;
2759 }
2760
2761 static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2762 pci_channel_state_t state)
2763 {
2764 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2765 struct net_device *netdev = adapter->netdev;
2766
2767 if (state == pci_channel_io_perm_failure)
2768 return PCI_ERS_RESULT_DISCONNECT;
2769
2770 if (state == pci_channel_io_normal)
2771 return PCI_ERS_RESULT_RECOVERED;
2772
2773 set_bit(__QLCNIC_AER, &adapter->state);
2774 netif_device_detach(netdev);
2775
2776 cancel_delayed_work_sync(&adapter->fw_work);
2777
2778 if (netif_running(netdev))
2779 qlcnic_down(adapter, netdev);
2780
2781 qlcnic_detach(adapter);
2782 qlcnic_teardown_intr(adapter);
2783
2784 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2785
2786 pci_save_state(pdev);
2787 pci_disable_device(pdev);
2788
2789 return PCI_ERS_RESULT_NEED_RESET;
2790 }
2791
2792 static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2793 {
2794 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2795 PCI_ERS_RESULT_RECOVERED;
2796 }
2797
2798 static void qlcnic_io_resume(struct pci_dev *pdev)
2799 {
2800 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2801
2802 pci_cleanup_aer_uncorrect_error_status(pdev);
2803
2804 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2805 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2806 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2807 FW_POLL_DELAY);
2808 }
2809
2810
2811 static int
2812 qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2813 {
2814 int err;
2815
2816 err = qlcnic_can_start_firmware(adapter);
2817 if (err)
2818 return err;
2819
2820 qlcnic_check_options(adapter);
2821
2822 adapter->need_fw_reset = 0;
2823
2824 return err;
2825 }
2826
2827 static int
2828 qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2829 {
2830 return -EOPNOTSUPP;
2831 }
2832
2833 static int
2834 qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2835 {
2836 return -EOPNOTSUPP;
2837 }
2838
2839 static ssize_t
2840 qlcnic_store_bridged_mode(struct device *dev,
2841 struct device_attribute *attr, const char *buf, size_t len)
2842 {
2843 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2844 unsigned long new;
2845 int ret = -EINVAL;
2846
2847 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2848 goto err_out;
2849
2850 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2851 goto err_out;
2852
2853 if (strict_strtoul(buf, 2, &new))
2854 goto err_out;
2855
2856 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2857 ret = len;
2858
2859 err_out:
2860 return ret;
2861 }
2862
2863 static ssize_t
2864 qlcnic_show_bridged_mode(struct device *dev,
2865 struct device_attribute *attr, char *buf)
2866 {
2867 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2868 int bridged_mode = 0;
2869
2870 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2871 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2872
2873 return sprintf(buf, "%d\n", bridged_mode);
2874 }
2875
2876 static struct device_attribute dev_attr_bridged_mode = {
2877 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2878 .show = qlcnic_show_bridged_mode,
2879 .store = qlcnic_store_bridged_mode,
2880 };
2881
2882 static ssize_t
2883 qlcnic_store_diag_mode(struct device *dev,
2884 struct device_attribute *attr, const char *buf, size_t len)
2885 {
2886 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2887 unsigned long new;
2888
2889 if (strict_strtoul(buf, 2, &new))
2890 return -EINVAL;
2891
2892 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2893 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2894
2895 return len;
2896 }
2897
2898 static ssize_t
2899 qlcnic_show_diag_mode(struct device *dev,
2900 struct device_attribute *attr, char *buf)
2901 {
2902 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2903
2904 return sprintf(buf, "%d\n",
2905 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2906 }
2907
2908 static struct device_attribute dev_attr_diag_mode = {
2909 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2910 .show = qlcnic_show_diag_mode,
2911 .store = qlcnic_store_diag_mode,
2912 };
2913
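/*
 * The "crb" and "mem" binary sysfs attributes below give register and
 * memory access for diagnostics; they are usable only while
 * QLCNIC_DIAG_ENABLED has been set through the "diag_mode" attribute.
 */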
2914 static int
2915 qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2916 loff_t offset, size_t size)
2917 {
2918 size_t crb_size = 4;
2919
2920 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2921 return -EIO;
2922
2923 if (offset < QLCNIC_PCI_CRBSPACE) {
2924 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2925 QLCNIC_PCI_CAMQM_END))
2926 crb_size = 8;
2927 else
2928 return -EINVAL;
2929 }
2930
2931 if ((size != crb_size) || (offset & (crb_size-1)))
2932 return -EINVAL;
2933
2934 return 0;
2935 }
2936
2937 static ssize_t
2938 qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2939 struct bin_attribute *attr,
2940 char *buf, loff_t offset, size_t size)
2941 {
2942 struct device *dev = container_of(kobj, struct device, kobj);
2943 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2944 u32 data;
2945 u64 qmdata;
2946 int ret;
2947
2948 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2949 if (ret != 0)
2950 return ret;
2951
2952 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2953 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2954 memcpy(buf, &qmdata, size);
2955 } else {
2956 data = QLCRD32(adapter, offset);
2957 memcpy(buf, &data, size);
2958 }
2959 return size;
2960 }
2961
2962 static ssize_t
2963 qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2964 struct bin_attribute *attr,
2965 char *buf, loff_t offset, size_t size)
2966 {
2967 struct device *dev = container_of(kobj, struct device, kobj);
2968 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2969 u32 data;
2970 u64 qmdata;
2971 int ret;
2972
2973 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2974 if (ret != 0)
2975 return ret;
2976
2977 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2978 memcpy(&qmdata, buf, size);
2979 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2980 } else {
2981 memcpy(&data, buf, size);
2982 QLCWR32(adapter, offset, data);
2983 }
2984 return size;
2985 }
2986
2987 static int
2988 qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2989 loff_t offset, size_t size)
2990 {
2991 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2992 return -EIO;
2993
2994 if ((size != 8) || (offset & 0x7))
2995 return -EIO;
2996
2997 return 0;
2998 }
2999
3000 static ssize_t
3001 qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3002 struct bin_attribute *attr,
3003 char *buf, loff_t offset, size_t size)
3004 {
3005 struct device *dev = container_of(kobj, struct device, kobj);
3006 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3007 u64 data;
3008 int ret;
3009
3010 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3011 if (ret != 0)
3012 return ret;
3013
3014 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3015 return -EIO;
3016
3017 memcpy(buf, &data, size);
3018
3019 return size;
3020 }
3021
3022 static ssize_t
3023 qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3024 struct bin_attribute *attr,
3025 char *buf, loff_t offset, size_t size)
3026 {
3027 struct device *dev = container_of(kobj, struct device, kobj);
3028 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3029 u64 data;
3030 int ret;
3031
3032 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3033 if (ret != 0)
3034 return ret;
3035
3036 memcpy(&data, buf, size);
3037
3038 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3039 return -EIO;
3040
3041 return size;
3042 }
3043
3044
3045 static struct bin_attribute bin_attr_crb = {
3046 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3047 .size = 0,
3048 .read = qlcnic_sysfs_read_crb,
3049 .write = qlcnic_sysfs_write_crb,
3050 };
3051
3052 static struct bin_attribute bin_attr_mem = {
3053 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3054 .size = 0,
3055 .read = qlcnic_sysfs_read_mem,
3056 .write = qlcnic_sysfs_write_mem,
3057 };
3058
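/*
 * The binary sysfs attributes below ("pm_config", "esw_config",
 * "npar_config", "pci_config") expose NPAR and eSwitch configuration
 * as arrays of fixed-size per-function records; every write is
 * validated before it is applied to the firmware.
 */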
3059 static int
3060 validate_pm_config(struct qlcnic_adapter *adapter,
3061 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3062 {
3063
3064 u8 src_pci_func, s_esw_id, d_esw_id;
3065 u8 dest_pci_func;
3066 int i;
3067
3068 for (i = 0; i < count; i++) {
3069 src_pci_func = pm_cfg[i].pci_func;
3070 dest_pci_func = pm_cfg[i].dest_npar;
3071 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3072 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3073 return QL_STATUS_INVALID_PARAM;
3074
3075 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3076 return QL_STATUS_INVALID_PARAM;
3077
3078 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3079 return QL_STATUS_INVALID_PARAM;
3080
3081 if (!IS_VALID_MODE(pm_cfg[i].action))
3082 return QL_STATUS_INVALID_PARAM;
3083
3084 s_esw_id = adapter->npars[src_pci_func].phy_port;
3085 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3086
3087 if (s_esw_id != d_esw_id)
3088 return QL_STATUS_INVALID_PARAM;
3089
3090 }
3091 return 0;
3092
3093 }
3094
3095 static ssize_t
3096 qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3097 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3098 {
3099 struct device *dev = container_of(kobj, struct device, kobj);
3100 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3101 struct qlcnic_pm_func_cfg *pm_cfg;
3102 u32 id, action, pci_func;
3103 int count, rem, i, ret;
3104
3105 count = size / sizeof(struct qlcnic_pm_func_cfg);
3106 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3107 if (rem)
3108 return QL_STATUS_INVALID_PARAM;
3109
3110 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3111
3112 ret = validate_pm_config(adapter, pm_cfg, count);
3113 if (ret)
3114 return ret;
3115 for (i = 0; i < count; i++) {
3116 pci_func = pm_cfg[i].pci_func;
3117 action = pm_cfg[i].action;
3118 id = adapter->npars[pci_func].phy_port;
3119 ret = qlcnic_config_port_mirroring(adapter, id,
3120 action, pci_func);
3121 if (ret)
3122 return ret;
3123 }
3124
3125 for (i = 0; i < count; i++) {
3126 pci_func = pm_cfg[i].pci_func;
3127 id = adapter->npars[pci_func].phy_port;
3128 adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
3129 adapter->npars[pci_func].dest_npar = id;
3130 }
3131 return size;
3132 }
3133
3134 static ssize_t
3135 qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3136 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3137 {
3138 struct device *dev = container_of(kobj, struct device, kobj);
3139 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3140 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3141 int i;
3142
3143 if (size != sizeof(pm_cfg))
3144 return QL_STATUS_INVALID_PARAM;
3145
3146 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3147 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3148 continue;
3149 pm_cfg[i].action = adapter->npars[i].enable_pm;
3150 pm_cfg[i].dest_npar = 0;
3151 pm_cfg[i].pci_func = i;
3152 }
3153 memcpy(buf, &pm_cfg, size);
3154
3155 return size;
3156 }
3157
3158 static int
3159 validate_esw_config(struct qlcnic_adapter *adapter,
3160 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3161 {
3162 u8 pci_func;
3163 int i;
3164
3165 for (i = 0; i < count; i++) {
3166 pci_func = esw_cfg[i].pci_func;
3167 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3168 return QL_STATUS_INVALID_PARAM;
3169
3170 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3171 return QL_STATUS_INVALID_PARAM;
3172
3173 if (esw_cfg[i].host_vlan_tag == 1)
3174 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3175 return QL_STATUS_INVALID_PARAM;
3176
3177 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
3178 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
3179 || !IS_VALID_MODE(esw_cfg[i].mac_learning)
3180 || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
3181 return QL_STATUS_INVALID_PARAM;
3182 }
3183
3184 return 0;
3185 }
3186
3187 static ssize_t
3188 qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3189 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3190 {
3191 struct device *dev = container_of(kobj, struct device, kobj);
3192 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3193 struct qlcnic_esw_func_cfg *esw_cfg;
3194 int count, rem, i, ret;
3195 u8 id, pci_func;
3196
3197 count = size / sizeof(struct qlcnic_esw_func_cfg);
3198 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3199 if (rem)
3200 return QL_STATUS_INVALID_PARAM;
3201
3202 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3203 ret = validate_esw_config(adapter, esw_cfg, count);
3204 if (ret)
3205 return ret;
3206
3207 for (i = 0; i < count; i++) {
3208 pci_func = esw_cfg[i].pci_func;
3209 id = adapter->npars[pci_func].phy_port;
3210 ret = qlcnic_config_switch_port(adapter, id,
3211 esw_cfg[i].host_vlan_tag,
3212 esw_cfg[i].discard_tagged,
3213 esw_cfg[i].promisc_mode,
3214 esw_cfg[i].mac_learning,
3215 esw_cfg[i].pci_func,
3216 esw_cfg[i].vlan_id);
3217 if (ret)
3218 return ret;
3219 }
3220
3221 for (i = 0; i < count; i++) {
3222 pci_func = esw_cfg[i].pci_func;
3223 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
3224 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
3225 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
3226 adapter->npars[pci_func].discard_tagged =
3227 esw_cfg[i].discard_tagged;
3228 adapter->npars[pci_func].host_vlan_tag =
3229 esw_cfg[i].host_vlan_tag;
3230 }
3231
3232 return size;
3233 }
3234
3235 static ssize_t
3236 qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3237 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3238 {
3239 struct device *dev = container_of(kobj, struct device, kobj);
3240 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3241 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3242 int i;
3243
3244 if (size != sizeof(esw_cfg))
3245 return QL_STATUS_INVALID_PARAM;
3246
3247 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3248 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3249 continue;
3250
3251 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
3252 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
3253 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3254 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3255 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3256 }
3257 memcpy(buf, &esw_cfg, size);
3258
3259 return size;
3260 }
3261
3262 static int
3263 validate_npar_config(struct qlcnic_adapter *adapter,
3264 struct qlcnic_npar_func_cfg *np_cfg, int count)
3265 {
3266 u8 pci_func, i;
3267
3268 for (i = 0; i < count; i++) {
3269 pci_func = np_cfg[i].pci_func;
3270 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3271 return QL_STATUS_INVALID_PARAM;
3272
3273 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3274 return QL_STATUS_INVALID_PARAM;
3275
3276 if (!IS_VALID_BW(np_cfg[i].min_bw)
3277 || !IS_VALID_BW(np_cfg[i].max_bw)
3278 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3279 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3280 return QL_STATUS_INVALID_PARAM;
3281 }
3282 return 0;
3283 }
3284
3285 static ssize_t
3286 qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3287 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3288 {
3289 struct device *dev = container_of(kobj, struct device, kobj);
3290 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3291 struct qlcnic_info nic_info;
3292 struct qlcnic_npar_func_cfg *np_cfg;
3293 int i, count, rem, ret;
3294 u8 pci_func;
3295
3296 count = size / sizeof(struct qlcnic_npar_func_cfg);
3297 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3298 if (rem)
3299 return QL_STATUS_INVALID_PARAM;
3300
3301 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3302 ret = validate_npar_config(adapter, np_cfg, count);
3303 if (ret)
3304 return ret;
3305
3306 for (i = 0; i < count ; i++) {
3307 pci_func = np_cfg[i].pci_func;
3308 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3309 if (ret)
3310 return ret;
3311 nic_info.pci_func = pci_func;
3312 nic_info.min_tx_bw = np_cfg[i].min_bw;
3313 nic_info.max_tx_bw = np_cfg[i].max_bw;
3314 ret = qlcnic_set_nic_info(adapter, &nic_info);
3315 if (ret)
3316 return ret;
3317 adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3318 adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
3319 }
3320
3321 return size;
3322
3323 }
3324 static ssize_t
3325 qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3326 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3327 {
3328 struct device *dev = container_of(kobj, struct device, kobj);
3329 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3330 struct qlcnic_info nic_info;
3331 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3332 int i, ret;
3333
3334 if (size != sizeof(np_cfg))
3335 return QL_STATUS_INVALID_PARAM;
3336
3337 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3338 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3339 continue;
3340 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3341 if (ret)
3342 return ret;
3343
3344 np_cfg[i].pci_func = i;
3345 np_cfg[i].op_mode = nic_info.op_mode;
3346 np_cfg[i].port_num = nic_info.phys_port;
3347 np_cfg[i].fw_capab = nic_info.capabilities;
3348 np_cfg[i].min_bw = nic_info.min_tx_bw;
3349 np_cfg[i].max_bw = nic_info.max_tx_bw;
3350 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3351 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3352 }
3353 memcpy(buf, &np_cfg, size);
3354 return size;
3355 }
3356
3357 static ssize_t
3358 qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3359 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3360 {
3361 struct device *dev = container_of(kobj, struct device, kobj);
3362 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3363 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3364 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
3365 int i, ret;
3366
3367 if (size != sizeof(pci_cfg))
3368 return QL_STATUS_INVALID_PARAM;
3369
3370 ret = qlcnic_get_pci_info(adapter, pci_info);
3371 if (ret)
3372 return ret;
3373
3374 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3375 pci_cfg[i].pci_func = pci_info[i].id;
3376 pci_cfg[i].func_type = pci_info[i].type;
3377 pci_cfg[i].port_num = pci_info[i].default_port;
3378 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3379 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3380 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3381 }
3382 memcpy(buf, &pci_cfg, size);
3383 return size;
3384
3385 }
3386 static struct bin_attribute bin_attr_npar_config = {
3387 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3388 .size = 0,
3389 .read = qlcnic_sysfs_read_npar_config,
3390 .write = qlcnic_sysfs_write_npar_config,
3391 };
3392
3393 static struct bin_attribute bin_attr_pci_config = {
3394 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3395 .size = 0,
3396 .read = qlcnic_sysfs_read_pci_config,
3397 .write = NULL,
3398 };
3399
3400 static struct bin_attribute bin_attr_esw_config = {
3401 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3402 .size = 0,
3403 .read = qlcnic_sysfs_read_esw_config,
3404 .write = qlcnic_sysfs_write_esw_config,
3405 };
3406
3407 static struct bin_attribute bin_attr_pm_config = {
3408 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3409 .size = 0,
3410 .read = qlcnic_sysfs_read_pm_config,
3411 .write = qlcnic_sysfs_write_pm_config,
3412 };
3413
3414 static void
3415 qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3416 {
3417 struct device *dev = &adapter->pdev->dev;
3418
3419 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3420 if (device_create_file(dev, &dev_attr_bridged_mode))
3421 dev_warn(dev,
3422 "failed to create bridged_mode sysfs entry\n");
3423 }
3424
3425 static void
3426 qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3427 {
3428 struct device *dev = &adapter->pdev->dev;
3429
3430 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3431 device_remove_file(dev, &dev_attr_bridged_mode);
3432 }
3433
3434 static void
3435 qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3436 {
3437 struct device *dev = &adapter->pdev->dev;
3438
3439 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3440 return;
3441 if (device_create_file(dev, &dev_attr_diag_mode))
3442 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3443 if (device_create_bin_file(dev, &bin_attr_crb))
3444 dev_info(dev, "failed to create crb sysfs entry\n");
3445 if (device_create_bin_file(dev, &bin_attr_mem))
3446 dev_info(dev, "failed to create mem sysfs entry\n");
3447 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3448 adapter->op_mode != QLCNIC_MGMT_FUNC)
3449 return;
3450 if (device_create_bin_file(dev, &bin_attr_pci_config))
3451 dev_info(dev, "failed to create pci config sysfs entry\n");
3452 if (device_create_bin_file(dev, &bin_attr_npar_config))
3453 dev_info(dev, "failed to create npar config sysfs entry\n");
3454 if (device_create_bin_file(dev, &bin_attr_esw_config))
3455 dev_info(dev, "failed to create esw config sysfs entry\n");
3456 if (device_create_bin_file(dev, &bin_attr_pm_config))
3457 dev_info(dev, "failed to create pm config sysfs entry\n");
3458
3459 }
3460
3461 static void
3462 qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3463 {
3464 struct device *dev = &adapter->pdev->dev;
3465
3466 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3467 return;
3468 device_remove_file(dev, &dev_attr_diag_mode);
3469 device_remove_bin_file(dev, &bin_attr_crb);
3470 device_remove_bin_file(dev, &bin_attr_mem);
3471 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3472 adapter->op_mode != QLCNIC_MGMT_FUNC)
3473 return;
3474 device_remove_bin_file(dev, &bin_attr_pci_config);
3475 device_remove_bin_file(dev, &bin_attr_npar_config);
3476 device_remove_bin_file(dev, &bin_attr_esw_config);
3477 device_remove_bin_file(dev, &bin_attr_pm_config);
3478 }
3479
3480 #ifdef CONFIG_INET
3481
3482 #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3483
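/*
 * Walk the IPv4 addresses configured on a qlcnic interface and report
 * each one to the firmware as added or removed; used by the netdev
 * and inetaddr notifiers below.
 */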
3484 static void
3485 qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3486 {
3487 struct in_device *indev;
3488 struct qlcnic_adapter *adapter = netdev_priv(dev);
3489
3490 indev = in_dev_get(dev);
3491 if (!indev)
3492 return;
3493
3494 for_ifa(indev) {
3495 switch (event) {
3496 case NETDEV_UP:
3497 qlcnic_config_ipaddr(adapter,
3498 ifa->ifa_address, QLCNIC_IP_UP);
3499 break;
3500 case NETDEV_DOWN:
3501 qlcnic_config_ipaddr(adapter,
3502 ifa->ifa_address, QLCNIC_IP_DOWN);
3503 break;
3504 default:
3505 break;
3506 }
3507 } endfor_ifa(indev);
3508
3509 in_dev_put(indev);
3510 }
3511
3512 static int qlcnic_netdev_event(struct notifier_block *this,
3513 unsigned long event, void *ptr)
3514 {
3515 struct qlcnic_adapter *adapter;
3516 struct net_device *dev = (struct net_device *)ptr;
3517
3518 recheck:
3519 if (dev == NULL)
3520 goto done;
3521
3522 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3523 dev = vlan_dev_real_dev(dev);
3524 goto recheck;
3525 }
3526
3527 if (!is_qlcnic_netdev(dev))
3528 goto done;
3529
3530 adapter = netdev_priv(dev);
3531
3532 if (!adapter)
3533 goto done;
3534
3535 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3536 goto done;
3537
3538 qlcnic_config_indev_addr(dev, event);
3539 done:
3540 return NOTIFY_DONE;
3541 }
3542
3543 static int
3544 qlcnic_inetaddr_event(struct notifier_block *this,
3545 unsigned long event, void *ptr)
3546 {
3547 struct qlcnic_adapter *adapter;
3548 struct net_device *dev;
3549
3550 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3551
3552 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3553
3554 recheck:
3555 if (dev == NULL || !netif_running(dev))
3556 goto done;
3557
3558 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3559 dev = vlan_dev_real_dev(dev);
3560 goto recheck;
3561 }
3562
3563 if (!is_qlcnic_netdev(dev))
3564 goto done;
3565
3566 adapter = netdev_priv(dev);
3567
3568 if (!adapter)
3569 goto done;
3570
3571 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3572 goto done;
3573
3574 switch (event) {
3575 case NETDEV_UP:
3576 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3577 break;
3578 case NETDEV_DOWN:
3579 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3580 break;
3581 default:
3582 break;
3583 }
3584
3585 done:
3586 return NOTIFY_DONE;
3587 }
3588
3589 static struct notifier_block qlcnic_netdev_cb = {
3590 .notifier_call = qlcnic_netdev_event,
3591 };
3592
3593 static struct notifier_block qlcnic_inetaddr_cb = {
3594 .notifier_call = qlcnic_inetaddr_event,
3595 };
3596 #else
3597 static void
3598 qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3599 { }
3600 #endif
3601 static struct pci_error_handlers qlcnic_err_handler = {
3602 .error_detected = qlcnic_io_error_detected,
3603 .slot_reset = qlcnic_io_slot_reset,
3604 .resume = qlcnic_io_resume,
3605 };
3606
3607 static struct pci_driver qlcnic_driver = {
3608 .name = qlcnic_driver_name,
3609 .id_table = qlcnic_pci_tbl,
3610 .probe = qlcnic_probe,
3611 .remove = __devexit_p(qlcnic_remove),
3612 #ifdef CONFIG_PM
3613 .suspend = qlcnic_suspend,
3614 .resume = qlcnic_resume,
3615 #endif
3616 .shutdown = qlcnic_shutdown,
3617 .err_handler = &qlcnic_err_handler
3618
3619 };
3620
3621 static int __init qlcnic_init_module(void)
3622 {
3623 int ret;
3624
3625 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3626
3627 #ifdef CONFIG_INET
3628 register_netdevice_notifier(&qlcnic_netdev_cb);
3629 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3630 #endif
3631
3632 ret = pci_register_driver(&qlcnic_driver);
3633 if (ret) {
3634 #ifdef CONFIG_INET
3635 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3636 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3637 #endif
3638 }
3639
3640 return ret;
3641 }
3642
3643 module_init(qlcnic_init_module);
3644
3645 static void __exit qlcnic_exit_module(void)
3646 {
3647
3648 pci_unregister_driver(&qlcnic_driver);
3649
3650 #ifdef CONFIG_INET
3651 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3652 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3653 #endif
3654 }
3655
3656 module_exit(qlcnic_exit_module);