drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25
26 MODULE_VERSION(DRV_VER);
28 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
29 MODULE_AUTHOR("Emulex Corporation");
30 MODULE_LICENSE("GPL");
31
32 static unsigned int num_vfs;
33 module_param(num_vfs, uint, S_IRUGO);
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35
36 static ushort rx_frag_size = 2048;
37 module_param(rx_frag_size, ushort, S_IRUGO);
38 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
40 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
49 { 0 }
50 };
51 MODULE_DEVICE_TABLE(pci, be_dev_ids);
52 /* UE Status Low CSR */
53 static const char * const ue_status_low_desc[] = {
54 "CEV",
55 "CTX",
56 "DBUF",
57 "ERX",
58 "Host",
59 "MPU",
60 "NDMA",
61 "PTC ",
62 "RDMA ",
63 "RXF ",
64 "RXIPS ",
65 "RXULP0 ",
66 "RXULP1 ",
67 "RXULP2 ",
68 "TIM ",
69 "TPOST ",
70 "TPRE ",
71 "TXIPS ",
72 "TXULP0 ",
73 "TXULP1 ",
74 "UC ",
75 "WDMA ",
76 "TXULP2 ",
77 "HOST1 ",
78 "P0_OB_LINK ",
79 "P1_OB_LINK ",
80 "HOST_GPIO ",
81 "MBOX ",
82 "AXGMAC0",
83 "AXGMAC1",
84 "JTAG",
85 "MPU_INTPEND"
86 };
87 /* UE Status High CSR */
88 static const char * const ue_status_hi_desc[] = {
89 "LPCMEMHOST",
90 "MGMT_MAC",
91 "PCS0ONLINE",
92 "MPU_IRAM",
93 "PCS1ONLINE",
94 "PCTL0",
95 "PCTL1",
96 "PMEM",
97 "RR",
98 "TXPB",
99 "RXPP",
100 "XAUI",
101 "TXP",
102 "ARM",
103 "IPC",
104 "HOST2",
105 "HOST3",
106 "HOST4",
107 "HOST5",
108 "HOST6",
109 "HOST7",
110 "HOST8",
111 "HOST9",
112 "NETC",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown",
120 "Unknown"
121 };
122
123 /* Is BE in a multi-channel mode */
124 static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128 }
129
130 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131 {
132 struct be_dma_mem *mem = &q->dma_mem;
133 if (mem->va) {
134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
136 mem->va = NULL;
137 }
138 }
139
140 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142 {
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
151 if (!mem->va)
152 return -ENOMEM;
153 return 0;
154 }
155
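/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register in PCI config space; returns early if the bit is
 * already in the requested state.
 */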
156 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
157 {
158 u32 reg, enabled;
159
160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
164 if (!enabled && enable)
165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166 else if (enabled && !enable)
167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
168 else
169 return;
170
171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
173 }
174
175 static void be_intr_set(struct be_adapter *adapter, bool enable)
176 {
177 int status = 0;
178
179 	/* On Lancer, interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189 }
190
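/* Ring the RX queue doorbell to tell the HW how many buffers were posted */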
191 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
192 {
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
196
197 wmb();
198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
199 }
200
201 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
203 {
204 u32 val = 0;
205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
207
208 wmb();
209 iowrite32(val, adapter->db + txo->db_offset);
210 }
211
212 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
213 bool arm, bool clear_int, u16 num_popped)
214 {
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
219
220 if (adapter->eeh_error)
221 return;
222
223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
230 }
231
232 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
233 {
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
238
239 if (adapter->eeh_error)
240 return;
241
242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
246 }
247
248 static int be_mac_addr_set(struct net_device *netdev, void *p)
249 {
250 struct be_adapter *adapter = netdev_priv(netdev);
251 struct device *dev = &adapter->pdev->dev;
252 struct sockaddr *addr = p;
253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
256
257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
260 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
261 * privilege or if PF did not provision the new MAC address.
262 * On BE3, this cmd will always fail if the VF doesn't have the
263 	 * FILTMGMT privilege. This failure is OK only if the PF programmed
264 * the MAC for the VF.
265 */
266 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
267 adapter->if_handle, &adapter->pmac_id[0], 0);
268 if (!status) {
269 curr_pmac_id = adapter->pmac_id[0];
270
271 /* Delete the old programmed MAC. This call may fail if the
272 * old MAC was already deleted by the PF driver.
273 */
274 if (adapter->pmac_id[0] != old_pmac_id)
275 be_cmd_pmac_del(adapter, adapter->if_handle,
276 old_pmac_id, 0);
277 }
278
279 /* Decide if the new MAC is successfully activated only after
280 * querying the FW
281 */
282 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
283 if (status)
284 goto err;
285
286 	/* The MAC change did not happen, either due to lack of privilege
287 	 * or because the PF didn't pre-provision it.
288 */
289 if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
290 status = -EPERM;
291 goto err;
292 }
293
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 dev_info(dev, "MAC address changed to %pM\n", mac);
296 return 0;
297 err:
298 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
299 return status;
300 }
301
302 /* BE2 supports only v0 cmd */
303 static void *hw_stats_from_cmd(struct be_adapter *adapter)
304 {
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
309 } else {
310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
313 }
314 }
315
316 /* BE2 supports only v0 cmd */
317 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
318 {
319 if (BE2_chip(adapter)) {
320 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321
322 return &hw_stats->erx;
323 } else {
324 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
327 }
328 }
329
330 static void populate_be_v0_stats(struct be_adapter *adapter)
331 {
332 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
333 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
334 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
335 struct be_port_rxf_stats_v0 *port_stats =
336 &rxf_stats->port[adapter->port_num];
337 struct be_drv_stats *drvs = &adapter->drv_stats;
338
339 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
340 drvs->rx_pause_frames = port_stats->rx_pause_frames;
341 drvs->rx_crc_errors = port_stats->rx_crc_errors;
342 drvs->rx_control_frames = port_stats->rx_control_frames;
343 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
344 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
345 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
346 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
347 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
348 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
349 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
350 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
351 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
352 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
353 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
354 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
355 drvs->rx_dropped_header_too_small =
356 port_stats->rx_dropped_header_too_small;
357 drvs->rx_address_filtered =
358 port_stats->rx_address_filtered +
359 port_stats->rx_vlan_filtered;
360 drvs->rx_alignment_symbol_errors =
361 port_stats->rx_alignment_symbol_errors;
362
363 drvs->tx_pauseframes = port_stats->tx_pauseframes;
364 drvs->tx_controlframes = port_stats->tx_controlframes;
365
366 if (adapter->port_num)
367 drvs->jabber_events = rxf_stats->port1_jabber_events;
368 else
369 drvs->jabber_events = rxf_stats->port0_jabber_events;
370 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
371 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
372 drvs->forwarded_packets = rxf_stats->forwarded_packets;
373 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
374 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
375 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
376 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
377 }
378
379 static void populate_be_v1_stats(struct be_adapter *adapter)
380 {
381 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
382 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
383 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
384 struct be_port_rxf_stats_v1 *port_stats =
385 &rxf_stats->port[adapter->port_num];
386 struct be_drv_stats *drvs = &adapter->drv_stats;
387
388 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
389 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
390 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
391 drvs->rx_pause_frames = port_stats->rx_pause_frames;
392 drvs->rx_crc_errors = port_stats->rx_crc_errors;
393 drvs->rx_control_frames = port_stats->rx_control_frames;
394 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
395 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
396 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
397 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
398 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
399 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
400 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
401 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
402 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
403 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
404 drvs->rx_dropped_header_too_small =
405 port_stats->rx_dropped_header_too_small;
406 drvs->rx_input_fifo_overflow_drop =
407 port_stats->rx_input_fifo_overflow_drop;
408 drvs->rx_address_filtered = port_stats->rx_address_filtered;
409 drvs->rx_alignment_symbol_errors =
410 port_stats->rx_alignment_symbol_errors;
411 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
412 drvs->tx_pauseframes = port_stats->tx_pauseframes;
413 drvs->tx_controlframes = port_stats->tx_controlframes;
414 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
415 drvs->jabber_events = port_stats->jabber_events;
416 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
417 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
418 drvs->forwarded_packets = rxf_stats->forwarded_packets;
419 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
420 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
421 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
422 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
423 }
424
425 static void populate_lancer_stats(struct be_adapter *adapter)
426 {
427
428 struct be_drv_stats *drvs = &adapter->drv_stats;
429 struct lancer_pport_stats *pport_stats =
430 pport_stats_from_cmd(adapter);
431
432 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
433 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
434 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
435 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
436 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
437 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
438 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
439 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
440 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
441 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
442 drvs->rx_dropped_tcp_length =
443 pport_stats->rx_dropped_invalid_tcp_length;
444 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
445 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
446 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
447 drvs->rx_dropped_header_too_small =
448 pport_stats->rx_dropped_header_too_small;
449 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
450 drvs->rx_address_filtered =
451 pport_stats->rx_address_filtered +
452 pport_stats->rx_vlan_filtered;
453 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
454 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
455 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
456 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
457 drvs->jabber_events = pport_stats->rx_jabbers;
458 drvs->forwarded_packets = pport_stats->num_forwards_lo;
459 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
460 drvs->rx_drops_too_many_frags =
461 pport_stats->rx_drops_too_many_frags_lo;
462 }
463
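/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit SW
 * counter: the low 16 bits track the last HW reading, and 65536 is added to
 * the accumulator each time the HW value wraps around.
 */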
464 static void accumulate_16bit_val(u32 *acc, u16 val)
465 {
466 #define lo(x) (x & 0xFFFF)
467 #define hi(x) (x & 0xFFFF0000)
468 bool wrapped = val < lo(*acc);
469 u32 newacc = hi(*acc) + val;
470
471 if (wrapped)
472 newacc += 65536;
473 ACCESS_ONCE(*acc) = newacc;
474 }
475
476 static void populate_erx_stats(struct be_adapter *adapter,
477 struct be_rx_obj *rxo,
478 u32 erx_stat)
479 {
480 if (!BEx_chip(adapter))
481 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
482 else
483 		/* this erx HW counter can wrap around after 65535;
484 		 * the driver accumulates it into a 32-bit value
485 */
486 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
487 (u16)erx_stat);
488 }
489
490 void be_parse_stats(struct be_adapter *adapter)
491 {
492 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
493 struct be_rx_obj *rxo;
494 int i;
495 u32 erx_stat;
496
497 if (lancer_chip(adapter)) {
498 populate_lancer_stats(adapter);
499 } else {
500 if (BE2_chip(adapter))
501 populate_be_v0_stats(adapter);
502 else
503 /* for BE3 and Skyhawk */
504 populate_be_v1_stats(adapter);
505
506 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
507 for_all_rx_queues(adapter, rxo, i) {
508 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
509 populate_erx_stats(adapter, rxo, erx_stat);
510 }
511 }
512 }
513
514 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
515 struct rtnl_link_stats64 *stats)
516 {
517 struct be_adapter *adapter = netdev_priv(netdev);
518 struct be_drv_stats *drvs = &adapter->drv_stats;
519 struct be_rx_obj *rxo;
520 struct be_tx_obj *txo;
521 u64 pkts, bytes;
522 unsigned int start;
523 int i;
524
525 for_all_rx_queues(adapter, rxo, i) {
526 const struct be_rx_stats *rx_stats = rx_stats(rxo);
527 do {
528 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
529 pkts = rx_stats(rxo)->rx_pkts;
530 bytes = rx_stats(rxo)->rx_bytes;
531 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
532 stats->rx_packets += pkts;
533 stats->rx_bytes += bytes;
534 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
535 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
536 rx_stats(rxo)->rx_drops_no_frags;
537 }
538
539 for_all_tx_queues(adapter, txo, i) {
540 const struct be_tx_stats *tx_stats = tx_stats(txo);
541 do {
542 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
543 pkts = tx_stats(txo)->tx_pkts;
544 bytes = tx_stats(txo)->tx_bytes;
545 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
546 stats->tx_packets += pkts;
547 stats->tx_bytes += bytes;
548 }
549
550 /* bad pkts received */
551 stats->rx_errors = drvs->rx_crc_errors +
552 drvs->rx_alignment_symbol_errors +
553 drvs->rx_in_range_errors +
554 drvs->rx_out_range_errors +
555 drvs->rx_frame_too_long +
556 drvs->rx_dropped_too_small +
557 drvs->rx_dropped_too_short +
558 drvs->rx_dropped_header_too_small +
559 drvs->rx_dropped_tcp_length +
560 drvs->rx_dropped_runt;
561
562 /* detailed rx errors */
563 stats->rx_length_errors = drvs->rx_in_range_errors +
564 drvs->rx_out_range_errors +
565 drvs->rx_frame_too_long;
566
567 stats->rx_crc_errors = drvs->rx_crc_errors;
568
569 /* frame alignment errors */
570 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
571
572 /* receiver fifo overrun */
573 	/* drops_no_pbuf is not per i/f, it's per BE card */
574 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
575 drvs->rx_input_fifo_overflow_drop +
576 drvs->rx_drops_no_pbuf;
577 return stats;
578 }
579
580 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
581 {
582 struct net_device *netdev = adapter->netdev;
583
584 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
585 netif_carrier_off(netdev);
586 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
587 }
588
589 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
590 netif_carrier_on(netdev);
591 else
592 netif_carrier_off(netdev);
593 }
594
595 static void be_tx_stats_update(struct be_tx_obj *txo,
596 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
597 {
598 struct be_tx_stats *stats = tx_stats(txo);
599
600 u64_stats_update_begin(&stats->sync);
601 stats->tx_reqs++;
602 stats->tx_wrbs += wrb_cnt;
603 stats->tx_bytes += copied;
604 stats->tx_pkts += (gso_segs ? gso_segs : 1);
605 if (stopped)
606 stats->tx_stops++;
607 u64_stats_update_end(&stats->sync);
608 }
609
610 /* Determine number of WRB entries needed to xmit data in an skb */
611 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
612 bool *dummy)
613 {
614 int cnt = (skb->len > skb->data_len);
615
616 cnt += skb_shinfo(skb)->nr_frags;
617
618 /* to account for hdr wrb */
619 cnt++;
620 if (lancer_chip(adapter) || !(cnt & 1)) {
621 *dummy = false;
622 } else {
623 /* add a dummy to make it an even num */
624 cnt++;
625 *dummy = true;
626 }
627 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
628 return cnt;
629 }
630
631 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
632 {
633 wrb->frag_pa_hi = upper_32_bits(addr);
634 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
635 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
636 wrb->rsvd0 = 0;
637 }
638
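/* Build the vlan tag for the TX WRB; if the skb's vlan priority is not
 * available in the adapter's priority bitmap, substitute the recommended
 * priority.
 */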
639 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
640 struct sk_buff *skb)
641 {
642 u8 vlan_prio;
643 u16 vlan_tag;
644
645 vlan_tag = vlan_tx_tag_get(skb);
646 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
647 /* If vlan priority provided by OS is NOT in available bmap */
648 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
649 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
650 adapter->recommended_prio;
651
652 return vlan_tag;
653 }
654
655 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
656 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
657 {
658 u16 vlan_tag;
659
660 memset(hdr, 0, sizeof(*hdr));
661
662 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
663
664 if (skb_is_gso(skb)) {
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
666 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
667 hdr, skb_shinfo(skb)->gso_size);
668 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
669 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
670 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
671 if (is_tcp_pkt(skb))
672 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
673 else if (is_udp_pkt(skb))
674 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
675 }
676
677 if (vlan_tx_tag_present(skb)) {
678 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
679 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
680 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
681 }
682
683 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
687 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
688 }
689
690 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
691 bool unmap_single)
692 {
693 dma_addr_t dma;
694
695 be_dws_le_to_cpu(wrb, sizeof(*wrb));
696
697 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
698 if (wrb->frag_len) {
699 if (unmap_single)
700 dma_unmap_single(dev, dma, wrb->frag_len,
701 DMA_TO_DEVICE);
702 else
703 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
704 }
705 }
706
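/* DMA-map the skb header and fragments and fill the TX WRBs (header WRB,
 * data WRBs and an optional dummy WRB). Returns the number of data bytes
 * queued, or 0 on a DMA mapping error after unmapping what was mapped.
 */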
707 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
708 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
709 bool skip_hw_vlan)
710 {
711 dma_addr_t busaddr;
712 int i, copied = 0;
713 struct device *dev = &adapter->pdev->dev;
714 struct sk_buff *first_skb = skb;
715 struct be_eth_wrb *wrb;
716 struct be_eth_hdr_wrb *hdr;
717 bool map_single = false;
718 u16 map_head;
719
720 hdr = queue_head_node(txq);
721 queue_head_inc(txq);
722 map_head = txq->head;
723
724 if (skb->len > skb->data_len) {
725 int len = skb_headlen(skb);
726 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
727 if (dma_mapping_error(dev, busaddr))
728 goto dma_err;
729 map_single = true;
730 wrb = queue_head_node(txq);
731 wrb_fill(wrb, busaddr, len);
732 be_dws_cpu_to_le(wrb, sizeof(*wrb));
733 queue_head_inc(txq);
734 copied += len;
735 }
736
737 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
738 const struct skb_frag_struct *frag =
739 &skb_shinfo(skb)->frags[i];
740 busaddr = skb_frag_dma_map(dev, frag, 0,
741 skb_frag_size(frag), DMA_TO_DEVICE);
742 if (dma_mapping_error(dev, busaddr))
743 goto dma_err;
744 wrb = queue_head_node(txq);
745 wrb_fill(wrb, busaddr, skb_frag_size(frag));
746 be_dws_cpu_to_le(wrb, sizeof(*wrb));
747 queue_head_inc(txq);
748 copied += skb_frag_size(frag);
749 }
750
751 if (dummy_wrb) {
752 wrb = queue_head_node(txq);
753 wrb_fill(wrb, 0, 0);
754 be_dws_cpu_to_le(wrb, sizeof(*wrb));
755 queue_head_inc(txq);
756 }
757
758 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
759 be_dws_cpu_to_le(hdr, sizeof(*hdr));
760
761 return copied;
762 dma_err:
763 txq->head = map_head;
764 while (copied) {
765 wrb = queue_head_node(txq);
766 unmap_tx_frag(dev, wrb, map_single);
767 map_single = false;
768 copied -= wrb->frag_len;
769 queue_head_inc(txq);
770 }
771 return 0;
772 }
773
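/* Insert the vlan tag (and the outer qnq vlan, if configured) into the
 * packet data itself, for the cases where HW vlan tagging must be skipped.
 */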
774 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
775 struct sk_buff *skb,
776 bool *skip_hw_vlan)
777 {
778 u16 vlan_tag = 0;
779
780 skb = skb_share_check(skb, GFP_ATOMIC);
781 if (unlikely(!skb))
782 return skb;
783
784 if (vlan_tx_tag_present(skb))
785 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
786
787 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
788 if (!vlan_tag)
789 vlan_tag = adapter->pvid;
790 		/* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
791 		 * skip VLAN insertion
792 */
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 }
796
797 if (vlan_tag) {
798 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
799 if (unlikely(!skb))
800 return skb;
801 skb->vlan_tci = 0;
802 }
803
804 /* Insert the outer VLAN, if any */
805 if (adapter->qnq_vid) {
806 vlan_tag = adapter->qnq_vid;
807 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
808 if (unlikely(!skb))
809 return skb;
810 if (skip_hw_vlan)
811 *skip_hw_vlan = true;
812 }
813
814 return skb;
815 }
816
817 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
818 {
819 struct ethhdr *eh = (struct ethhdr *)skb->data;
820 u16 offset = ETH_HLEN;
821
822 if (eh->h_proto == htons(ETH_P_IPV6)) {
823 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
824
825 offset += sizeof(struct ipv6hdr);
826 if (ip6h->nexthdr != NEXTHDR_TCP &&
827 ip6h->nexthdr != NEXTHDR_UDP) {
828 struct ipv6_opt_hdr *ehdr =
829 (struct ipv6_opt_hdr *) (skb->data + offset);
830
831 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
832 if (ehdr->hdrlen == 0xff)
833 return true;
834 }
835 }
836 return false;
837 }
838
839 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
840 {
841 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
842 }
843
844 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
845 struct sk_buff *skb)
846 {
847 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
848 }
849
850 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
851 struct sk_buff *skb,
852 bool *skip_hw_vlan)
853 {
854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
855 unsigned int eth_hdr_len;
856 struct iphdr *ip;
857
858 	/* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to
860 * pad short packets (<= 32 bytes) to a 36-byte length.
861 */
862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36))
864 goto tx_drop;
865 skb->len = 36;
866 }
867
868 /* For padded packets, BE HW modifies tot_len field in IP header
869 	 * incorrectly when a VLAN tag is inserted by HW.
870 * For padded packets, Lancer computes incorrect checksum.
871 */
872 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
873 VLAN_ETH_HLEN : ETH_HLEN;
874 if (skb->len <= 60 &&
875 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
876 is_ipv4_pkt(skb)) {
877 ip = (struct iphdr *)ip_hdr(skb);
878 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
879 }
880
881 /* If vlan tag is already inlined in the packet, skip HW VLAN
882 * tagging in UMC mode
883 */
884 if ((adapter->function_mode & UMC_ENABLED) &&
885 veh->h_vlan_proto == htons(ETH_P_8021Q))
886 *skip_hw_vlan = true;
887
888 /* HW has a bug wherein it will calculate CSUM for VLAN
889 	 * pkts even when CSUM offload is disabled.
890 	 * Manually insert the VLAN in the pkt.
891 */
892 if (skb->ip_summed != CHECKSUM_PARTIAL &&
893 vlan_tx_tag_present(skb)) {
894 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
895 if (unlikely(!skb))
896 goto tx_drop;
897 }
898
899 /* HW may lockup when VLAN HW tagging is requested on
900 * certain ipv6 packets. Drop such pkts if the HW workaround to
901 * skip HW tagging is not enabled by FW.
902 */
903 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
904 (adapter->pvid || adapter->qnq_vid) &&
905 !qnq_async_evt_rcvd(adapter)))
906 goto tx_drop;
907
908 	/* Manual VLAN tag insertion to prevent
909 	 * ASIC lockup when the ASIC inserts a VLAN tag into
910 * certain ipv6 packets. Insert VLAN tags in driver,
911 * and set event, completion, vlan bits accordingly
912 * in the Tx WRB.
913 */
914 if (be_ipv6_tx_stall_chk(adapter, skb) &&
915 be_vlan_tag_tx_chk(adapter, skb)) {
916 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
917 if (unlikely(!skb))
918 goto tx_drop;
919 }
920
921 return skb;
922 tx_drop:
923 dev_kfree_skb_any(skb);
924 return NULL;
925 }
926
927 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
928 {
929 struct be_adapter *adapter = netdev_priv(netdev);
930 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
931 struct be_queue_info *txq = &txo->q;
932 bool dummy_wrb, stopped = false;
933 u32 wrb_cnt = 0, copied = 0;
934 bool skip_hw_vlan = false;
935 u32 start = txq->head;
936
937 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
938 if (!skb)
939 return NETDEV_TX_OK;
940
941 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
942
943 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
944 skip_hw_vlan);
945 if (copied) {
946 int gso_segs = skb_shinfo(skb)->gso_segs;
947
948 /* record the sent skb in the sent_skb table */
949 BUG_ON(txo->sent_skb_list[start]);
950 txo->sent_skb_list[start] = skb;
951
952 		/* Ensure txq has space for the next skb; else stop the queue
953 		 * *BEFORE* ringing the tx doorbell, so that we serialize the
954 		 * tx compls of the current transmit, which will wake up the queue
955 */
956 atomic_add(wrb_cnt, &txq->used);
957 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
958 txq->len) {
959 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
960 stopped = true;
961 }
962
963 be_txq_notify(adapter, txo, wrb_cnt);
964
965 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
966 } else {
967 txq->head = start;
968 dev_kfree_skb_any(skb);
969 }
970 return NETDEV_TX_OK;
971 }
972
973 static int be_change_mtu(struct net_device *netdev, int new_mtu)
974 {
975 struct be_adapter *adapter = netdev_priv(netdev);
976 if (new_mtu < BE_MIN_MTU ||
977 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
978 (ETH_HLEN + ETH_FCS_LEN))) {
979 dev_info(&adapter->pdev->dev,
980 "MTU must be between %d and %d bytes\n",
981 BE_MIN_MTU,
982 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
983 return -EINVAL;
984 }
985 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
986 netdev->mtu, new_mtu);
987 netdev->mtu = new_mtu;
988 return 0;
989 }
990
991 /*
992 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
993 * If the user configures more, place BE in vlan promiscuous mode.
994 */
995 static int be_vid_config(struct be_adapter *adapter)
996 {
997 u16 vids[BE_NUM_VLANS_SUPPORTED];
998 u16 num = 0, i;
999 int status = 0;
1000
1001 /* No need to further configure vids if in promiscuous mode */
1002 if (adapter->promiscuous)
1003 return 0;
1004
1005 if (adapter->vlans_added > be_max_vlans(adapter))
1006 goto set_vlan_promisc;
1007
1008 /* Construct VLAN Table to give to HW */
1009 for (i = 0; i < VLAN_N_VID; i++)
1010 if (adapter->vlan_tag[i])
1011 vids[num++] = cpu_to_le16(i);
1012
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014 vids, num, 1, 0);
1015
1016 if (status) {
1017 /* Set to VLAN promisc mode as setting VLAN filter failed */
1018 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1019 goto set_vlan_promisc;
1020 dev_err(&adapter->pdev->dev,
1021 "Setting HW VLAN filtering failed.\n");
1022 } else {
1023 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024 /* hw VLAN filtering re-enabled. */
1025 status = be_cmd_rx_filter(adapter,
1026 BE_FLAGS_VLAN_PROMISC, OFF);
1027 if (!status) {
1028 dev_info(&adapter->pdev->dev,
1029 "Disabling VLAN Promiscuous mode.\n");
1030 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031 dev_info(&adapter->pdev->dev,
1032 "Re-Enabling HW VLAN filtering\n");
1033 }
1034 }
1035 }
1036
1037 return status;
1038
1039 set_vlan_promisc:
1040 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1041
1042 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043 if (!status) {
1044 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1045 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047 } else
1048 dev_err(&adapter->pdev->dev,
1049 "Failed to enable VLAN Promiscuous mode.\n");
1050 return status;
1051 }
1052
1053 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1054 {
1055 struct be_adapter *adapter = netdev_priv(netdev);
1056 int status = 0;
1057
1058
1059 /* Packets with VID 0 are always received by Lancer by default */
1060 if (lancer_chip(adapter) && vid == 0)
1061 goto ret;
1062
1063 adapter->vlan_tag[vid] = 1;
1064 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
1065 status = be_vid_config(adapter);
1066
1067 if (!status)
1068 adapter->vlans_added++;
1069 else
1070 adapter->vlan_tag[vid] = 0;
1071 ret:
1072 return status;
1073 }
1074
1075 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1076 {
1077 struct be_adapter *adapter = netdev_priv(netdev);
1078 int status = 0;
1079
1080 /* Packets with VID 0 are always received by Lancer by default */
1081 if (lancer_chip(adapter) && vid == 0)
1082 goto ret;
1083
1084 adapter->vlan_tag[vid] = 0;
1085 if (adapter->vlans_added <= be_max_vlans(adapter))
1086 status = be_vid_config(adapter);
1087
1088 if (!status)
1089 adapter->vlans_added--;
1090 else
1091 adapter->vlan_tag[vid] = 1;
1092 ret:
1093 return status;
1094 }
1095
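/* Sync the HW RX filters (promiscuous, multicast and unicast MAC lists)
 * with the current netdev state.
 */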
1096 static void be_set_rx_mode(struct net_device *netdev)
1097 {
1098 struct be_adapter *adapter = netdev_priv(netdev);
1099 int status;
1100
1101 if (netdev->flags & IFF_PROMISC) {
1102 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1103 adapter->promiscuous = true;
1104 goto done;
1105 }
1106
1107 /* BE was previously in promiscuous mode; disable it */
1108 if (adapter->promiscuous) {
1109 adapter->promiscuous = false;
1110 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1111
1112 if (adapter->vlans_added)
1113 be_vid_config(adapter);
1114 }
1115
1116 /* Enable multicast promisc if num configured exceeds what we support */
1117 if (netdev->flags & IFF_ALLMULTI ||
1118 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1119 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1120 goto done;
1121 }
1122
1123 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1124 struct netdev_hw_addr *ha;
1125 int i = 1; /* First slot is claimed by the Primary MAC */
1126
1127 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1128 be_cmd_pmac_del(adapter, adapter->if_handle,
1129 adapter->pmac_id[i], 0);
1130 }
1131
1132 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1133 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1134 adapter->promiscuous = true;
1135 goto done;
1136 }
1137
1138 netdev_for_each_uc_addr(ha, adapter->netdev) {
1139 adapter->uc_macs++; /* First slot is for Primary MAC */
1140 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1141 adapter->if_handle,
1142 &adapter->pmac_id[adapter->uc_macs], 0);
1143 }
1144 }
1145
1146 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1147
1148 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1149 if (status) {
1150 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1151 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1152 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1153 }
1154 done:
1155 return;
1156 }
1157
1158 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1159 {
1160 struct be_adapter *adapter = netdev_priv(netdev);
1161 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1162 int status;
1163
1164 if (!sriov_enabled(adapter))
1165 return -EPERM;
1166
1167 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1168 return -EINVAL;
1169
1170 if (BEx_chip(adapter)) {
1171 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1172 vf + 1);
1173
1174 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1175 &vf_cfg->pmac_id, vf + 1);
1176 } else {
1177 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1178 vf + 1);
1179 }
1180
1181 if (status)
1182 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1183 mac, vf);
1184 else
1185 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1186
1187 return status;
1188 }
1189
1190 static int be_get_vf_config(struct net_device *netdev, int vf,
1191 struct ifla_vf_info *vi)
1192 {
1193 struct be_adapter *adapter = netdev_priv(netdev);
1194 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1195
1196 if (!sriov_enabled(adapter))
1197 return -EPERM;
1198
1199 if (vf >= adapter->num_vfs)
1200 return -EINVAL;
1201
1202 vi->vf = vf;
1203 vi->tx_rate = vf_cfg->tx_rate;
1204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1207
1208 return 0;
1209 }
1210
1211 static int be_set_vf_vlan(struct net_device *netdev,
1212 int vf, u16 vlan, u8 qos)
1213 {
1214 struct be_adapter *adapter = netdev_priv(netdev);
1215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1216 int status = 0;
1217
1218 if (!sriov_enabled(adapter))
1219 return -EPERM;
1220
1221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1222 return -EINVAL;
1223
1224 if (vlan || qos) {
1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
1227 		/* If this is a new value, program it; else skip. */
1228 vf_cfg->vlan_tag = vlan;
1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1230 vf_cfg->if_handle, 0);
1231 }
1232 } else {
1233 /* Reset Transparent Vlan Tagging. */
1234 vf_cfg->vlan_tag = 0;
1235 vlan = vf_cfg->def_vid;
1236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1237 vf_cfg->if_handle, 0);
1238 }
1239
1240
1241 if (status)
1242 dev_info(&adapter->pdev->dev,
1243 "VLAN %d config on VF %d failed\n", vlan, vf);
1244 return status;
1245 }
1246
1247 static int be_set_vf_tx_rate(struct net_device *netdev,
1248 int vf, int rate)
1249 {
1250 struct be_adapter *adapter = netdev_priv(netdev);
1251 int status = 0;
1252
1253 if (!sriov_enabled(adapter))
1254 return -EPERM;
1255
1256 if (vf >= adapter->num_vfs)
1257 return -EINVAL;
1258
1259 if (rate < 100 || rate > 10000) {
1260 dev_err(&adapter->pdev->dev,
1261 "tx rate must be between 100 and 10000 Mbps\n");
1262 return -EINVAL;
1263 }
1264
1265 if (lancer_chip(adapter))
1266 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1267 else
1268 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1269
1270 if (status)
1271 dev_err(&adapter->pdev->dev,
1272 "tx rate %d on VF %d failed\n", rate, vf);
1273 else
1274 adapter->vf_cfg[vf].tx_rate = rate;
1275 return status;
1276 }
1277
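/* Adaptive interrupt coalescing: once a second, derive a new EQ delay from
 * the observed RX packet rate and program it if it changed.
 */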
1278 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1279 {
1280 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1281 ulong now = jiffies;
1282 ulong delta = now - stats->rx_jiffies;
1283 u64 pkts;
1284 unsigned int start, eqd;
1285
1286 if (!eqo->enable_aic) {
1287 eqd = eqo->eqd;
1288 goto modify_eqd;
1289 }
1290
1291 if (eqo->idx >= adapter->num_rx_qs)
1292 return;
1293
1294 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1295
1296 /* Wrapped around */
1297 if (time_before(now, stats->rx_jiffies)) {
1298 stats->rx_jiffies = now;
1299 return;
1300 }
1301
1302 /* Update once a second */
1303 if (delta < HZ)
1304 return;
1305
1306 do {
1307 start = u64_stats_fetch_begin_bh(&stats->sync);
1308 pkts = stats->rx_pkts;
1309 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1310
1311 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1312 stats->rx_pkts_prev = pkts;
1313 stats->rx_jiffies = now;
1314 eqd = (stats->rx_pps / 110000) << 3;
1315 eqd = min(eqd, eqo->max_eqd);
1316 eqd = max(eqd, eqo->min_eqd);
1317 if (eqd < 10)
1318 eqd = 0;
1319
1320 modify_eqd:
1321 if (eqd != eqo->cur_eqd) {
1322 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1323 eqo->cur_eqd = eqd;
1324 }
1325 }
1326
1327 static void be_rx_stats_update(struct be_rx_obj *rxo,
1328 struct be_rx_compl_info *rxcp)
1329 {
1330 struct be_rx_stats *stats = rx_stats(rxo);
1331
1332 u64_stats_update_begin(&stats->sync);
1333 stats->rx_compl++;
1334 stats->rx_bytes += rxcp->pkt_size;
1335 stats->rx_pkts++;
1336 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1337 stats->rx_mcast_pkts++;
1338 if (rxcp->err)
1339 stats->rx_compl_err++;
1340 u64_stats_update_end(&stats->sync);
1341 }
1342
1343 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1344 {
1345 	/* L4 checksum is not reliable for non-TCP/UDP packets.
1346 * Also ignore ipcksm for ipv6 pkts */
1347 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1348 (rxcp->ip_csum || rxcp->ipv6);
1349 }
1350
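/* Return the page_info for the given RX frag index; unmap the backing page
 * when this frag is the last user of that page.
 */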
1351 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1352 u16 frag_idx)
1353 {
1354 struct be_adapter *adapter = rxo->adapter;
1355 struct be_rx_page_info *rx_page_info;
1356 struct be_queue_info *rxq = &rxo->q;
1357
1358 rx_page_info = &rxo->page_info_tbl[frag_idx];
1359 BUG_ON(!rx_page_info->page);
1360
1361 if (rx_page_info->last_page_user) {
1362 dma_unmap_page(&adapter->pdev->dev,
1363 dma_unmap_addr(rx_page_info, bus),
1364 adapter->big_page_size, DMA_FROM_DEVICE);
1365 rx_page_info->last_page_user = false;
1366 }
1367
1368 atomic_dec(&rxq->used);
1369 return rx_page_info;
1370 }
1371
1372 /* Throw away the data in the Rx completion */
1373 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1374 struct be_rx_compl_info *rxcp)
1375 {
1376 struct be_queue_info *rxq = &rxo->q;
1377 struct be_rx_page_info *page_info;
1378 u16 i, num_rcvd = rxcp->num_rcvd;
1379
1380 for (i = 0; i < num_rcvd; i++) {
1381 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1382 put_page(page_info->page);
1383 memset(page_info, 0, sizeof(*page_info));
1384 index_inc(&rxcp->rxq_idx, rxq->len);
1385 }
1386 }
1387
1388 /*
1389 * skb_fill_rx_data forms a complete skb for an ether frame
1390 * indicated by rxcp.
1391 */
1392 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1393 struct be_rx_compl_info *rxcp)
1394 {
1395 struct be_queue_info *rxq = &rxo->q;
1396 struct be_rx_page_info *page_info;
1397 u16 i, j;
1398 u16 hdr_len, curr_frag_len, remaining;
1399 u8 *start;
1400
1401 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1402 start = page_address(page_info->page) + page_info->page_offset;
1403 prefetch(start);
1404
1405 /* Copy data in the first descriptor of this completion */
1406 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1407
1408 skb->len = curr_frag_len;
1409 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1410 memcpy(skb->data, start, curr_frag_len);
1411 /* Complete packet has now been moved to data */
1412 put_page(page_info->page);
1413 skb->data_len = 0;
1414 skb->tail += curr_frag_len;
1415 } else {
1416 hdr_len = ETH_HLEN;
1417 memcpy(skb->data, start, hdr_len);
1418 skb_shinfo(skb)->nr_frags = 1;
1419 skb_frag_set_page(skb, 0, page_info->page);
1420 skb_shinfo(skb)->frags[0].page_offset =
1421 page_info->page_offset + hdr_len;
1422 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1423 skb->data_len = curr_frag_len - hdr_len;
1424 skb->truesize += rx_frag_size;
1425 skb->tail += hdr_len;
1426 }
1427 page_info->page = NULL;
1428
1429 if (rxcp->pkt_size <= rx_frag_size) {
1430 BUG_ON(rxcp->num_rcvd != 1);
1431 return;
1432 }
1433
1434 /* More frags present for this completion */
1435 index_inc(&rxcp->rxq_idx, rxq->len);
1436 remaining = rxcp->pkt_size - curr_frag_len;
1437 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1438 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1439 curr_frag_len = min(remaining, rx_frag_size);
1440
1441 /* Coalesce all frags from the same physical page in one slot */
1442 if (page_info->page_offset == 0) {
1443 /* Fresh page */
1444 j++;
1445 skb_frag_set_page(skb, j, page_info->page);
1446 skb_shinfo(skb)->frags[j].page_offset =
1447 page_info->page_offset;
1448 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1449 skb_shinfo(skb)->nr_frags++;
1450 } else {
1451 put_page(page_info->page);
1452 }
1453
1454 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1455 skb->len += curr_frag_len;
1456 skb->data_len += curr_frag_len;
1457 skb->truesize += rx_frag_size;
1458 remaining -= curr_frag_len;
1459 index_inc(&rxcp->rxq_idx, rxq->len);
1460 page_info->page = NULL;
1461 }
1462 BUG_ON(j > MAX_SKB_FRAGS);
1463 }
1464
1465 /* Process the RX completion indicated by rxcp when GRO is disabled */
1466 static void be_rx_compl_process(struct be_rx_obj *rxo,
1467 struct be_rx_compl_info *rxcp)
1468 {
1469 struct be_adapter *adapter = rxo->adapter;
1470 struct net_device *netdev = adapter->netdev;
1471 struct sk_buff *skb;
1472
1473 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1474 if (unlikely(!skb)) {
1475 rx_stats(rxo)->rx_drops_no_skbs++;
1476 be_rx_compl_discard(rxo, rxcp);
1477 return;
1478 }
1479
1480 skb_fill_rx_data(rxo, skb, rxcp);
1481
1482 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1483 skb->ip_summed = CHECKSUM_UNNECESSARY;
1484 else
1485 skb_checksum_none_assert(skb);
1486
1487 skb->protocol = eth_type_trans(skb, netdev);
1488 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1489 if (netdev->features & NETIF_F_RXHASH)
1490 skb->rxhash = rxcp->rss_hash;
1491
1492
1493 if (rxcp->vlanf)
1494 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1495
1496 netif_receive_skb(skb);
1497 }
1498
1499 /* Process the RX completion indicated by rxcp when GRO is enabled */
1500 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1501 struct napi_struct *napi,
1502 struct be_rx_compl_info *rxcp)
1503 {
1504 struct be_adapter *adapter = rxo->adapter;
1505 struct be_rx_page_info *page_info;
1506 struct sk_buff *skb = NULL;
1507 struct be_queue_info *rxq = &rxo->q;
1508 u16 remaining, curr_frag_len;
1509 u16 i, j;
1510
1511 skb = napi_get_frags(napi);
1512 if (!skb) {
1513 be_rx_compl_discard(rxo, rxcp);
1514 return;
1515 }
1516
1517 remaining = rxcp->pkt_size;
1518 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1519 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1520
1521 curr_frag_len = min(remaining, rx_frag_size);
1522
1523 /* Coalesce all frags from the same physical page in one slot */
1524 if (i == 0 || page_info->page_offset == 0) {
1525 /* First frag or Fresh page */
1526 j++;
1527 skb_frag_set_page(skb, j, page_info->page);
1528 skb_shinfo(skb)->frags[j].page_offset =
1529 page_info->page_offset;
1530 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1531 } else {
1532 put_page(page_info->page);
1533 }
1534 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1535 skb->truesize += rx_frag_size;
1536 remaining -= curr_frag_len;
1537 index_inc(&rxcp->rxq_idx, rxq->len);
1538 memset(page_info, 0, sizeof(*page_info));
1539 }
1540 BUG_ON(j > MAX_SKB_FRAGS);
1541
1542 skb_shinfo(skb)->nr_frags = j + 1;
1543 skb->len = rxcp->pkt_size;
1544 skb->data_len = rxcp->pkt_size;
1545 skb->ip_summed = CHECKSUM_UNNECESSARY;
1546 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1547 if (adapter->netdev->features & NETIF_F_RXHASH)
1548 skb->rxhash = rxcp->rss_hash;
1549
1550 if (rxcp->vlanf)
1551 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1552
1553 napi_gro_frags(napi);
1554 }
1555
1556 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1557 struct be_rx_compl_info *rxcp)
1558 {
1559 rxcp->pkt_size =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1561 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1562 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1563 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1564 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1565 rxcp->ip_csum =
1566 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1567 rxcp->l4_csum =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1569 rxcp->ipv6 =
1570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1571 rxcp->rxq_idx =
1572 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1573 rxcp->num_rcvd =
1574 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1575 rxcp->pkt_type =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1577 rxcp->rss_hash =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1579 if (rxcp->vlanf) {
1580 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1581 compl);
1582 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1583 compl);
1584 }
1585 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1586 }
1587
1588 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1589 struct be_rx_compl_info *rxcp)
1590 {
1591 rxcp->pkt_size =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1593 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1594 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1595 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1596 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1597 rxcp->ip_csum =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1599 rxcp->l4_csum =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1601 rxcp->ipv6 =
1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1603 rxcp->rxq_idx =
1604 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1605 rxcp->num_rcvd =
1606 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1607 rxcp->pkt_type =
1608 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1609 rxcp->rss_hash =
1610 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1611 if (rxcp->vlanf) {
1612 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1613 compl);
1614 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1615 compl);
1616 }
1617 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1618 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1619 ip_frag, compl);
1620 }
1621
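/* Return the next valid RX completion from the CQ (converted to CPU
 * endianness and parsed into rxo->rxcp), or NULL if none is pending.
 */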
1622 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1623 {
1624 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1625 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1626 struct be_adapter *adapter = rxo->adapter;
1627
1628 	/* For checking the valid bit, it is OK to use either definition, as the
1629 * valid bit is at the same position in both v0 and v1 Rx compl */
1630 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1631 return NULL;
1632
1633 rmb();
1634 be_dws_le_to_cpu(compl, sizeof(*compl));
1635
1636 if (adapter->be3_native)
1637 be_parse_rx_compl_v1(compl, rxcp);
1638 else
1639 be_parse_rx_compl_v0(compl, rxcp);
1640
1641 if (rxcp->ip_frag)
1642 rxcp->l4_csum = 0;
1643
1644 if (rxcp->vlanf) {
1645 /* vlanf could be wrongly set in some cards.
1646 		 * ignore it if vtm is not set */
1647 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1648 rxcp->vlanf = 0;
1649
1650 if (!lancer_chip(adapter))
1651 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1652
1653 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1654 !adapter->vlan_tag[rxcp->vlan_tag])
1655 rxcp->vlanf = 0;
1656 }
1657
1658 	/* As the compl has been parsed, reset it; we won't touch it again */
1659 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1660
1661 queue_tail_inc(&rxo->cq);
1662 return rxcp;
1663 }
1664
1665 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1666 {
1667 u32 order = get_order(size);
1668
1669 if (order > 0)
1670 gfp |= __GFP_COMP;
1671 return alloc_pages(gfp, order);
1672 }
1673
1674 /*
1675  * Allocate a page, split it into fragments of size rx_frag_size and post
1676  * them as receive buffers to BE
1677 */
1678 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1679 {
1680 struct be_adapter *adapter = rxo->adapter;
1681 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1682 struct be_queue_info *rxq = &rxo->q;
1683 struct page *pagep = NULL;
1684 struct be_eth_rx_d *rxd;
1685 u64 page_dmaaddr = 0, frag_dmaaddr;
1686 u32 posted, page_offset = 0;
1687
1688 page_info = &rxo->page_info_tbl[rxq->head];
1689 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1690 if (!pagep) {
1691 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1692 if (unlikely(!pagep)) {
1693 rx_stats(rxo)->rx_post_fail++;
1694 break;
1695 }
1696 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1697 0, adapter->big_page_size,
1698 DMA_FROM_DEVICE);
1699 page_info->page_offset = 0;
1700 } else {
1701 get_page(pagep);
1702 page_info->page_offset = page_offset + rx_frag_size;
1703 }
1704 page_offset = page_info->page_offset;
1705 page_info->page = pagep;
1706 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1707 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1708
1709 rxd = queue_head_node(rxq);
1710 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1711 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1712
1713 /* Any space left in the current big page for another frag? */
1714 if ((page_offset + rx_frag_size + rx_frag_size) >
1715 adapter->big_page_size) {
1716 pagep = NULL;
1717 page_info->last_page_user = true;
1718 }
1719
1720 prev_page_info = page_info;
1721 queue_head_inc(rxq);
1722 page_info = &rxo->page_info_tbl[rxq->head];
1723 }
1724 if (pagep)
1725 prev_page_info->last_page_user = true;
1726
1727 if (posted) {
1728 atomic_add(posted, &rxq->used);
1729 be_rxq_notify(adapter, rxq->id, posted);
1730 } else if (atomic_read(&rxq->used) == 0) {
1731 /* Let be_worker replenish when memory is available */
1732 rxo->rx_post_starved = true;
1733 }
1734 }
1735
1736 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1737 {
1738 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1739
1740 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1741 return NULL;
1742
1743 rmb();
1744 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1745
1746 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1747
1748 queue_tail_inc(tx_cq);
1749 return txcp;
1750 }
1751
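/* Unmap and free the skb for a completed TX request; returns the number of
 * WRBs (including the header WRB) consumed by that skb.
 */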
1752 static u16 be_tx_compl_process(struct be_adapter *adapter,
1753 struct be_tx_obj *txo, u16 last_index)
1754 {
1755 struct be_queue_info *txq = &txo->q;
1756 struct be_eth_wrb *wrb;
1757 struct sk_buff **sent_skbs = txo->sent_skb_list;
1758 struct sk_buff *sent_skb;
1759 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1760 bool unmap_skb_hdr = true;
1761
1762 sent_skb = sent_skbs[txq->tail];
1763 BUG_ON(!sent_skb);
1764 sent_skbs[txq->tail] = NULL;
1765
1766 /* skip header wrb */
1767 queue_tail_inc(txq);
1768
1769 do {
1770 cur_index = txq->tail;
1771 wrb = queue_tail_node(txq);
1772 unmap_tx_frag(&adapter->pdev->dev, wrb,
1773 (unmap_skb_hdr && skb_headlen(sent_skb)));
1774 unmap_skb_hdr = false;
1775
1776 num_wrbs++;
1777 queue_tail_inc(txq);
1778 } while (cur_index != last_index);
1779
1780 kfree_skb(sent_skb);
1781 return num_wrbs;
1782 }
1783
1784 /* Return the number of events in the event queue */
1785 static inline int events_get(struct be_eq_obj *eqo)
1786 {
1787 struct be_eq_entry *eqe;
1788 int num = 0;
1789
1790 do {
1791 eqe = queue_tail_node(&eqo->q);
1792 if (eqe->evt == 0)
1793 break;
1794
1795 rmb();
1796 eqe->evt = 0;
1797 num++;
1798 queue_tail_inc(&eqo->q);
1799 } while (true);
1800
1801 return num;
1802 }
1803
1804 /* Leaves the EQ in a disarmed state */
1805 static void be_eq_clean(struct be_eq_obj *eqo)
1806 {
1807 int num = events_get(eqo);
1808
1809 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1810 }
1811
1812 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1813 {
1814 struct be_rx_page_info *page_info;
1815 struct be_queue_info *rxq = &rxo->q;
1816 struct be_queue_info *rx_cq = &rxo->cq;
1817 struct be_rx_compl_info *rxcp;
1818 struct be_adapter *adapter = rxo->adapter;
1819 int flush_wait = 0;
1820 u16 tail;
1821
1822 /* Consume pending rx completions.
1823 * Wait for the flush completion (identified by zero num_rcvd)
1824 * to arrive. Notify CQ even when there are no more CQ entries
1825 * for HW to flush partially coalesced CQ entries.
1826 * In Lancer, there is no need to wait for flush compl.
1827 */
1828 for (;;) {
1829 rxcp = be_rx_compl_get(rxo);
1830 if (rxcp == NULL) {
1831 if (lancer_chip(adapter))
1832 break;
1833
1834 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1835 dev_warn(&adapter->pdev->dev,
1836 "did not receive flush compl\n");
1837 break;
1838 }
1839 be_cq_notify(adapter, rx_cq->id, true, 0);
1840 mdelay(1);
1841 } else {
1842 be_rx_compl_discard(rxo, rxcp);
1843 be_cq_notify(adapter, rx_cq->id, false, 1);
1844 if (rxcp->num_rcvd == 0)
1845 break;
1846 }
1847 }
1848
1849 /* After cleanup, leave the CQ in unarmed state */
1850 be_cq_notify(adapter, rx_cq->id, false, 0);
1851
1852 /* Then free posted rx buffers that were not used */
1853 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1854 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1855 page_info = get_rx_page_info(rxo, tail);
1856 put_page(page_info->page);
1857 memset(page_info, 0, sizeof(*page_info));
1858 }
1859 BUG_ON(atomic_read(&rxq->used));
1860 rxq->tail = rxq->head = 0;
1861 }
1862
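/* Drain TX completions at ifdown time: poll each TX CQ for up to ~200ms,
 * then force-free any posted skbs whose completions never arrived so that
 * no memory is leaked.
 */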
1863 static void be_tx_compl_clean(struct be_adapter *adapter)
1864 {
1865 struct be_tx_obj *txo;
1866 struct be_queue_info *txq;
1867 struct be_eth_tx_compl *txcp;
1868 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1869 struct sk_buff *sent_skb;
1870 bool dummy_wrb;
1871 int i, pending_txqs;
1872
1873 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1874 do {
1875 pending_txqs = adapter->num_tx_qs;
1876
1877 for_all_tx_queues(adapter, txo, i) {
1878 txq = &txo->q;
1879 while ((txcp = be_tx_compl_get(&txo->cq))) {
1880 end_idx =
1881 AMAP_GET_BITS(struct amap_eth_tx_compl,
1882 wrb_index, txcp);
1883 num_wrbs += be_tx_compl_process(adapter, txo,
1884 end_idx);
1885 cmpl++;
1886 }
1887 if (cmpl) {
1888 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1889 atomic_sub(num_wrbs, &txq->used);
1890 cmpl = 0;
1891 num_wrbs = 0;
1892 }
1893 if (atomic_read(&txq->used) == 0)
1894 pending_txqs--;
1895 }
1896
1897 if (pending_txqs == 0 || ++timeo > 200)
1898 break;
1899
1900 mdelay(1);
1901 } while (true);
1902
1903 for_all_tx_queues(adapter, txo, i) {
1904 txq = &txo->q;
1905 if (atomic_read(&txq->used))
1906 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1907 atomic_read(&txq->used));
1908
1909 /* free posted tx for which compls will never arrive */
1910 while (atomic_read(&txq->used)) {
1911 sent_skb = txo->sent_skb_list[txq->tail];
1912 end_idx = txq->tail;
1913 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1914 &dummy_wrb);
1915 index_adv(&end_idx, num_wrbs - 1, txq->len);
1916 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1917 atomic_sub(num_wrbs, &txq->used);
1918 }
1919 }
1920 }
1921
1922 static void be_evt_queues_destroy(struct be_adapter *adapter)
1923 {
1924 struct be_eq_obj *eqo;
1925 int i;
1926
1927 for_all_evt_queues(adapter, eqo, i) {
1928 if (eqo->q.created) {
1929 be_eq_clean(eqo);
1930 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1931 netif_napi_del(&eqo->napi);
1932 }
1933 be_queue_free(adapter, &eqo->q);
1934 }
1935 }
1936
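/* Create one EQ (with its own NAPI context) per vector; the EQ count is
 * capped by the number of available IRQ vectors and the configured number
 * of queues.
 */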
1937 static int be_evt_queues_create(struct be_adapter *adapter)
1938 {
1939 struct be_queue_info *eq;
1940 struct be_eq_obj *eqo;
1941 int i, rc;
1942
1943 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1944 adapter->cfg_num_qs);
1945
1946 for_all_evt_queues(adapter, eqo, i) {
1947 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1948 BE_NAPI_WEIGHT);
1949 eqo->adapter = adapter;
1950 eqo->tx_budget = BE_TX_BUDGET;
1951 eqo->idx = i;
1952 eqo->max_eqd = BE_MAX_EQD;
1953 eqo->enable_aic = true;
1954
1955 eq = &eqo->q;
1956 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1957 sizeof(struct be_eq_entry));
1958 if (rc)
1959 return rc;
1960
1961 rc = be_cmd_eq_create(adapter, eqo);
1962 if (rc)
1963 return rc;
1964 }
1965 return 0;
1966 }
1967
1968 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1969 {
1970 struct be_queue_info *q;
1971
1972 q = &adapter->mcc_obj.q;
1973 if (q->created)
1974 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1975 be_queue_free(adapter, q);
1976
1977 q = &adapter->mcc_obj.cq;
1978 if (q->created)
1979 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980 be_queue_free(adapter, q);
1981 }
1982
1983 /* Must be called only after TX qs are created as MCC shares TX EQ */
1984 static int be_mcc_queues_create(struct be_adapter *adapter)
1985 {
1986 struct be_queue_info *q, *cq;
1987
1988 cq = &adapter->mcc_obj.cq;
1989 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1990 sizeof(struct be_mcc_compl)))
1991 goto err;
1992
1993 /* Use the default EQ for MCC completions */
1994 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1995 goto mcc_cq_free;
1996
1997 q = &adapter->mcc_obj.q;
1998 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1999 goto mcc_cq_destroy;
2000
2001 if (be_cmd_mccq_create(adapter, q, cq))
2002 goto mcc_q_free;
2003
2004 return 0;
2005
2006 mcc_q_free:
2007 be_queue_free(adapter, q);
2008 mcc_cq_destroy:
2009 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2010 mcc_cq_free:
2011 be_queue_free(adapter, cq);
2012 err:
2013 return -1;
2014 }
2015
2016 static void be_tx_queues_destroy(struct be_adapter *adapter)
2017 {
2018 struct be_queue_info *q;
2019 struct be_tx_obj *txo;
2020 u8 i;
2021
2022 for_all_tx_queues(adapter, txo, i) {
2023 q = &txo->q;
2024 if (q->created)
2025 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2026 be_queue_free(adapter, q);
2027
2028 q = &txo->cq;
2029 if (q->created)
2030 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2031 be_queue_free(adapter, q);
2032 }
2033 }
2034
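/* Create a CQ/TXQ pair per TX queue; TX CQs are distributed round-robin
 * across the EQs when there are fewer EQs than TX queues.
 */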
2035 static int be_tx_qs_create(struct be_adapter *adapter)
2036 {
2037 struct be_queue_info *cq, *eq;
2038 struct be_tx_obj *txo;
2039 int status, i;
2040
2041 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2042
2043 for_all_tx_queues(adapter, txo, i) {
2044 cq = &txo->cq;
2045 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2046 sizeof(struct be_eth_tx_compl));
2047 if (status)
2048 return status;
2049
2050 /* If num_evt_qs is less than num_tx_qs, then more than
2051 * one txq shares an eq
2052 */
2053 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2054 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2055 if (status)
2056 return status;
2057
2058 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2059 sizeof(struct be_eth_wrb));
2060 if (status)
2061 return status;
2062
2063 status = be_cmd_txq_create(adapter, txo);
2064 if (status)
2065 return status;
2066 }
2067
2068 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2069 adapter->num_tx_qs);
2070 return 0;
2071 }
2072
2073 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2074 {
2075 struct be_queue_info *q;
2076 struct be_rx_obj *rxo;
2077 int i;
2078
2079 for_all_rx_queues(adapter, rxo, i) {
2080 q = &rxo->cq;
2081 if (q->created)
2082 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2083 be_queue_free(adapter, q);
2084 }
2085 }
2086
2087 static int be_rx_cqs_create(struct be_adapter *adapter)
2088 {
2089 struct be_queue_info *eq, *cq;
2090 struct be_rx_obj *rxo;
2091 int rc, i;
2092
2093 /* We can create as many RSS rings as there are EQs. */
2094 adapter->num_rx_qs = adapter->num_evt_qs;
2095
2096 /* We'll use RSS only if at least 2 RSS rings are supported.
2097 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2098 */
2099 if (adapter->num_rx_qs > 1)
2100 adapter->num_rx_qs++;
2101
2102 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2103 for_all_rx_queues(adapter, rxo, i) {
2104 rxo->adapter = adapter;
2105 cq = &rxo->cq;
2106 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2107 sizeof(struct be_eth_rx_compl));
2108 if (rc)
2109 return rc;
2110
2111 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2112 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2113 if (rc)
2114 return rc;
2115 }
2116
2117 dev_info(&adapter->pdev->dev,
2118 "created %d RSS queue(s) and 1 default RX queue\n",
2119 adapter->num_rx_qs - 1);
2120 return 0;
2121 }
2122
2123 static irqreturn_t be_intx(int irq, void *dev)
2124 {
2125 struct be_eq_obj *eqo = dev;
2126 struct be_adapter *adapter = eqo->adapter;
2127 int num_evts = 0;
2128
2129 /* IRQ is not expected when NAPI is scheduled as the EQ
2130 * will not be armed.
2131 * But, this can happen on Lancer INTx where it takes
2132 * a while to de-assert INTx or in BE2 where occasionally
2133 * an interrupt may be raised even when EQ is unarmed.
2134 * If NAPI is already scheduled, then counting & notifying
2135 * events will orphan them.
2136 */
2137 if (napi_schedule_prep(&eqo->napi)) {
2138 num_evts = events_get(eqo);
2139 __napi_schedule(&eqo->napi);
2140 if (num_evts)
2141 eqo->spurious_intr = 0;
2142 }
2143 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2144
2145 /* Return IRQ_HANDLED only for the first spurious intr
2146 * after a valid intr to stop the kernel from branding
2147 * this irq as a bad one!
2148 */
2149 if (num_evts || eqo->spurious_intr++ == 0)
2150 return IRQ_HANDLED;
2151 else
2152 return IRQ_NONE;
2153 }
2154
2155 static irqreturn_t be_msix(int irq, void *dev)
2156 {
2157 struct be_eq_obj *eqo = dev;
2158
2159 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2160 napi_schedule(&eqo->napi);
2161 return IRQ_HANDLED;
2162 }
2163
2164 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2165 {
2166 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2167 }
2168
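/* NAPI RX processing: consume up to 'budget' completions, then re-arm the
 * RX CQ and replenish RX fragments if the queue has dropped below the
 * refill watermark.
 */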
2169 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2170 int budget)
2171 {
2172 struct be_adapter *adapter = rxo->adapter;
2173 struct be_queue_info *rx_cq = &rxo->cq;
2174 struct be_rx_compl_info *rxcp;
2175 u32 work_done;
2176
2177 for (work_done = 0; work_done < budget; work_done++) {
2178 rxcp = be_rx_compl_get(rxo);
2179 if (!rxcp)
2180 break;
2181
2182 /* Is it a flush compl that has no data? */
2183 if (unlikely(rxcp->num_rcvd == 0))
2184 goto loop_continue;
2185
2186 /* Discard compl with partial DMA Lancer B0 */
2187 if (unlikely(!rxcp->pkt_size)) {
2188 be_rx_compl_discard(rxo, rxcp);
2189 goto loop_continue;
2190 }
2191
2192 /* On BE drop pkts that arrive due to imperfect filtering in
2193 * promiscuous mode on some SKUs
2194 */
2195 if (unlikely(rxcp->port != adapter->port_num &&
2196 !lancer_chip(adapter))) {
2197 be_rx_compl_discard(rxo, rxcp);
2198 goto loop_continue;
2199 }
2200
2201 if (do_gro(rxcp))
2202 be_rx_compl_process_gro(rxo, napi, rxcp);
2203 else
2204 be_rx_compl_process(rxo, rxcp);
2205 loop_continue:
2206 be_rx_stats_update(rxo, rxcp);
2207 }
2208
2209 if (work_done) {
2210 be_cq_notify(adapter, rx_cq->id, true, work_done);
2211
2212 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2213 be_post_rx_frags(rxo, GFP_ATOMIC);
2214 }
2215
2216 return work_done;
2217 }
2218
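/* Reclaim up to 'budget' TX completions for this TXQ and wake its netdev
 * subqueue once enough WRBs have been freed. Returns true when the CQ was
 * fully drained within the budget.
 */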
2219 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2220 int budget, int idx)
2221 {
2222 struct be_eth_tx_compl *txcp;
2223 int num_wrbs = 0, work_done;
2224
2225 for (work_done = 0; work_done < budget; work_done++) {
2226 txcp = be_tx_compl_get(&txo->cq);
2227 if (!txcp)
2228 break;
2229 num_wrbs += be_tx_compl_process(adapter, txo,
2230 AMAP_GET_BITS(struct amap_eth_tx_compl,
2231 wrb_index, txcp));
2232 }
2233
2234 if (work_done) {
2235 be_cq_notify(adapter, txo->cq.id, true, work_done);
2236 atomic_sub(num_wrbs, &txo->q.used);
2237
2238 /* As Tx wrbs have been freed up, wake up netdev queue
2239 * if it was stopped due to lack of tx wrbs. */
2240 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2241 atomic_read(&txo->q.used) < txo->q.len / 2) {
2242 netif_wake_subqueue(adapter->netdev, idx);
2243 }
2244
2245 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2246 tx_stats(txo)->tx_compl += work_done;
2247 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2248 }
2249 return (work_done < budget); /* Done */
2250 }
2251
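/* NAPI handler for an EQ: services all TXQs and RXQs hanging off this EQ
 * (and the MCC queue on the MCC EQ). The EQ is re-armed only when all
 * rings finished their work within the budget.
 */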
2252 int be_poll(struct napi_struct *napi, int budget)
2253 {
2254 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2255 struct be_adapter *adapter = eqo->adapter;
2256 int max_work = 0, work, i, num_evts;
2257 bool tx_done;
2258
2259 num_evts = events_get(eqo);
2260
2261 /* Process all TXQs serviced by this EQ */
2262 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2263 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2264 eqo->tx_budget, i);
2265 if (!tx_done)
2266 max_work = budget;
2267 }
2268
2269 /* This loop will iterate twice for EQ0 in which
2270 * completions of the last RXQ (the default one) are also processed.
2271 * For other EQs the loop iterates only once.
2272 */
2273 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2274 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2275 max_work = max(work, max_work);
2276 }
2277
2278 if (is_mcc_eqo(eqo))
2279 be_process_mcc(adapter);
2280
2281 if (max_work < budget) {
2282 napi_complete(napi);
2283 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2284 } else {
2285 /* As we'll continue in polling mode, count and clear events */
2286 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2287 }
2288 return max_work;
2289 }
2290
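/* Check for unrecoverable HW errors: Lancer reports them via the SLIPORT
 * status/error registers; BEx chips report them via the UE status
 * registers, filtered through the corresponding UE mask registers.
 */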
2291 void be_detect_error(struct be_adapter *adapter)
2292 {
2293 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2294 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2295 u32 i;
2296
2297 if (be_hw_error(adapter))
2298 return;
2299
2300 if (lancer_chip(adapter)) {
2301 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2302 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2303 sliport_err1 = ioread32(adapter->db +
2304 SLIPORT_ERROR1_OFFSET);
2305 sliport_err2 = ioread32(adapter->db +
2306 SLIPORT_ERROR2_OFFSET);
2307 }
2308 } else {
2309 pci_read_config_dword(adapter->pdev,
2310 PCICFG_UE_STATUS_LOW, &ue_lo);
2311 pci_read_config_dword(adapter->pdev,
2312 PCICFG_UE_STATUS_HIGH, &ue_hi);
2313 pci_read_config_dword(adapter->pdev,
2314 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2315 pci_read_config_dword(adapter->pdev,
2316 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2317
2318 ue_lo = (ue_lo & ~ue_lo_mask);
2319 ue_hi = (ue_hi & ~ue_hi_mask);
2320 }
2321
2322 /* On certain platforms BE hardware can indicate spurious UEs.
2323 * In case of a real UE the h/w is allowed to stop working on its own.
2324 * Hence hw_error is not set for UE detection.
2325 */
2326 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2327 adapter->hw_error = true;
2328 dev_err(&adapter->pdev->dev,
2329 "Error detected in the card\n");
2330 }
2331
2332 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2333 dev_err(&adapter->pdev->dev,
2334 "ERR: sliport status 0x%x\n", sliport_status);
2335 dev_err(&adapter->pdev->dev,
2336 "ERR: sliport error1 0x%x\n", sliport_err1);
2337 dev_err(&adapter->pdev->dev,
2338 "ERR: sliport error2 0x%x\n", sliport_err2);
2339 }
2340
2341 if (ue_lo) {
2342 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2343 if (ue_lo & 1)
2344 dev_err(&adapter->pdev->dev,
2345 "UE: %s bit set\n", ue_status_low_desc[i]);
2346 }
2347 }
2348
2349 if (ue_hi) {
2350 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2351 if (ue_hi & 1)
2352 dev_err(&adapter->pdev->dev,
2353 "UE: %s bit set\n", ue_status_hi_desc[i]);
2354 }
2355 }
2356
2357 }
2358
2359 static void be_msix_disable(struct be_adapter *adapter)
2360 {
2361 if (msix_enabled(adapter)) {
2362 pci_disable_msix(adapter->pdev);
2363 adapter->num_msix_vec = 0;
2364 adapter->num_msix_roce_vec = 0;
2365 }
2366 }
2367
2368 static int be_msix_enable(struct be_adapter *adapter)
2369 {
2370 int i, status, num_vec;
2371 struct device *dev = &adapter->pdev->dev;
2372
2373 /* If RoCE is supported, program the max number of NIC vectors that
2374 * may be configured via set-channels, along with vectors needed for
2375 * RoCE. Else, just program the number we'll use initially.
2376 */
2377 if (be_roce_supported(adapter))
2378 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2379 2 * num_online_cpus());
2380 else
2381 num_vec = adapter->cfg_num_qs;
2382
2383 for (i = 0; i < num_vec; i++)
2384 adapter->msix_entries[i].entry = i;
2385
2386 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2387 if (status == 0) {
2388 goto done;
2389 } else if (status >= MIN_MSIX_VECTORS) {
2390 num_vec = status;
2391 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2392 num_vec);
2393 if (!status)
2394 goto done;
2395 }
2396
2397 dev_warn(dev, "MSIx enable failed\n");
2398
2399 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2400 if (!be_physfn(adapter))
2401 return status;
2402 return 0;
2403 done:
2404 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2405 adapter->num_msix_roce_vec = num_vec / 2;
2406 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2407 adapter->num_msix_roce_vec);
2408 }
2409
2410 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2411
2412 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2413 adapter->num_msix_vec);
2414 return 0;
2415 }
2416
2417 static inline int be_msix_vec_get(struct be_adapter *adapter,
2418 struct be_eq_obj *eqo)
2419 {
2420 return adapter->msix_entries[eqo->msix_idx].vector;
2421 }
2422
2423 static int be_msix_register(struct be_adapter *adapter)
2424 {
2425 struct net_device *netdev = adapter->netdev;
2426 struct be_eq_obj *eqo;
2427 int status, i, vec;
2428
2429 for_all_evt_queues(adapter, eqo, i) {
2430 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2431 vec = be_msix_vec_get(adapter, eqo);
2432 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2433 if (status)
2434 goto err_msix;
2435 }
2436
2437 return 0;
2438 err_msix:
2439 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2440 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2441 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2442 status);
2443 be_msix_disable(adapter);
2444 return status;
2445 }
2446
2447 static int be_irq_register(struct be_adapter *adapter)
2448 {
2449 struct net_device *netdev = adapter->netdev;
2450 int status;
2451
2452 if (msix_enabled(adapter)) {
2453 status = be_msix_register(adapter);
2454 if (status == 0)
2455 goto done;
2456 /* INTx is not supported for VF */
2457 if (!be_physfn(adapter))
2458 return status;
2459 }
2460
2461 /* INTx: only the first EQ is used */
2462 netdev->irq = adapter->pdev->irq;
2463 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2464 &adapter->eq_obj[0]);
2465 if (status) {
2466 dev_err(&adapter->pdev->dev,
2467 "INTx request IRQ failed - err %d\n", status);
2468 return status;
2469 }
2470 done:
2471 adapter->isr_registered = true;
2472 return 0;
2473 }
2474
2475 static void be_irq_unregister(struct be_adapter *adapter)
2476 {
2477 struct net_device *netdev = adapter->netdev;
2478 struct be_eq_obj *eqo;
2479 int i;
2480
2481 if (!adapter->isr_registered)
2482 return;
2483
2484 /* INTx */
2485 if (!msix_enabled(adapter)) {
2486 free_irq(netdev->irq, &adapter->eq_obj[0]);
2487 goto done;
2488 }
2489
2490 /* MSIx */
2491 for_all_evt_queues(adapter, eqo, i)
2492 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2493
2494 done:
2495 adapter->isr_registered = false;
2496 }
2497
2498 static void be_rx_qs_destroy(struct be_adapter *adapter)
2499 {
2500 struct be_queue_info *q;
2501 struct be_rx_obj *rxo;
2502 int i;
2503
2504 for_all_rx_queues(adapter, rxo, i) {
2505 q = &rxo->q;
2506 if (q->created) {
2507 be_cmd_rxq_destroy(adapter, q);
2508 be_rx_cq_clean(rxo);
2509 }
2510 be_queue_free(adapter, q);
2511 }
2512 }
2513
2514 static int be_close(struct net_device *netdev)
2515 {
2516 struct be_adapter *adapter = netdev_priv(netdev);
2517 struct be_eq_obj *eqo;
2518 int i;
2519
2520 be_roce_dev_close(adapter);
2521
2522 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2523 for_all_evt_queues(adapter, eqo, i)
2524 napi_disable(&eqo->napi);
2525 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2526 }
2527
2528 be_async_mcc_disable(adapter);
2529
2530 /* Wait for all pending tx completions to arrive so that
2531 * all tx skbs are freed.
2532 */
2533 netif_tx_disable(netdev);
2534 be_tx_compl_clean(adapter);
2535
2536 be_rx_qs_destroy(adapter);
2537
2538 for_all_evt_queues(adapter, eqo, i) {
2539 if (msix_enabled(adapter))
2540 synchronize_irq(be_msix_vec_get(adapter, eqo));
2541 else
2542 synchronize_irq(netdev->irq);
2543 be_eq_clean(eqo);
2544 }
2545
2546 be_irq_unregister(adapter);
2547
2548 return 0;
2549 }
2550
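/* Create the RX rings (the default RXQ first, then the RSS rings) and,
 * when multiple RX queues exist, program a 128-entry RSS indirection
 * table filled round-robin with the RSS ring ids.
 */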
2551 static int be_rx_qs_create(struct be_adapter *adapter)
2552 {
2553 struct be_rx_obj *rxo;
2554 int rc, i, j;
2555 u8 rsstable[128];
2556
2557 for_all_rx_queues(adapter, rxo, i) {
2558 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2559 sizeof(struct be_eth_rx_d));
2560 if (rc)
2561 return rc;
2562 }
2563
2564 /* The FW would like the default RXQ to be created first */
2565 rxo = default_rxo(adapter);
2566 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2567 adapter->if_handle, false, &rxo->rss_id);
2568 if (rc)
2569 return rc;
2570
2571 for_all_rss_queues(adapter, rxo, i) {
2572 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2573 rx_frag_size, adapter->if_handle,
2574 true, &rxo->rss_id);
2575 if (rc)
2576 return rc;
2577 }
2578
2579 if (be_multi_rxq(adapter)) {
2580 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2581 for_all_rss_queues(adapter, rxo, i) {
2582 if ((j + i) >= 128)
2583 break;
2584 rsstable[j + i] = rxo->rss_id;
2585 }
2586 }
2587 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2588 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2589
2590 if (!BEx_chip(adapter))
2591 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2592 RSS_ENABLE_UDP_IPV6;
2593
2594 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2595 128);
2596 if (rc) {
2597 adapter->rss_flags = 0;
2598 return rc;
2599 }
2600 }
2601
2602 /* First time posting */
2603 for_all_rx_queues(adapter, rxo, i)
2604 be_post_rx_frags(rxo, GFP_KERNEL);
2605 return 0;
2606 }
2607
2608 static int be_open(struct net_device *netdev)
2609 {
2610 struct be_adapter *adapter = netdev_priv(netdev);
2611 struct be_eq_obj *eqo;
2612 struct be_rx_obj *rxo;
2613 struct be_tx_obj *txo;
2614 u8 link_status;
2615 int status, i;
2616
2617 status = be_rx_qs_create(adapter);
2618 if (status)
2619 goto err;
2620
2621 status = be_irq_register(adapter);
2622 if (status)
2623 goto err;
2624
2625 for_all_rx_queues(adapter, rxo, i)
2626 be_cq_notify(adapter, rxo->cq.id, true, 0);
2627
2628 for_all_tx_queues(adapter, txo, i)
2629 be_cq_notify(adapter, txo->cq.id, true, 0);
2630
2631 be_async_mcc_enable(adapter);
2632
2633 for_all_evt_queues(adapter, eqo, i) {
2634 napi_enable(&eqo->napi);
2635 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2636 }
2637 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2638
2639 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2640 if (!status)
2641 be_link_status_update(adapter, link_status);
2642
2643 netif_tx_start_all_queues(netdev);
2644 be_roce_dev_open(adapter);
2645 return 0;
2646 err:
2647 be_close(adapter->netdev);
2648 return -EIO;
2649 }
2650
2651 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2652 {
2653 struct be_dma_mem cmd;
2654 int status = 0;
2655 u8 mac[ETH_ALEN];
2656
2657 memset(mac, 0, ETH_ALEN);
2658
2659 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2660 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2661 GFP_KERNEL);
2662 if (cmd.va == NULL)
2663 return -1;
2664
2665 if (enable) {
2666 status = pci_write_config_dword(adapter->pdev,
2667 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2668 if (status) {
2669 dev_err(&adapter->pdev->dev,
2670 "Could not enable Wake-on-lan\n");
2671 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2672 cmd.dma);
2673 return status;
2674 }
2675 status = be_cmd_enable_magic_wol(adapter,
2676 adapter->netdev->dev_addr, &cmd);
2677 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2678 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2679 } else {
2680 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2681 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2682 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2683 }
2684
2685 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2686 return status;
2687 }
2688
2689 /*
2690 * Generate a seed MAC address from the PF MAC Address using jhash.
2691 * MAC addresses for VFs are assigned incrementally starting from the seed.
2692 * These addresses are programmed in the ASIC by the PF and the VF driver
2693 * queries for the MAC address during its probe.
2694 */
2695 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2696 {
2697 u32 vf;
2698 int status = 0;
2699 u8 mac[ETH_ALEN];
2700 struct be_vf_cfg *vf_cfg;
2701
2702 be_vf_eth_addr_generate(adapter, mac);
2703
2704 for_all_vfs(adapter, vf_cfg, vf) {
2705 if (BEx_chip(adapter))
2706 status = be_cmd_pmac_add(adapter, mac,
2707 vf_cfg->if_handle,
2708 &vf_cfg->pmac_id, vf + 1);
2709 else
2710 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2711 vf + 1);
2712
2713 if (status)
2714 dev_err(&adapter->pdev->dev,
2715 "Mac address assignment failed for VF %d\n", vf);
2716 else
2717 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2718
2719 mac[5] += 1;
2720 }
2721 return status;
2722 }
2723
2724 static int be_vfs_mac_query(struct be_adapter *adapter)
2725 {
2726 int status, vf;
2727 u8 mac[ETH_ALEN];
2728 struct be_vf_cfg *vf_cfg;
2729 bool active = false;
2730
2731 for_all_vfs(adapter, vf_cfg, vf) {
2732 be_cmd_get_mac_from_list(adapter, mac, &active,
2733 &vf_cfg->pmac_id, 0);
2734
2735 status = be_cmd_mac_addr_query(adapter, mac, false,
2736 vf_cfg->if_handle, 0);
2737 if (status)
2738 return status;
2739 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2740 }
2741 return 0;
2742 }
2743
2744 static void be_vf_clear(struct be_adapter *adapter)
2745 {
2746 struct be_vf_cfg *vf_cfg;
2747 u32 vf;
2748
2749 if (pci_vfs_assigned(adapter->pdev)) {
2750 dev_warn(&adapter->pdev->dev,
2751 "VFs are assigned to VMs: not disabling VFs\n");
2752 goto done;
2753 }
2754
2755 pci_disable_sriov(adapter->pdev);
2756
2757 for_all_vfs(adapter, vf_cfg, vf) {
2758 if (BEx_chip(adapter))
2759 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2760 vf_cfg->pmac_id, vf + 1);
2761 else
2762 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2763 vf + 1);
2764
2765 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2766 }
2767 done:
2768 kfree(adapter->vf_cfg);
2769 adapter->num_vfs = 0;
2770 }
2771
2772 static void be_clear_queues(struct be_adapter *adapter)
2773 {
2774 be_mcc_queues_destroy(adapter);
2775 be_rx_cqs_destroy(adapter);
2776 be_tx_queues_destroy(adapter);
2777 be_evt_queues_destroy(adapter);
2778 }
2779
2780 static void be_cancel_worker(struct be_adapter *adapter)
2781 {
2782 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2783 cancel_delayed_work_sync(&adapter->work);
2784 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2785 }
2786 }
2787
2788 static int be_clear(struct be_adapter *adapter)
2789 {
2790 int i;
2791
2792 be_cancel_worker(adapter);
2793
2794 if (sriov_enabled(adapter))
2795 be_vf_clear(adapter);
2796
2797 /* delete the primary mac along with the uc-mac list */
2798 for (i = 0; i < (adapter->uc_macs + 1); i++)
2799 be_cmd_pmac_del(adapter, adapter->if_handle,
2800 adapter->pmac_id[i], 0);
2801 adapter->uc_macs = 0;
2802
2803 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2804
2805 be_clear_queues(adapter);
2806
2807 kfree(adapter->pmac_id);
2808 adapter->pmac_id = NULL;
2809
2810 be_msix_disable(adapter);
2811 return 0;
2812 }
2813
2814 static int be_vfs_if_create(struct be_adapter *adapter)
2815 {
2816 struct be_resources res = {0};
2817 struct be_vf_cfg *vf_cfg;
2818 u32 cap_flags, en_flags, vf;
2819 int status = 0;
2820
2821 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2822 BE_IF_FLAGS_MULTICAST;
2823
2824 for_all_vfs(adapter, vf_cfg, vf) {
2825 if (!BE3_chip(adapter)) {
2826 status = be_cmd_get_profile_config(adapter, &res,
2827 vf + 1);
2828 if (!status)
2829 cap_flags = res.if_cap_flags;
2830 }
2831
2832 /* If a FW profile exists, then cap_flags are updated */
2833 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2834 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2835 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2836 &vf_cfg->if_handle, vf + 1);
2837 if (status)
2838 goto err;
2839 }
2840 err:
2841 return status;
2842 }
2843
2844 static int be_vf_setup_init(struct be_adapter *adapter)
2845 {
2846 struct be_vf_cfg *vf_cfg;
2847 int vf;
2848
2849 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2850 GFP_KERNEL);
2851 if (!adapter->vf_cfg)
2852 return -ENOMEM;
2853
2854 for_all_vfs(adapter, vf_cfg, vf) {
2855 vf_cfg->if_handle = -1;
2856 vf_cfg->pmac_id = -1;
2857 }
2858 return 0;
2859 }
2860
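/* SR-IOV setup: reuse VFs that are already enabled (e.g. from a previous
 * load); otherwise create an interface per VF, assign MAC addresses,
 * grant the FILTMGMT privilege where missing and finally enable SR-IOV.
 */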
2861 static int be_vf_setup(struct be_adapter *adapter)
2862 {
2863 struct be_vf_cfg *vf_cfg;
2864 u16 def_vlan, lnk_speed;
2865 int status, old_vfs, vf;
2866 struct device *dev = &adapter->pdev->dev;
2867 u32 privileges;
2868
2869 old_vfs = pci_num_vf(adapter->pdev);
2870 if (old_vfs) {
2871 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2872 if (old_vfs != num_vfs)
2873 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2874 adapter->num_vfs = old_vfs;
2875 } else {
2876 if (num_vfs > be_max_vfs(adapter))
2877 dev_info(dev, "Device supports %d VFs and not %d\n",
2878 be_max_vfs(adapter), num_vfs);
2879 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
2880 if (!adapter->num_vfs)
2881 return 0;
2882 }
2883
2884 status = be_vf_setup_init(adapter);
2885 if (status)
2886 goto err;
2887
2888 if (old_vfs) {
2889 for_all_vfs(adapter, vf_cfg, vf) {
2890 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2891 if (status)
2892 goto err;
2893 }
2894 } else {
2895 status = be_vfs_if_create(adapter);
2896 if (status)
2897 goto err;
2898 }
2899
2900 if (old_vfs) {
2901 status = be_vfs_mac_query(adapter);
2902 if (status)
2903 goto err;
2904 } else {
2905 status = be_vf_eth_addr_config(adapter);
2906 if (status)
2907 goto err;
2908 }
2909
2910 for_all_vfs(adapter, vf_cfg, vf) {
2911 /* Allow VFs to program MAC/VLAN filters */
2912 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2913 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2914 status = be_cmd_set_fn_privileges(adapter,
2915 privileges |
2916 BE_PRIV_FILTMGMT,
2917 vf + 1);
2918 if (!status)
2919 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2920 vf);
2921 }
2922
2923 /* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
2924 * Allow the full available bandwidth.
2925 */
2926 if (BE3_chip(adapter) && !old_vfs)
2927 be_cmd_set_qos(adapter, 1000, vf+1);
2928
2929 status = be_cmd_link_status_query(adapter, &lnk_speed,
2930 NULL, vf + 1);
2931 if (!status)
2932 vf_cfg->tx_rate = lnk_speed;
2933
2934 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2935 vf + 1, vf_cfg->if_handle, NULL);
2936 if (status)
2937 goto err;
2938 vf_cfg->def_vid = def_vlan;
2939
2940 be_cmd_enable_vf(adapter, vf + 1);
2941 }
2942
2943 if (!old_vfs) {
2944 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2945 if (status) {
2946 dev_err(dev, "SRIOV enable failed\n");
2947 adapter->num_vfs = 0;
2948 goto err;
2949 }
2950 }
2951 return 0;
2952 err:
2953 dev_err(dev, "VF setup failed\n");
2954 be_vf_clear(adapter);
2955 return status;
2956 }
2957
2958 /* On BE2/BE3 the FW does not report the supported resource limits */
2959 static void BEx_get_resources(struct be_adapter *adapter,
2960 struct be_resources *res)
2961 {
2962 struct pci_dev *pdev = adapter->pdev;
2963 bool use_sriov = false;
2964
2965 if (BE3_chip(adapter) && be_physfn(adapter)) {
2966 int max_vfs;
2967
2968 max_vfs = pci_sriov_get_totalvfs(pdev);
2969 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2970 use_sriov = res->max_vfs && num_vfs;
2971 }
2972
2973 if (be_physfn(adapter))
2974 res->max_uc_mac = BE_UC_PMAC_COUNT;
2975 else
2976 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2977
2978 if (adapter->function_mode & FLEX10_MODE)
2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2982 else
2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2984 res->max_mcast_mac = BE_MAX_MC;
2985
2986 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2987 !be_physfn(adapter))
2988 res->max_tx_qs = 1;
2989 else
2990 res->max_tx_qs = BE3_MAX_TX_QS;
2991
2992 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2993 !use_sriov && be_physfn(adapter))
2994 res->max_rss_qs = (adapter->be3_native) ?
2995 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2996 res->max_rx_qs = res->max_rss_qs + 1;
2997
2998 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
2999
3000 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3001 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3002 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3003 }
3004
3005 static void be_setup_init(struct be_adapter *adapter)
3006 {
3007 adapter->vlan_prio_bmap = 0xff;
3008 adapter->phy.link_speed = -1;
3009 adapter->if_handle = -1;
3010 adapter->be3_native = false;
3011 adapter->promiscuous = false;
3012 if (be_physfn(adapter))
3013 adapter->cmd_privileges = MAX_PRIVILEGES;
3014 else
3015 adapter->cmd_privileges = MIN_PRIVILEGES;
3016 }
3017
3018 static int be_get_resources(struct be_adapter *adapter)
3019 {
3020 struct device *dev = &adapter->pdev->dev;
3021 struct be_resources res = {0};
3022 int status;
3023
3024 if (BEx_chip(adapter)) {
3025 BEx_get_resources(adapter, &res);
3026 adapter->res = res;
3027 }
3028
3029 /* For BE3 only check if FW suggests a different max-txqs value */
3030 if (BE3_chip(adapter)) {
3031 status = be_cmd_get_profile_config(adapter, &res, 0);
3032 if (!status && res.max_tx_qs)
3033 adapter->res.max_tx_qs =
3034 min(adapter->res.max_tx_qs, res.max_tx_qs);
3035 }
3036
3037 /* For Lancer, SH etc. read per-function resource limits from FW.
3038 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3039 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3040 */
3041 if (!BEx_chip(adapter)) {
3042 status = be_cmd_get_func_config(adapter, &res);
3043 if (status)
3044 return status;
3045
3046 /* If RoCE may be enabled stash away half the EQs for RoCE */
3047 if (be_roce_supported(adapter))
3048 res.max_evt_qs /= 2;
3049 adapter->res = res;
3050
3051 if (be_physfn(adapter)) {
3052 status = be_cmd_get_profile_config(adapter, &res, 0);
3053 if (status)
3054 return status;
3055 adapter->res.max_vfs = res.max_vfs;
3056 }
3057
3058 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3059 be_max_txqs(adapter), be_max_rxqs(adapter),
3060 be_max_rss(adapter), be_max_eqs(adapter),
3061 be_max_vfs(adapter));
3062 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3063 be_max_uc(adapter), be_max_mc(adapter),
3064 be_max_vlans(adapter));
3065 }
3066
3067 return 0;
3068 }
3069
3070 /* Routine to query per function resource limits */
3071 static int be_get_config(struct be_adapter *adapter)
3072 {
3073 int status;
3074
3075 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3076 &adapter->function_mode,
3077 &adapter->function_caps,
3078 &adapter->asic_rev);
3079 if (status)
3080 return status;
3081
3082 status = be_get_resources(adapter);
3083 if (status)
3084 return status;
3085
3086 /* primary mac needs 1 pmac entry */
3087 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3088 GFP_KERNEL);
3089 if (!adapter->pmac_id)
3090 return -ENOMEM;
3091
3092 /* Sanitize cfg_num_qs based on HW and platform limits */
3093 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3094
3095 return 0;
3096 }
3097
3098 static int be_mac_setup(struct be_adapter *adapter)
3099 {
3100 u8 mac[ETH_ALEN];
3101 int status;
3102
3103 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3104 status = be_cmd_get_perm_mac(adapter, mac);
3105 if (status)
3106 return status;
3107
3108 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3109 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3110 } else {
3111 /* Maybe the HW was reset; dev_addr must be re-programmed */
3112 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3113 }
3114
3115 /* On BE3 VFs this cmd may fail due to lack of privilege.
3116 * Ignore the failure as in this case pmac_id is fetched
3117 * in the IFACE_CREATE cmd.
3118 */
3119 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3120 &adapter->pmac_id[0], 0);
3121 return 0;
3122 }
3123
3124 static void be_schedule_worker(struct be_adapter *adapter)
3125 {
3126 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3127 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3128 }
3129
3130 static int be_setup_queues(struct be_adapter *adapter)
3131 {
3132 struct net_device *netdev = adapter->netdev;
3133 int status;
3134
3135 status = be_evt_queues_create(adapter);
3136 if (status)
3137 goto err;
3138
3139 status = be_tx_qs_create(adapter);
3140 if (status)
3141 goto err;
3142
3143 status = be_rx_cqs_create(adapter);
3144 if (status)
3145 goto err;
3146
3147 status = be_mcc_queues_create(adapter);
3148 if (status)
3149 goto err;
3150
3151 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3152 if (status)
3153 goto err;
3154
3155 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3156 if (status)
3157 goto err;
3158
3159 return 0;
3160 err:
3161 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3162 return status;
3163 }
3164
3165 int be_update_queues(struct be_adapter *adapter)
3166 {
3167 struct net_device *netdev = adapter->netdev;
3168 int status;
3169
3170 if (netif_running(netdev))
3171 be_close(netdev);
3172
3173 be_cancel_worker(adapter);
3174
3175 /* If any vectors have been shared with RoCE we cannot re-program
3176 * the MSIx table.
3177 */
3178 if (!adapter->num_msix_roce_vec)
3179 be_msix_disable(adapter);
3180
3181 be_clear_queues(adapter);
3182
3183 if (!msix_enabled(adapter)) {
3184 status = be_msix_enable(adapter);
3185 if (status)
3186 return status;
3187 }
3188
3189 status = be_setup_queues(adapter);
3190 if (status)
3191 return status;
3192
3193 be_schedule_worker(adapter);
3194
3195 if (netif_running(netdev))
3196 status = be_open(netdev);
3197
3198 return status;
3199 }
3200
3201 static int be_setup(struct be_adapter *adapter)
3202 {
3203 struct device *dev = &adapter->pdev->dev;
3204 u32 tx_fc, rx_fc, en_flags;
3205 int status;
3206
3207 be_setup_init(adapter);
3208
3209 if (!lancer_chip(adapter))
3210 be_cmd_req_native_mode(adapter);
3211
3212 status = be_get_config(adapter);
3213 if (status)
3214 goto err;
3215
3216 status = be_msix_enable(adapter);
3217 if (status)
3218 goto err;
3219
3220 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3221 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3222 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3223 en_flags |= BE_IF_FLAGS_RSS;
3224 en_flags = en_flags & be_if_cap_flags(adapter);
3225 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3226 &adapter->if_handle, 0);
3227 if (status)
3228 goto err;
3229
3230 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3231 rtnl_lock();
3232 status = be_setup_queues(adapter);
3233 rtnl_unlock();
3234 if (status)
3235 goto err;
3236
3237 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3238 /* In UMC mode the FW does not return the right privileges.
3239 * Override with privileges equivalent to the PF.
3240 */
3241 if (be_is_mc(adapter))
3242 adapter->cmd_privileges = MAX_PRIVILEGES;
3243
3244 status = be_mac_setup(adapter);
3245 if (status)
3246 goto err;
3247
3248 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3249
3250 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3251 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3252 adapter->fw_ver);
3253 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3254 }
3255
3256 if (adapter->vlans_added)
3257 be_vid_config(adapter);
3258
3259 be_set_rx_mode(adapter->netdev);
3260
3261 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3262
3263 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3264 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3265 adapter->rx_fc);
3266
3267 if (be_physfn(adapter) && num_vfs) {
3268 if (be_max_vfs(adapter))
3269 be_vf_setup(adapter);
3270 else
3271 dev_warn(dev, "device doesn't support SRIOV\n");
3272 }
3273
3274 status = be_cmd_get_phy_info(adapter);
3275 if (!status && be_pause_supported(adapter))
3276 adapter->phy.fc_autoneg = 1;
3277
3278 be_schedule_worker(adapter);
3279 return 0;
3280 err:
3281 be_clear(adapter);
3282 return status;
3283 }
3284
3285 #ifdef CONFIG_NET_POLL_CONTROLLER
3286 static void be_netpoll(struct net_device *netdev)
3287 {
3288 struct be_adapter *adapter = netdev_priv(netdev);
3289 struct be_eq_obj *eqo;
3290 int i;
3291
3292 for_all_evt_queues(adapter, eqo, i) {
3293 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3294 napi_schedule(&eqo->napi);
3295 }
3296
3297 return;
3298 }
3299 #endif
3300
3301 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3302 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3303
3304 static bool be_flash_redboot(struct be_adapter *adapter,
3305 const u8 *p, u32 img_start, int image_size,
3306 int hdr_size)
3307 {
3308 u32 crc_offset;
3309 u8 flashed_crc[4];
3310 int status;
3311
3312 crc_offset = hdr_size + img_start + image_size - 4;
3313
3314 p += crc_offset;
3315
3316 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3317 (image_size - 4));
3318 if (status) {
3319 dev_err(&adapter->pdev->dev,
3320 "could not get crc from flash, not flashing redboot\n");
3321 return false;
3322 }
3323
3324 /* update redboot only if crc does not match */
3325 if (!memcmp(flashed_crc, p, 4))
3326 return false;
3327 else
3328 return true;
3329 }
3330
3331 static bool phy_flashing_required(struct be_adapter *adapter)
3332 {
3333 return (adapter->phy.phy_type == TN_8022 &&
3334 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3335 }
3336
3337 static bool is_comp_in_ufi(struct be_adapter *adapter,
3338 struct flash_section_info *fsec, int type)
3339 {
3340 int i = 0, img_type = 0;
3341 struct flash_section_info_g2 *fsec_g2 = NULL;
3342
3343 if (BE2_chip(adapter))
3344 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3345
3346 for (i = 0; i < MAX_FLASH_COMP; i++) {
3347 if (fsec_g2)
3348 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3349 else
3350 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3351
3352 if (img_type == type)
3353 return true;
3354 }
3355 return false;
3356
3357 }
3358
3359 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3360 int header_size,
3361 const struct firmware *fw)
3362 {
3363 struct flash_section_info *fsec = NULL;
3364 const u8 *p = fw->data;
3365
3366 p += header_size;
3367 while (p < (fw->data + fw->size)) {
3368 fsec = (struct flash_section_info *)p;
3369 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3370 return fsec;
3371 p += 32;
3372 }
3373 return NULL;
3374 }
3375
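/* Write one image to the flash in 32KB chunks: intermediate chunks use a
 * SAVE operation and the final chunk issues the actual FLASH operation
 * (PHY firmware uses the equivalent PHY opcodes).
 */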
3376 static int be_flash(struct be_adapter *adapter, const u8 *img,
3377 struct be_dma_mem *flash_cmd, int optype, int img_size)
3378 {
3379 u32 total_bytes = 0, flash_op, num_bytes = 0;
3380 int status = 0;
3381 struct be_cmd_write_flashrom *req = flash_cmd->va;
3382
3383 total_bytes = img_size;
3384 while (total_bytes) {
3385 num_bytes = min_t(u32, 32*1024, total_bytes);
3386
3387 total_bytes -= num_bytes;
3388
3389 if (!total_bytes) {
3390 if (optype == OPTYPE_PHY_FW)
3391 flash_op = FLASHROM_OPER_PHY_FLASH;
3392 else
3393 flash_op = FLASHROM_OPER_FLASH;
3394 } else {
3395 if (optype == OPTYPE_PHY_FW)
3396 flash_op = FLASHROM_OPER_PHY_SAVE;
3397 else
3398 flash_op = FLASHROM_OPER_SAVE;
3399 }
3400
3401 memcpy(req->data_buf, img, num_bytes);
3402 img += num_bytes;
3403 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3404 flash_op, num_bytes);
3405 if (status) {
3406 if (status == ILLEGAL_IOCTL_REQ &&
3407 optype == OPTYPE_PHY_FW)
3408 break;
3409 dev_err(&adapter->pdev->dev,
3410 "cmd to write to flash rom failed.\n");
3411 return status;
3412 }
3413 }
3414 return 0;
3415 }
3416
3417 /* For BE2, BE3 and BE3-R */
3418 static int be_flash_BEx(struct be_adapter *adapter,
3419 const struct firmware *fw,
3420 struct be_dma_mem *flash_cmd,
3421 int num_of_images)
3422
3423 {
3424 int status = 0, i, filehdr_size = 0;
3425 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3426 const u8 *p = fw->data;
3427 const struct flash_comp *pflashcomp;
3428 int num_comp, redboot;
3429 struct flash_section_info *fsec = NULL;
3430
3431 struct flash_comp gen3_flash_types[] = {
3432 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3433 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3434 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3435 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3436 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3437 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3438 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3439 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3440 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3441 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3442 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3443 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3444 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3445 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3446 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3447 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3448 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3449 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3450 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3451 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3452 };
3453
3454 struct flash_comp gen2_flash_types[] = {
3455 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3456 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3457 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3458 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3459 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3460 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3461 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3462 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3463 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3464 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3465 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3466 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3467 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3468 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3469 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3470 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3471 };
3472
3473 if (BE3_chip(adapter)) {
3474 pflashcomp = gen3_flash_types;
3475 filehdr_size = sizeof(struct flash_file_hdr_g3);
3476 num_comp = ARRAY_SIZE(gen3_flash_types);
3477 } else {
3478 pflashcomp = gen2_flash_types;
3479 filehdr_size = sizeof(struct flash_file_hdr_g2);
3480 num_comp = ARRAY_SIZE(gen2_flash_types);
3481 }
3482
3483 /* Get flash section info */
3484 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3485 if (!fsec) {
3486 dev_err(&adapter->pdev->dev,
3487 "Invalid Cookie. UFI corrupted ?\n");
3488 return -1;
3489 }
3490 for (i = 0; i < num_comp; i++) {
3491 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3492 continue;
3493
3494 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3495 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3496 continue;
3497
3498 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3499 !phy_flashing_required(adapter))
3500 continue;
3501
3502 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3503 redboot = be_flash_redboot(adapter, fw->data,
3504 pflashcomp[i].offset, pflashcomp[i].size,
3505 filehdr_size + img_hdrs_size);
3506 if (!redboot)
3507 continue;
3508 }
3509
3510 p = fw->data;
3511 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3512 if (p + pflashcomp[i].size > fw->data + fw->size)
3513 return -1;
3514
3515 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3516 pflashcomp[i].size);
3517 if (status) {
3518 dev_err(&adapter->pdev->dev,
3519 "Flashing section type %d failed.\n",
3520 pflashcomp[i].img_type);
3521 return status;
3522 }
3523 }
3524 return 0;
3525 }
3526
3527 static int be_flash_skyhawk(struct be_adapter *adapter,
3528 const struct firmware *fw,
3529 struct be_dma_mem *flash_cmd, int num_of_images)
3530 {
3531 int status = 0, i, filehdr_size = 0;
3532 int img_offset, img_size, img_optype, redboot;
3533 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3534 const u8 *p = fw->data;
3535 struct flash_section_info *fsec = NULL;
3536
3537 filehdr_size = sizeof(struct flash_file_hdr_g3);
3538 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3539 if (!fsec) {
3540 dev_err(&adapter->pdev->dev,
3541 "Invalid Cookie. UFI corrupted ?\n");
3542 return -1;
3543 }
3544
3545 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3546 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3547 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3548
3549 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3550 case IMAGE_FIRMWARE_iSCSI:
3551 img_optype = OPTYPE_ISCSI_ACTIVE;
3552 break;
3553 case IMAGE_BOOT_CODE:
3554 img_optype = OPTYPE_REDBOOT;
3555 break;
3556 case IMAGE_OPTION_ROM_ISCSI:
3557 img_optype = OPTYPE_BIOS;
3558 break;
3559 case IMAGE_OPTION_ROM_PXE:
3560 img_optype = OPTYPE_PXE_BIOS;
3561 break;
3562 case IMAGE_OPTION_ROM_FCoE:
3563 img_optype = OPTYPE_FCOE_BIOS;
3564 break;
3565 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3566 img_optype = OPTYPE_ISCSI_BACKUP;
3567 break;
3568 case IMAGE_NCSI:
3569 img_optype = OPTYPE_NCSI_FW;
3570 break;
3571 default:
3572 continue;
3573 }
3574
3575 if (img_optype == OPTYPE_REDBOOT) {
3576 redboot = be_flash_redboot(adapter, fw->data,
3577 img_offset, img_size,
3578 filehdr_size + img_hdrs_size);
3579 if (!redboot)
3580 continue;
3581 }
3582
3583 p = fw->data;
3584 p += filehdr_size + img_offset + img_hdrs_size;
3585 if (p + img_size > fw->data + fw->size)
3586 return -1;
3587
3588 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3589 if (status) {
3590 dev_err(&adapter->pdev->dev,
3591 "Flashing section type %d failed.\n",
3592 fsec->fsec_entry[i].type);
3593 return status;
3594 }
3595 }
3596 return 0;
3597 }
3598
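/* Lancer FW download: stream the image to the "/prg" object in 32KB
 * chunks, then issue a zero-length write to commit it. Depending on
 * change_status the FW is reset here or a reboot is needed before the
 * new image becomes active.
 */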
3599 static int lancer_fw_download(struct be_adapter *adapter,
3600 const struct firmware *fw)
3601 {
3602 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3603 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3604 struct be_dma_mem flash_cmd;
3605 const u8 *data_ptr = NULL;
3606 u8 *dest_image_ptr = NULL;
3607 size_t image_size = 0;
3608 u32 chunk_size = 0;
3609 u32 data_written = 0;
3610 u32 offset = 0;
3611 int status = 0;
3612 u8 add_status = 0;
3613 u8 change_status;
3614
3615 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3616 dev_err(&adapter->pdev->dev,
3617 "FW Image not properly aligned. "
3618 "Length must be 4 byte aligned.\n");
3619 status = -EINVAL;
3620 goto lancer_fw_exit;
3621 }
3622
3623 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3624 + LANCER_FW_DOWNLOAD_CHUNK;
3625 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3626 &flash_cmd.dma, GFP_KERNEL);
3627 if (!flash_cmd.va) {
3628 status = -ENOMEM;
3629 goto lancer_fw_exit;
3630 }
3631
3632 dest_image_ptr = flash_cmd.va +
3633 sizeof(struct lancer_cmd_req_write_object);
3634 image_size = fw->size;
3635 data_ptr = fw->data;
3636
3637 while (image_size) {
3638 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3639
3640 /* Copy the image chunk content. */
3641 memcpy(dest_image_ptr, data_ptr, chunk_size);
3642
3643 status = lancer_cmd_write_object(adapter, &flash_cmd,
3644 chunk_size, offset,
3645 LANCER_FW_DOWNLOAD_LOCATION,
3646 &data_written, &change_status,
3647 &add_status);
3648 if (status)
3649 break;
3650
3651 offset += data_written;
3652 data_ptr += data_written;
3653 image_size -= data_written;
3654 }
3655
3656 if (!status) {
3657 /* Commit the FW written */
3658 status = lancer_cmd_write_object(adapter, &flash_cmd,
3659 0, offset,
3660 LANCER_FW_DOWNLOAD_LOCATION,
3661 &data_written, &change_status,
3662 &add_status);
3663 }
3664
3665 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3666 flash_cmd.dma);
3667 if (status) {
3668 dev_err(&adapter->pdev->dev,
3669 "Firmware load error. "
3670 "Status code: 0x%x Additional Status: 0x%x\n",
3671 status, add_status);
3672 goto lancer_fw_exit;
3673 }
3674
3675 if (change_status == LANCER_FW_RESET_NEEDED) {
3676 status = lancer_physdev_ctrl(adapter,
3677 PHYSDEV_CONTROL_FW_RESET_MASK);
3678 if (status) {
3679 dev_err(&adapter->pdev->dev,
3680 "Adapter busy for FW reset.\n"
3681 "New FW will not be active.\n");
3682 goto lancer_fw_exit;
3683 }
3684 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3685 dev_err(&adapter->pdev->dev,
3686 "System reboot required for new FW"
3687 " to be active\n");
3688 }
3689
3690 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3691 lancer_fw_exit:
3692 return status;
3693 }
3694
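/* UFI image types: derived from the build version in the flash file
 * header and, for BE3, from the asic_type_rev field (BE3 vs BE3-R).
 */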
3695 #define UFI_TYPE2 2
3696 #define UFI_TYPE3 3
3697 #define UFI_TYPE3R 10
3698 #define UFI_TYPE4 4
3699 static int be_get_ufi_type(struct be_adapter *adapter,
3700 struct flash_file_hdr_g3 *fhdr)
3701 {
3702 if (fhdr == NULL)
3703 goto be_get_ufi_exit;
3704
3705 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3706 return UFI_TYPE4;
3707 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3708 if (fhdr->asic_type_rev == 0x10)
3709 return UFI_TYPE3R;
3710 else
3711 return UFI_TYPE3;
3712 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3713 return UFI_TYPE2;
3714
3715 be_get_ufi_exit:
3716 dev_err(&adapter->pdev->dev,
3717 "UFI and Interface are not compatible for flashing\n");
3718 return -1;
3719 }
3720
3721 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3722 {
3723 struct flash_file_hdr_g3 *fhdr3;
3724 struct image_hdr *img_hdr_ptr = NULL;
3725 struct be_dma_mem flash_cmd;
3726 const u8 *p;
3727 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3728
3729 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3730 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3731 &flash_cmd.dma, GFP_KERNEL);
3732 if (!flash_cmd.va) {
3733 status = -ENOMEM;
3734 goto be_fw_exit;
3735 }
3736
3737 p = fw->data;
3738 fhdr3 = (struct flash_file_hdr_g3 *)p;
3739
3740 ufi_type = be_get_ufi_type(adapter, fhdr3);
3741
3742 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3743 for (i = 0; i < num_imgs; i++) {
3744 img_hdr_ptr = (struct image_hdr *)(fw->data +
3745 (sizeof(struct flash_file_hdr_g3) +
3746 i * sizeof(struct image_hdr)));
3747 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3748 switch (ufi_type) {
3749 case UFI_TYPE4:
3750 status = be_flash_skyhawk(adapter, fw,
3751 &flash_cmd, num_imgs);
3752 break;
3753 case UFI_TYPE3R:
3754 status = be_flash_BEx(adapter, fw, &flash_cmd,
3755 num_imgs);
3756 break;
3757 case UFI_TYPE3:
3758 /* Do not flash this ufi on BE3-R cards */
3759 if (adapter->asic_rev < 0x10)
3760 status = be_flash_BEx(adapter, fw,
3761 &flash_cmd,
3762 num_imgs);
3763 else {
3764 status = -1;
3765 dev_err(&adapter->pdev->dev,
3766 "Can't load BE3 UFI on BE3R\n");
3767 }
3768 }
3769 }
3770 }
3771
3772 if (ufi_type == UFI_TYPE2)
3773 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3774 else if (ufi_type == -1)
3775 status = -1;
3776
3777 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3778 flash_cmd.dma);
3779 if (status) {
3780 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3781 goto be_fw_exit;
3782 }
3783
3784 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3785
3786 be_fw_exit:
3787 return status;
3788 }
3789
3790 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3791 {
3792 const struct firmware *fw;
3793 int status;
3794
3795 if (!netif_running(adapter->netdev)) {
3796 dev_err(&adapter->pdev->dev,
3797 "Firmware load not allowed (interface is down)\n");
3798 return -1;
3799 }
3800
3801 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3802 if (status)
3803 goto fw_exit;
3804
3805 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3806
3807 if (lancer_chip(adapter))
3808 status = lancer_fw_download(adapter, fw);
3809 else
3810 status = be_fw_download(adapter, fw);
3811
3812 if (!status)
3813 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3814 adapter->fw_on_flash);
3815
3816 fw_exit:
3817 release_firmware(fw);
3818 return status;
3819 }
3820
3821 static int be_ndo_bridge_setlink(struct net_device *dev,
3822 struct nlmsghdr *nlh)
3823 {
3824 struct be_adapter *adapter = netdev_priv(dev);
3825 struct nlattr *attr, *br_spec;
3826 int rem;
3827 int status = 0;
3828 u16 mode = 0;
3829
3830 if (!sriov_enabled(adapter))
3831 return -EOPNOTSUPP;
3832
3833 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;
3834
3835 nla_for_each_nested(attr, br_spec, rem) {
3836 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3837 continue;
3838
3839 mode = nla_get_u16(attr);
3840 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3841 return -EINVAL;
3842
3843 status = be_cmd_set_hsw_config(adapter, 0, 0,
3844 adapter->if_handle,
3845 mode == BRIDGE_MODE_VEPA ?
3846 PORT_FWD_TYPE_VEPA :
3847 PORT_FWD_TYPE_VEB);
3848 if (status)
3849 goto err;
3850
3851 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3852 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3853
3854 return status;
3855 }
3856 err:
3857 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3858 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3859
3860 return status;
3861 }
3862
3863 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3864 struct net_device *dev,
3865 u32 filter_mask)
3866 {
3867 struct be_adapter *adapter = netdev_priv(dev);
3868 int status = 0;
3869 u8 hsw_mode;
3870
3871 if (!sriov_enabled(adapter))
3872 return 0;
3873
3874 /* BE and Lancer chips support VEB mode only */
3875 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3876 hsw_mode = PORT_FWD_TYPE_VEB;
3877 } else {
3878 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3879 adapter->if_handle, &hsw_mode);
3880 if (status)
3881 return 0;
3882 }
3883
3884 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3885 hsw_mode == PORT_FWD_TYPE_VEPA ?
3886 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3887 }
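/* Both bridge NDOs above are exercised through the standard netlink bridge
 * API.  With SR-IOV VFs enabled, the port forwarding mode can be changed
 * and inspected from user space with iproute2, for example:
 *
 *	bridge link set dev eth0 hwmode vepa	(switch the uplink to VEPA)
 *	bridge link set dev eth0 hwmode veb	(back to the default VEB)
 *	bridge link show dev eth0		(reports the current hwmode)
 *
 * eth0 is a placeholder interface name.
 */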
3888
3889 static const struct net_device_ops be_netdev_ops = {
3890 .ndo_open = be_open,
3891 .ndo_stop = be_close,
3892 .ndo_start_xmit = be_xmit,
3893 .ndo_set_rx_mode = be_set_rx_mode,
3894 .ndo_set_mac_address = be_mac_addr_set,
3895 .ndo_change_mtu = be_change_mtu,
3896 .ndo_get_stats64 = be_get_stats64,
3897 .ndo_validate_addr = eth_validate_addr,
3898 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3899 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3900 .ndo_set_vf_mac = be_set_vf_mac,
3901 .ndo_set_vf_vlan = be_set_vf_vlan,
3902 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3903 .ndo_get_vf_config = be_get_vf_config,
3904 #ifdef CONFIG_NET_POLL_CONTROLLER
3905 .ndo_poll_controller = be_netpoll,
3906 #endif
3907 .ndo_bridge_setlink = be_ndo_bridge_setlink,
3908 .ndo_bridge_getlink = be_ndo_bridge_getlink,
3909 };
3910
3911 static void be_netdev_init(struct net_device *netdev)
3912 {
3913 struct be_adapter *adapter = netdev_priv(netdev);
3914
3915 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3916 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3917 NETIF_F_HW_VLAN_CTAG_TX;
3918 if (be_multi_rxq(adapter))
3919 netdev->hw_features |= NETIF_F_RXHASH;
3920
3921 netdev->features |= netdev->hw_features |
3922 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3923
3924 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3925 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3926
3927 netdev->priv_flags |= IFF_UNICAST_FLT;
3928
3929 netdev->flags |= IFF_MULTICAST;
3930
3931 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3932
3933 netdev->netdev_ops = &be_netdev_ops;
3934
3935 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3936 }
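/* The bits placed in netdev->hw_features above are the offloads user space
 * may toggle at run time, for example (eth0 is a placeholder):
 *
 *	ethtool -k eth0			(list current offload settings)
 *	ethtool -K eth0 tso off		(TSO is in hw_features, so it can be toggled)
 *
 * Flags set only in netdev->features (such as VLAN RX offload here) cannot
 * be changed this way.
 */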
3937
3938 static void be_unmap_pci_bars(struct be_adapter *adapter)
3939 {
3940 if (adapter->csr)
3941 pci_iounmap(adapter->pdev, adapter->csr);
3942 if (adapter->db)
3943 pci_iounmap(adapter->pdev, adapter->db);
3944 }
3945
3946 static int db_bar(struct be_adapter *adapter)
3947 {
3948 if (lancer_chip(adapter) || !be_physfn(adapter))
3949 return 0;
3950 else
3951 return 4;
3952 }
3953
3954 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3955 {
3956 if (skyhawk_chip(adapter)) {
3957 adapter->roce_db.size = 4096;
3958 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3959 db_bar(adapter));
3960 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3961 db_bar(adapter));
3962 }
3963 return 0;
3964 }
3965
3966 static int be_map_pci_bars(struct be_adapter *adapter)
3967 {
3968 u8 __iomem *addr;
3969 u32 sli_intf;
3970
3971 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3972 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3973 SLI_INTF_IF_TYPE_SHIFT;
3974
3975 if (BEx_chip(adapter) && be_physfn(adapter)) {
3976 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3977 if (adapter->csr == NULL)
3978 return -ENOMEM;
3979 }
3980
3981 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3982 if (addr == NULL)
3983 goto pci_map_err;
3984 adapter->db = addr;
3985
3986 be_roce_map_pci_bars(adapter);
3987 return 0;
3988
3989 pci_map_err:
3990 be_unmap_pci_bars(adapter);
3991 return -ENOMEM;
3992 }
3993
3994 static void be_ctrl_cleanup(struct be_adapter *adapter)
3995 {
3996 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3997
3998 be_unmap_pci_bars(adapter);
3999
4000 if (mem->va)
4001 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4002 mem->dma);
4003
4004 mem = &adapter->rx_filter;
4005 if (mem->va)
4006 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4007 mem->dma);
4008 }
4009
4010 static int be_ctrl_init(struct be_adapter *adapter)
4011 {
4012 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4013 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4014 struct be_dma_mem *rx_filter = &adapter->rx_filter;
4015 u32 sli_intf;
4016 int status;
4017
4018 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4019 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4020 SLI_INTF_FAMILY_SHIFT;
4021 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4022
4023 status = be_map_pci_bars(adapter);
4024 if (status)
4025 goto done;
4026
4027 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4028 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4029 mbox_mem_alloc->size,
4030 &mbox_mem_alloc->dma,
4031 GFP_KERNEL);
4032 if (!mbox_mem_alloc->va) {
4033 status = -ENOMEM;
4034 goto unmap_pci_bars;
4035 }
4036 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4037 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4038 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4039 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4040
4041 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4042 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4043 rx_filter->size, &rx_filter->dma,
4044 GFP_KERNEL);
4045 if (rx_filter->va == NULL) {
4046 status = -ENOMEM;
4047 goto free_mbox;
4048 }
4049
4050 mutex_init(&adapter->mbox_lock);
4051 spin_lock_init(&adapter->mcc_lock);
4052 spin_lock_init(&adapter->mcc_cq_lock);
4053
4054 init_completion(&adapter->flash_compl);
4055 pci_save_state(adapter->pdev);
4056 return 0;
4057
4058 free_mbox:
4059 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4060 mbox_mem_alloc->va, mbox_mem_alloc->dma);
4061
4062 unmap_pci_bars:
4063 be_unmap_pci_bars(adapter);
4064
4065 done:
4066 return status;
4067 }
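/* Worked example for the mailbox sizing in be_ctrl_init(): the MCC mailbox
 * must start on a 16-byte boundary, so sizeof(struct be_mcc_mailbox) + 16
 * bytes are allocated and the start is rounded up with PTR_ALIGN().  For a
 * hypothetical buffer returned at address 0x1008:
 *
 *	PTR_ALIGN(0x1008, 16) == 0x1010
 *
 * i.e. at most 15 bytes (8 in this example) are wasted at the front, while
 * a full, aligned mailbox always fits in the remaining space.
 */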
4068
4069 static void be_stats_cleanup(struct be_adapter *adapter)
4070 {
4071 struct be_dma_mem *cmd = &adapter->stats_cmd;
4072
4073 if (cmd->va)
4074 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4075 cmd->va, cmd->dma);
4076 }
4077
4078 static int be_stats_init(struct be_adapter *adapter)
4079 {
4080 struct be_dma_mem *cmd = &adapter->stats_cmd;
4081
4082 if (lancer_chip(adapter))
4083 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4084 else if (BE2_chip(adapter))
4085 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4086 else
4087 /* BE3 and Skyhawk */
4088 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4089
4090 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4091 GFP_KERNEL);
4092 if (cmd->va == NULL)
4093 return -ENOMEM;
4094 return 0;
4095 }
4096
4097 static void be_remove(struct pci_dev *pdev)
4098 {
4099 struct be_adapter *adapter = pci_get_drvdata(pdev);
4100
4101 if (!adapter)
4102 return;
4103
4104 be_roce_dev_remove(adapter);
4105 be_intr_set(adapter, false);
4106
4107 cancel_delayed_work_sync(&adapter->func_recovery_work);
4108
4109 unregister_netdev(adapter->netdev);
4110
4111 be_clear(adapter);
4112
4113 /* tell fw we're done with firing cmds */
4114 be_cmd_fw_clean(adapter);
4115
4116 be_stats_cleanup(adapter);
4117
4118 be_ctrl_cleanup(adapter);
4119
4120 pci_disable_pcie_error_reporting(pdev);
4121
4122 pci_set_drvdata(pdev, NULL);
4123 pci_release_regions(pdev);
4124 pci_disable_device(pdev);
4125
4126 free_netdev(adapter->netdev);
4127 }
4128
4129 bool be_is_wol_supported(struct be_adapter *adapter)
4130 {
4131 return (adapter->wol_cap & BE_WOL_CAP) &&
4132 !be_is_wol_excluded(adapter);
4133 }
4134
4135 u32 be_get_fw_log_level(struct be_adapter *adapter)
4136 {
4137 struct be_dma_mem extfat_cmd;
4138 struct be_fat_conf_params *cfgs;
4139 int status;
4140 u32 level = 0;
4141 int j;
4142
4143 if (lancer_chip(adapter))
4144 return 0;
4145
4146 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4147 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4148 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4149 &extfat_cmd.dma);
4150
4151 if (!extfat_cmd.va) {
4152 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4153 __func__);
4154 goto err;
4155 }
4156
4157 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4158 if (!status) {
4159 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4160 sizeof(struct be_cmd_resp_hdr));
4161 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4162 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4163 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4164 }
4165 }
4166 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4167 extfat_cmd.dma);
4168 err:
4169 return level;
4170 }
4171
4172 static int be_get_initial_config(struct be_adapter *adapter)
4173 {
4174 int status;
4175 u32 level;
4176
4177 status = be_cmd_get_cntl_attributes(adapter);
4178 if (status)
4179 return status;
4180
4181 status = be_cmd_get_acpi_wol_cap(adapter);
4182 if (status) {
4183 /* In case of a failure to get WOL capabilities,
4184 * check the exclusion list to determine WOL capability */
4185 if (!be_is_wol_excluded(adapter))
4186 adapter->wol_cap |= BE_WOL_CAP;
4187 }
4188
4189 if (be_is_wol_supported(adapter))
4190 adapter->wol = true;
4191
4192 /* Must be a power of 2 or else MODULO will BUG_ON */
4193 adapter->be_get_temp_freq = 64;
4194
4195 level = be_get_fw_log_level(adapter);
4196 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4197
4198 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4199 return 0;
4200 }
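/* The power-of-2 requirement noted above would follow from a modulo helper
 * of roughly this shape (hypothetical sketch; see be.h for the real
 * MODULO() definition):
 *
 *	#define MODULO(x, div)	({ BUG_ON(!is_power_of_2(div)); \
 *				   (x) & ((div) - 1); })
 *
 * be_get_temp_freq = 64 satisfies it, so the die-temperature query in
 * be_worker() runs once every 64 passes, i.e. roughly once a minute.
 */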
4201
4202 static int lancer_recover_func(struct be_adapter *adapter)
4203 {
4204 struct device *dev = &adapter->pdev->dev;
4205 int status;
4206
4207 status = lancer_test_and_set_rdy_state(adapter);
4208 if (status)
4209 goto err;
4210
4211 if (netif_running(adapter->netdev))
4212 be_close(adapter->netdev);
4213
4214 be_clear(adapter);
4215
4216 be_clear_all_error(adapter);
4217
4218 status = be_setup(adapter);
4219 if (status)
4220 goto err;
4221
4222 if (netif_running(adapter->netdev)) {
4223 status = be_open(adapter->netdev);
4224 if (status)
4225 goto err;
4226 }
4227
4228 dev_err(dev, "Error recovery successful\n");
4229 return 0;
4230 err:
4231 if (status == -EAGAIN)
4232 dev_err(dev, "Waiting for resource provisioning\n");
4233 else
4234 dev_err(dev, "Error recovery failed\n");
4235
4236 return status;
4237 }
4238
4239 static void be_func_recovery_task(struct work_struct *work)
4240 {
4241 struct be_adapter *adapter =
4242 container_of(work, struct be_adapter, func_recovery_work.work);
4243 int status = 0;
4244
4245 be_detect_error(adapter);
4246
4247 if (adapter->hw_error && lancer_chip(adapter)) {
4248
4249 rtnl_lock();
4250 netif_device_detach(adapter->netdev);
4251 rtnl_unlock();
4252
4253 status = lancer_recover_func(adapter);
4254 if (!status)
4255 netif_device_attach(adapter->netdev);
4256 }
4257
4258 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4259 * no need to attempt further recovery.
4260 */
4261 if (!status || status == -EAGAIN)
4262 schedule_delayed_work(&adapter->func_recovery_work,
4263 msecs_to_jiffies(1000));
4264 }
4265
4266 static void be_worker(struct work_struct *work)
4267 {
4268 struct be_adapter *adapter =
4269 container_of(work, struct be_adapter, work.work);
4270 struct be_rx_obj *rxo;
4271 struct be_eq_obj *eqo;
4272 int i;
4273
4274 /* when interrupts are not yet enabled, just reap any pending
4275 * mcc completions */
4276 if (!netif_running(adapter->netdev)) {
4277 local_bh_disable();
4278 be_process_mcc(adapter);
4279 local_bh_enable();
4280 goto reschedule;
4281 }
4282
4283 if (!adapter->stats_cmd_sent) {
4284 if (lancer_chip(adapter))
4285 lancer_cmd_get_pport_stats(adapter,
4286 &adapter->stats_cmd);
4287 else
4288 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4289 }
4290
4291 if (be_physfn(adapter) &&
4292 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4293 be_cmd_get_die_temperature(adapter);
4294
4295 for_all_rx_queues(adapter, rxo, i) {
4296 if (rxo->rx_post_starved) {
4297 rxo->rx_post_starved = false;
4298 be_post_rx_frags(rxo, GFP_KERNEL);
4299 }
4300 }
4301
4302 for_all_evt_queues(adapter, eqo, i)
4303 be_eqd_update(adapter, eqo);
4304
4305 reschedule:
4306 adapter->work_counter++;
4307 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4308 }
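/* be_worker() (and be_func_recovery_task() above) follow the common
 * self-rescheduling delayed-work pattern: the work item is armed once in
 * be_probe() and then re-arms itself on every pass, so at most one instance
 * is ever queued:
 *
 *	INIT_DELAYED_WORK(&adapter->work, be_worker);
 *	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 *	...
 *	cancel_delayed_work_sync(&adapter->work);	(teardown, see be_shutdown())
 *
 * giving a roughly once-per-second housekeeping tick while the device is up.
 */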
4309
4310 /* If any VFs are already enabled don't FLR the PF */
4311 static bool be_reset_required(struct be_adapter *adapter)
4312 {
4313 return !pci_num_vf(adapter->pdev);
4314 }
4315
4316 static char *mc_name(struct be_adapter *adapter)
4317 {
4318 if (adapter->function_mode & FLEX10_MODE)
4319 return "FLEX10";
4320 else if (adapter->function_mode & VNIC_MODE)
4321 return "vNIC";
4322 else if (adapter->function_mode & UMC_ENABLED)
4323 return "UMC";
4324 else
4325 return "";
4326 }
4327
4328 static inline char *func_name(struct be_adapter *adapter)
4329 {
4330 return be_physfn(adapter) ? "PF" : "VF";
4331 }
4332
4333 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4334 {
4335 int status = 0;
4336 struct be_adapter *adapter;
4337 struct net_device *netdev;
4338 char port_name;
4339
4340 status = pci_enable_device(pdev);
4341 if (status)
4342 goto do_none;
4343
4344 status = pci_request_regions(pdev, DRV_NAME);
4345 if (status)
4346 goto disable_dev;
4347 pci_set_master(pdev);
4348
4349 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4350 if (netdev == NULL) {
4351 status = -ENOMEM;
4352 goto rel_reg;
4353 }
4354 adapter = netdev_priv(netdev);
4355 adapter->pdev = pdev;
4356 pci_set_drvdata(pdev, adapter);
4357 adapter->netdev = netdev;
4358 SET_NETDEV_DEV(netdev, &pdev->dev);
4359
4360 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4361 if (!status) {
4362 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4363 if (status < 0) {
4364 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4365 goto free_netdev;
4366 }
4367 netdev->features |= NETIF_F_HIGHDMA;
4368 } else {
4369 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4370 if (!status)
4371 status = dma_set_coherent_mask(&pdev->dev,
4372 DMA_BIT_MASK(32));
4373 if (status) {
4374 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4375 goto free_netdev;
4376 }
4377 }
4378
4379 status = pci_enable_pcie_error_reporting(pdev);
4380 if (status)
4381 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
4382
4383 status = be_ctrl_init(adapter);
4384 if (status)
4385 goto free_netdev;
4386
4387 /* sync up with fw's ready state */
4388 if (be_physfn(adapter)) {
4389 status = be_fw_wait_ready(adapter);
4390 if (status)
4391 goto ctrl_clean;
4392 }
4393
4394 if (be_reset_required(adapter)) {
4395 status = be_cmd_reset_function(adapter);
4396 if (status)
4397 goto ctrl_clean;
4398
4399 /* Wait for interrupts to quiesce after an FLR */
4400 msleep(100);
4401 }
4402
4403 /* Allow interrupts for other ULPs running on NIC function */
4404 be_intr_set(adapter, true);
4405
4406 /* tell fw we're ready to fire cmds */
4407 status = be_cmd_fw_init(adapter);
4408 if (status)
4409 goto ctrl_clean;
4410
4411 status = be_stats_init(adapter);
4412 if (status)
4413 goto ctrl_clean;
4414
4415 status = be_get_initial_config(adapter);
4416 if (status)
4417 goto stats_clean;
4418
4419 INIT_DELAYED_WORK(&adapter->work, be_worker);
4420 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4421 adapter->rx_fc = adapter->tx_fc = true;
4422
4423 status = be_setup(adapter);
4424 if (status)
4425 goto stats_clean;
4426
4427 be_netdev_init(netdev);
4428 status = register_netdev(netdev);
4429 if (status != 0)
4430 goto unsetup;
4431
4432 be_roce_dev_add(adapter);
4433
4434 schedule_delayed_work(&adapter->func_recovery_work,
4435 msecs_to_jiffies(1000));
4436
4437 be_cmd_query_port_name(adapter, &port_name);
4438
4439 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4440 func_name(adapter), mc_name(adapter), port_name);
4441
4442 return 0;
4443
4444 unsetup:
4445 be_clear(adapter);
4446 stats_clean:
4447 be_stats_cleanup(adapter);
4448 ctrl_clean:
4449 be_ctrl_cleanup(adapter);
4450 free_netdev:
4451 free_netdev(netdev);
4452 pci_set_drvdata(pdev, NULL);
4453 rel_reg:
4454 pci_release_regions(pdev);
4455 disable_dev:
4456 pci_disable_device(pdev);
4457 do_none:
4458 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4459 return status;
4460 }
4461
4462 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4463 {
4464 struct be_adapter *adapter = pci_get_drvdata(pdev);
4465 struct net_device *netdev = adapter->netdev;
4466
4467 if (adapter->wol)
4468 be_setup_wol(adapter, true);
4469
4470 cancel_delayed_work_sync(&adapter->func_recovery_work);
4471
4472 netif_device_detach(netdev);
4473 if (netif_running(netdev)) {
4474 rtnl_lock();
4475 be_close(netdev);
4476 rtnl_unlock();
4477 }
4478 be_clear(adapter);
4479
4480 pci_save_state(pdev);
4481 pci_disable_device(pdev);
4482 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4483 return 0;
4484 }
4485
4486 static int be_resume(struct pci_dev *pdev)
4487 {
4488 int status = 0;
4489 struct be_adapter *adapter = pci_get_drvdata(pdev);
4490 struct net_device *netdev = adapter->netdev;
4491
4492 netif_device_detach(netdev);
4493
4494 status = pci_enable_device(pdev);
4495 if (status)
4496 return status;
4497
4498 pci_set_power_state(pdev, PCI_D0);
4499 pci_restore_state(pdev);
4500
4501 status = be_fw_wait_ready(adapter);
4502 if (status)
4503 return status;
4504
4505 /* tell fw we're ready to fire cmds */
4506 status = be_cmd_fw_init(adapter);
4507 if (status)
4508 return status;
4509
4510 be_setup(adapter);
4511 if (netif_running(netdev)) {
4512 rtnl_lock();
4513 be_open(netdev);
4514 rtnl_unlock();
4515 }
4516
4517 schedule_delayed_work(&adapter->func_recovery_work,
4518 msecs_to_jiffies(1000));
4519 netif_device_attach(netdev);
4520
4521 if (adapter->wol)
4522 be_setup_wol(adapter, false);
4523
4524 return 0;
4525 }
4526
4527 /*
4528 * An FLR will stop BE from DMAing any data.
4529 */
4530 static void be_shutdown(struct pci_dev *pdev)
4531 {
4532 struct be_adapter *adapter = pci_get_drvdata(pdev);
4533
4534 if (!adapter)
4535 return;
4536
4537 cancel_delayed_work_sync(&adapter->work);
4538 cancel_delayed_work_sync(&adapter->func_recovery_work);
4539
4540 netif_device_detach(adapter->netdev);
4541
4542 be_cmd_reset_function(adapter);
4543
4544 pci_disable_device(pdev);
4545 }
4546
4547 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4548 pci_channel_state_t state)
4549 {
4550 struct be_adapter *adapter = pci_get_drvdata(pdev);
4551 struct net_device *netdev = adapter->netdev;
4552
4553 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4554
4555 if (!adapter->eeh_error) {
4556 adapter->eeh_error = true;
4557
4558 cancel_delayed_work_sync(&adapter->func_recovery_work);
4559
4560 rtnl_lock();
4561 netif_device_detach(netdev);
4562 if (netif_running(netdev))
4563 be_close(netdev);
4564 rtnl_unlock();
4565
4566 be_clear(adapter);
4567 }
4568
4569 if (state == pci_channel_io_perm_failure)
4570 return PCI_ERS_RESULT_DISCONNECT;
4571
4572 pci_disable_device(pdev);
4573
4574 /* The error could cause the FW to trigger a flash debug dump.
4575 * Resetting the card while flash dump is in progress
4576 * can cause it not to recover; wait for it to finish.
4577 * Wait only for first function as it is needed only once per
4578 * adapter.
4579 */
4580 if (pdev->devfn == 0)
4581 ssleep(30);
4582
4583 return PCI_ERS_RESULT_NEED_RESET;
4584 }
4585
4586 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4587 {
4588 struct be_adapter *adapter = pci_get_drvdata(pdev);
4589 int status;
4590
4591 dev_info(&adapter->pdev->dev, "EEH reset\n");
4592
4593 status = pci_enable_device(pdev);
4594 if (status)
4595 return PCI_ERS_RESULT_DISCONNECT;
4596
4597 pci_set_master(pdev);
4598 pci_set_power_state(pdev, PCI_D0);
4599 pci_restore_state(pdev);
4600
4601 /* Check if card is ok and fw is ready */
4602 dev_info(&adapter->pdev->dev,
4603 "Waiting for FW to be ready after EEH reset\n");
4604 status = be_fw_wait_ready(adapter);
4605 if (status)
4606 return PCI_ERS_RESULT_DISCONNECT;
4607
4608 pci_cleanup_aer_uncorrect_error_status(pdev);
4609 be_clear_all_error(adapter);
4610 return PCI_ERS_RESULT_RECOVERED;
4611 }
4612
4613 static void be_eeh_resume(struct pci_dev *pdev)
4614 {
4615 int status = 0;
4616 struct be_adapter *adapter = pci_get_drvdata(pdev);
4617 struct net_device *netdev = adapter->netdev;
4618
4619 dev_info(&adapter->pdev->dev, "EEH resume\n");
4620
4621 pci_save_state(pdev);
4622
4623 status = be_cmd_reset_function(adapter);
4624 if (status)
4625 goto err;
4626
4627 /* tell fw we're ready to fire cmds */
4628 status = be_cmd_fw_init(adapter);
4629 if (status)
4630 goto err;
4631
4632 status = be_setup(adapter);
4633 if (status)
4634 goto err;
4635
4636 if (netif_running(netdev)) {
4637 status = be_open(netdev);
4638 if (status)
4639 goto err;
4640 }
4641
4642 schedule_delayed_work(&adapter->func_recovery_work,
4643 msecs_to_jiffies(1000));
4644 netif_device_attach(netdev);
4645 return;
4646 err:
4647 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4648 }
4649
4650 static const struct pci_error_handlers be_eeh_handlers = {
4651 .error_detected = be_eeh_err_detected,
4652 .slot_reset = be_eeh_reset,
4653 .resume = be_eeh_resume,
4654 };
4655
4656 static struct pci_driver be_driver = {
4657 .name = DRV_NAME,
4658 .id_table = be_dev_ids,
4659 .probe = be_probe,
4660 .remove = be_remove,
4661 .suspend = be_suspend,
4662 .resume = be_resume,
4663 .shutdown = be_shutdown,
4664 .err_handler = &be_eeh_handlers
4665 };
4666
4667 static int __init be_init_module(void)
4668 {
4669 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4670 rx_frag_size != 2048) {
4671 printk(KERN_WARNING DRV_NAME
4672 " : Module param rx_frag_size must be 2048/4096/8192."
4673 " Using 2048\n");
4674 rx_frag_size = 2048;
4675 }
4676
4677 return pci_register_driver(&be_driver);
4678 }
4679 module_init(be_init_module);
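/* Example load-time usage of the rx_frag_size module parameter validated in
 * be_init_module() above (illustrative value; be2net is the driver's module
 * name):
 *
 *	modprobe be2net rx_frag_size=4096
 *
 * Any value other than 2048, 4096 or 8192 logs a warning and falls back to
 * the 2048 default.
 */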
4680
4681 static void __exit be_exit_module(void)
4682 {
4683 pci_unregister_driver(&be_driver);
4684 }
4685 module_exit(be_exit_module);