1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
 44#include <linux/if.h>
45#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
63#include <asm/uaccess.h>
64
65#include "cxgb4.h"
66#include "t4_regs.h"
67#include "t4_msg.h"
68#include "t4fw_api.h"
69#include "l2t.h"
70
 71#define DRV_VERSION "1.3.0-ko"
72#define DRV_DESC "Chelsio T4 Network Driver"
73
74/*
75 * Max interrupt hold-off timer value in us. Queues fall back to this value
76 * under extreme memory pressure so it's largish to give the system time to
77 * recover.
78 */
79#define MAX_SGE_TIMERVAL 200U
80
 81enum {
82 /*
83 * Physical Function provisioning constants.
84 */
85 PFRES_NVI = 4, /* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
87 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
88 */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
93
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
95 PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97#ifdef CONFIG_PCI_IOV
98 /*
99 * Virtual Function provisioning constants. We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 102 * Event Queue and Forwarded Interrupt Queue (when using MSI mode);
103 * Ethernet/Control Egress Queue and for each Free List, we need an
104 * Egress Context.
105 */
106 VFRES_NPORTS = 1, /* # of "ports" per VF */
107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
108
109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
 112	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
 113	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
114 VFRES_TC = 0, /* PCI-E traffic class */
115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
116
117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
 119#endif
120};
121
122/*
123 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
124 * static and likely not to be useful in the long run. We really need to
125 * implement some form of persistent configuration which the firmware
126 * controls.
127 */
128static unsigned int pfvfres_pmask(struct adapter *adapter,
129 unsigned int pf, unsigned int vf)
130{
131 unsigned int portn, portvec;
132
133 /*
134 * Give PF's access to all of the ports.
135 */
136 if (vf == 0)
137 return FW_PFVF_CMD_PMASK_MASK;
138
139 /*
140 * For VFs, we'll assign them access to the ports based purely on the
141 * PF. We assign active ports in order, wrapping around if there are
142 * fewer active ports than PFs: e.g. active port[pf % nports].
143 * Unfortunately the adapter's port_info structs haven't been
144 * initialized yet so we have to compute this.
145 */
146 if (adapter->params.nports == 0)
147 return 0;
148
149 portn = pf % adapter->params.nports;
150 portvec = adapter->params.portvec;
151 for (;;) {
152 /*
153 * Isolate the lowest set bit in the port vector. If we're at
 154 * the port number that we want, return that as the pmask;
 155 * otherwise mask that bit out of the port vector and
156 * decrement our port number ...
157 */
158 unsigned int pmask = portvec ^ (portvec & (portvec-1));
159 if (portn == 0)
160 return pmask;
161 portn--;
162 portvec &= ~pmask;
163 }
164 /*NOTREACHED*/
165}
 166
167enum {
168 MAX_TXQ_ENTRIES = 16384,
169 MAX_CTRL_TXQ_ENTRIES = 1024,
170 MAX_RSPQ_ENTRIES = 16384,
171 MAX_RX_BUFFERS = 16384,
172 MIN_TXQ_ENTRIES = 32,
173 MIN_CTRL_TXQ_ENTRIES = 32,
174 MIN_RSPQ_ENTRIES = 128,
175 MIN_FL_ENTRIES = 16
176};
177
178#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
179 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
180 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
181
 182#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
183
184static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 185	CH_DEVICE(0xa000, 0),  /* PE10K */
186 CH_DEVICE(0x4001, -1),
187 CH_DEVICE(0x4002, -1),
188 CH_DEVICE(0x4003, -1),
189 CH_DEVICE(0x4004, -1),
190 CH_DEVICE(0x4005, -1),
191 CH_DEVICE(0x4006, -1),
192 CH_DEVICE(0x4007, -1),
193 CH_DEVICE(0x4008, -1),
194 CH_DEVICE(0x4009, -1),
195 CH_DEVICE(0x400a, -1),
196 CH_DEVICE(0x4401, 4),
197 CH_DEVICE(0x4402, 4),
198 CH_DEVICE(0x4403, 4),
199 CH_DEVICE(0x4404, 4),
200 CH_DEVICE(0x4405, 4),
201 CH_DEVICE(0x4406, 4),
202 CH_DEVICE(0x4407, 4),
203 CH_DEVICE(0x4408, 4),
204 CH_DEVICE(0x4409, 4),
205 CH_DEVICE(0x440a, 4),
206 CH_DEVICE(0x440d, 4),
207 CH_DEVICE(0x440e, 4),
208 { 0, }
209};
210
211#define FW_FNAME "cxgb4/t4fw.bin"
 212#define FW_CFNAME "cxgb4/t4-config.txt"
213
214MODULE_DESCRIPTION(DRV_DESC);
215MODULE_AUTHOR("Chelsio Communications");
216MODULE_LICENSE("Dual BSD/GPL");
217MODULE_VERSION(DRV_VERSION);
218MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
219MODULE_FIRMWARE(FW_FNAME);
220
221/*
222 * Normally we're willing to become the firmware's Master PF but will be happy
223 * if another PF has already become the Master and initialized the adapter.
224 * Setting "force_init" will cause this driver to forcibly establish itself as
225 * the Master PF and initialize the adapter.
226 */
227static uint force_init;
228
229module_param(force_init, uint, 0644);
230MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
231
232/*
233 * Normally if the firmware we connect to has Configuration File support, we
234 * use that and only fall back to the old Driver-based initialization if the
235 * Configuration File fails for some reason. If force_old_init is set, then
236 * we'll always use the old Driver-based initialization sequence.
237 */
238static uint force_old_init;
239
240module_param(force_old_init, uint, 0644);
241MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
242
243static int dflt_msg_enable = DFLT_MSG_ENABLE;
244
245module_param(dflt_msg_enable, int, 0644);
246MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
247
248/*
249 * The driver uses the best interrupt scheme available on a platform in the
250 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
251 * of these schemes the driver may consider as follows:
252 *
253 * msi = 2: choose from among all three options
254 * msi = 1: only consider MSI and INTx interrupts
255 * msi = 0: force INTx interrupts
256 */
257static int msi = 2;
258
259module_param(msi, int, 0644);
260MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
261
262/*
263 * Queue interrupt hold-off timer values. Queues default to the first of these
264 * upon creation.
265 */
266static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
267
268module_param_array(intr_holdoff, uint, NULL, 0644);
269MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
270 "0..4 in microseconds");
271
272static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
273
274module_param_array(intr_cnt, uint, NULL, 0644);
275MODULE_PARM_DESC(intr_cnt,
276 "thresholds 1..3 for queue interrupt packet counters");
277
278/*
279 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
280 * offset by 2 bytes in order to have the IP headers line up on 4-byte
281 * boundaries. This is a requirement for many architectures which will throw
282 * a machine check fault if an attempt is made to access one of the 4-byte IP
283 * header fields on a non-4-byte boundary. And it's a major performance issue
284 * even on some architectures which allow it like some implementations of the
285 * x86 ISA. However, some architectures don't mind this and for some very
286 * edge-case performance sensitive applications (like forwarding large volumes
287 * of small packets), setting this DMA offset to 0 will decrease the number of
288 * PCI-E Bus transfers enough to measurably affect performance.
289 */
290static int rx_dma_offset = 2;
291
 292static bool vf_acls;
293
294#ifdef CONFIG_PCI_IOV
295module_param(vf_acls, bool, 0644);
296MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
297
298static unsigned int num_vf[4];
299
300module_param_array(num_vf, uint, NULL, 0644);
301MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
302#endif
303
304/*
305 * The filter TCAM has a fixed portion and a variable portion. The fixed
306 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
307 * ports. The variable portion is 36 bits which can include things like Exact
308 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
309 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
310 * far exceed the 36-bit budget for this "compressed" header portion of the
311 * filter. Thus, we have a scarce resource which must be carefully managed.
312 *
313 * By default we set this up to mostly match the set of filter matching
314 * capabilities of T3 but with accommodations for some of T4's more
315 * interesting features:
316 *
317 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
318 * [Inner] VLAN (17), Port (3), FCoE (1) }
319 */
320enum {
321 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
322 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
323 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
324};
325
326static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
327
328static struct dentry *cxgb4_debugfs_root;
329
330static LIST_HEAD(adapter_list);
331static DEFINE_MUTEX(uld_mutex);
332static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
333static const char *uld_str[] = { "RDMA", "iSCSI" };
334
335static void link_report(struct net_device *dev)
336{
337 if (!netif_carrier_ok(dev))
338 netdev_info(dev, "link down\n");
339 else {
340 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
341
342 const char *s = "10Mbps";
343 const struct port_info *p = netdev_priv(dev);
344
345 switch (p->link_cfg.speed) {
346 case SPEED_10000:
347 s = "10Gbps";
348 break;
349 case SPEED_1000:
350 s = "1000Mbps";
351 break;
352 case SPEED_100:
353 s = "100Mbps";
354 break;
355 }
356
357 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
358 fc[p->link_cfg.fc]);
359 }
360}
361
362void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
363{
364 struct net_device *dev = adapter->port[port_id];
365
366 /* Skip changes from disabled ports. */
367 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
368 if (link_stat)
369 netif_carrier_on(dev);
370 else
371 netif_carrier_off(dev);
372
373 link_report(dev);
374 }
375}
376
377void t4_os_portmod_changed(const struct adapter *adap, int port_id)
378{
379 static const char *mod_str[] = {
 380		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
381 };
382
383 const struct net_device *dev = adap->port[port_id];
384 const struct port_info *pi = netdev_priv(dev);
385
386 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
387 netdev_info(dev, "port module unplugged\n");
 388	else if (pi->mod_type < ARRAY_SIZE(mod_str))
389 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
390}
391
392/*
393 * Configure the exact and hash address filters to handle a port's multicast
394 * and secondary unicast MAC addresses.
395 */
396static int set_addr_filters(const struct net_device *dev, bool sleep)
397{
398 u64 mhash = 0;
399 u64 uhash = 0;
400 bool free = true;
401 u16 filt_idx[7];
402 const u8 *addr[7];
403 int ret, naddr = 0;
404 const struct netdev_hw_addr *ha;
405 int uc_cnt = netdev_uc_count(dev);
 406	int mc_cnt = netdev_mc_count(dev);
 407	const struct port_info *pi = netdev_priv(dev);
 408	unsigned int mb = pi->adapter->fn;
409
410 /* first do the secondary unicast addresses */
411 netdev_for_each_uc_addr(ha, dev) {
412 addr[naddr++] = ha->addr;
413 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 414			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
415 naddr, addr, filt_idx, &uhash, sleep);
416 if (ret < 0)
417 return ret;
418
419 free = false;
420 naddr = 0;
421 }
422 }
423
424 /* next set up the multicast addresses */
425 netdev_for_each_mc_addr(ha, dev) {
426 addr[naddr++] = ha->addr;
427 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 428			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
429 naddr, addr, filt_idx, &mhash, sleep);
430 if (ret < 0)
431 return ret;
432
433 free = false;
434 naddr = 0;
435 }
436 }
437
 438	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
439 uhash | mhash, sleep);
440}
441
442int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
443module_param(dbfifo_int_thresh, int, 0644);
444MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
445
446int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
447module_param(dbfifo_drain_delay, int, 0644);
448MODULE_PARM_DESC(dbfifo_drain_delay,
449 "usecs to sleep while draining the dbfifo");
450
451/*
 452 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
453 * If @mtu is -1 it is left unchanged.
454 */
455static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
456{
457 int ret;
458 struct port_info *pi = netdev_priv(dev);
459
460 ret = set_addr_filters(dev, sleep_ok);
461 if (ret == 0)
 462		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
 463				    (dev->flags & IFF_PROMISC) ? 1 : 0,
 464				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
465 sleep_ok);
466 return ret;
467}
468
469static struct workqueue_struct *workq;
470
471/**
472 * link_start - enable a port
473 * @dev: the port to enable
474 *
475 * Performs the MAC and PHY actions needed to enable a port.
476 */
477static int link_start(struct net_device *dev)
478{
479 int ret;
480 struct port_info *pi = netdev_priv(dev);
 481	unsigned int mb = pi->adapter->fn;
482
483 /*
484 * We do not set address filters and promiscuity here, the stack does
485 * that step explicitly.
486 */
 487	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
 488			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
 489	if (ret == 0) {
 490		ret = t4_change_mac(pi->adapter, mb, pi->viid,
 491				    pi->xact_addr_filt, dev->dev_addr, true,
 492				    true);
493 if (ret >= 0) {
494 pi->xact_addr_filt = ret;
495 ret = 0;
496 }
497 }
498 if (ret == 0)
499 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
500 &pi->link_cfg);
 501	if (ret == 0)
 502		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
503 return ret;
504}
505
506/*
507 * Response queue handler for the FW event queue.
508 */
509static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
510 const struct pkt_gl *gl)
511{
512 u8 opcode = ((const struct rss_header *)rsp)->opcode;
513
514 rsp++; /* skip RSS header */
515 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
516 const struct cpl_sge_egr_update *p = (void *)rsp;
517 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
 518		struct sge_txq *txq;
 519
 520		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 521		txq->restarts++;
 522		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
523 struct sge_eth_txq *eq;
524
525 eq = container_of(txq, struct sge_eth_txq, q);
526 netif_tx_wake_queue(eq->txq);
527 } else {
528 struct sge_ofld_txq *oq;
529
530 oq = container_of(txq, struct sge_ofld_txq, q);
531 tasklet_schedule(&oq->qresume_tsk);
532 }
533 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
534 const struct cpl_fw6_msg *p = (void *)rsp;
535
536 if (p->type == 0)
537 t4_handle_fw_rpl(q->adap, p->data);
538 } else if (opcode == CPL_L2T_WRITE_RPL) {
539 const struct cpl_l2t_write_rpl *p = (void *)rsp;
540
541 do_l2t_write_rpl(q->adap, p);
542 } else
543 dev_err(q->adap->pdev_dev,
544 "unexpected CPL %#x on FW event queue\n", opcode);
545 return 0;
546}
547
548/**
549 * uldrx_handler - response queue handler for ULD queues
550 * @q: the response queue that received the packet
551 * @rsp: the response queue descriptor holding the offload message
552 * @gl: the gather list of packet fragments
553 *
554 * Deliver an ingress offload packet to a ULD. All processing is done by
 555 * the ULD; we just maintain statistics.
556 */
557static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
558 const struct pkt_gl *gl)
559{
560 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
561
562 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
563 rxq->stats.nomem++;
564 return -1;
565 }
566 if (gl == NULL)
567 rxq->stats.imm++;
568 else if (gl == CXGB4_MSG_AN)
569 rxq->stats.an++;
570 else
571 rxq->stats.pkts++;
572 return 0;
573}
574
575static void disable_msi(struct adapter *adapter)
576{
577 if (adapter->flags & USING_MSIX) {
578 pci_disable_msix(adapter->pdev);
579 adapter->flags &= ~USING_MSIX;
580 } else if (adapter->flags & USING_MSI) {
581 pci_disable_msi(adapter->pdev);
582 adapter->flags &= ~USING_MSI;
583 }
584}
585
586/*
587 * Interrupt handler for non-data events used with MSI-X.
588 */
589static irqreturn_t t4_nondata_intr(int irq, void *cookie)
590{
591 struct adapter *adap = cookie;
592
593 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
594 if (v & PFSW) {
595 adap->swintr = 1;
596 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
597 }
598 t4_slow_intr_handler(adap);
599 return IRQ_HANDLED;
600}
601
602/*
603 * Name the MSI-X interrupts.
604 */
605static void name_msix_vecs(struct adapter *adap)
606{
 607	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
608
609 /* non-data interrupts */
 610	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
611
612 /* FW events */
613 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
614 adap->port[0]->name);
615
616 /* Ethernet queues */
617 for_each_port(adap, j) {
618 struct net_device *d = adap->port[j];
619 const struct port_info *pi = netdev_priv(d);
620
 621		for (i = 0; i < pi->nqsets; i++, msi_idx++)
622 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
623 d->name, i);
624 }
625
626 /* offload queues */
627 for_each_ofldrxq(&adap->sge, i)
628 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
 629			 adap->port[0]->name, i);
630
631 for_each_rdmarxq(&adap->sge, i)
632 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 633			 adap->port[0]->name, i);
634}
635
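/*
 * Request the MSI-X vectors for the queue interrupts.  Vector 1 serves the
 * firmware event queue; vectors 2 and up serve the Ethernet, offload, and
 * RDMA Rx queues in that order, matching name_msix_vecs() above.  On any
 * failure the vectors acquired so far are released again.
 */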
636static int request_msix_queue_irqs(struct adapter *adap)
637{
638 struct sge *s = &adap->sge;
639 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
640
641 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
642 adap->msix_info[1].desc, &s->fw_evtq);
643 if (err)
644 return err;
645
646 for_each_ethrxq(s, ethqidx) {
647 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
648 adap->msix_info[msi].desc,
649 &s->ethrxq[ethqidx].rspq);
650 if (err)
651 goto unwind;
652 msi++;
653 }
654 for_each_ofldrxq(s, ofldqidx) {
655 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
656 adap->msix_info[msi].desc,
657 &s->ofldrxq[ofldqidx].rspq);
658 if (err)
659 goto unwind;
660 msi++;
661 }
662 for_each_rdmarxq(s, rdmaqidx) {
663 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
664 adap->msix_info[msi].desc,
665 &s->rdmarxq[rdmaqidx].rspq);
666 if (err)
667 goto unwind;
668 msi++;
669 }
670 return 0;
671
672unwind:
673 while (--rdmaqidx >= 0)
674 free_irq(adap->msix_info[--msi].vec,
675 &s->rdmarxq[rdmaqidx].rspq);
676 while (--ofldqidx >= 0)
677 free_irq(adap->msix_info[--msi].vec,
678 &s->ofldrxq[ofldqidx].rspq);
679 while (--ethqidx >= 0)
680 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
681 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
682 return err;
683}
684
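/*
 * Release the queue MSI-X vectors acquired by request_msix_queue_irqs().
 */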
685static void free_msix_queue_irqs(struct adapter *adap)
686{
687 int i, msi = 2;
688 struct sge *s = &adap->sge;
689
690 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
691 for_each_ethrxq(s, i)
692 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
693 for_each_ofldrxq(s, i)
694 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
695 for_each_rdmarxq(s, i)
696 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
697}
698
699/**
700 * write_rss - write the RSS table for a given port
701 * @pi: the port
702 * @queues: array of queue indices for RSS
703 *
704 * Sets up the portion of the HW RSS table for the port's VI to distribute
705 * packets to the Rx queues in @queues.
706 */
707static int write_rss(const struct port_info *pi, const u16 *queues)
708{
709 u16 *rss;
710 int i, err;
711 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
712
713 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
714 if (!rss)
715 return -ENOMEM;
716
717 /* map the queue indices to queue ids */
718 for (i = 0; i < pi->rss_size; i++, queues++)
719 rss[i] = q[*queues].rspq.abs_id;
720
721 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
722 pi->rss_size, rss, pi->rss_size);
723 kfree(rss);
724 return err;
725}
726
727/**
728 * setup_rss - configure RSS
729 * @adap: the adapter
730 *
 731 *	Sets up RSS for each port.
732 */
733static int setup_rss(struct adapter *adap)
734{
 735	int i, err;
736
737 for_each_port(adap, i) {
738 const struct port_info *pi = adap2pinfo(adap, i);
 739
 740		err = write_rss(pi, pi->rss);
741 if (err)
742 return err;
743 }
744 return 0;
745}
746
747/*
748 * Return the channel of the ingress queue with the given qid.
749 */
750static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
751{
752 qid -= p->ingr_start;
753 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
754}
755
756/*
757 * Wait until all NAPI handlers are descheduled.
758 */
759static void quiesce_rx(struct adapter *adap)
760{
761 int i;
762
763 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
764 struct sge_rspq *q = adap->sge.ingr_map[i];
765
766 if (q && q->handler)
767 napi_disable(&q->napi);
768 }
769}
770
771/*
772 * Enable NAPI scheduling and interrupt generation for all Rx queues.
773 */
774static void enable_rx(struct adapter *adap)
775{
776 int i;
777
778 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
779 struct sge_rspq *q = adap->sge.ingr_map[i];
780
781 if (!q)
782 continue;
783 if (q->handler)
784 napi_enable(&q->napi);
785 /* 0-increment GTS to start the timer and enable interrupts */
786 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
787 SEINTARM(q->intr_params) |
788 INGRESSQID(q->cntxt_id));
789 }
790}
791
792/**
793 * setup_sge_queues - configure SGE Tx/Rx/response queues
794 * @adap: the adapter
795 *
796 * Determines how many sets of SGE queues to use and initializes them.
797 * We support multiple queue sets per port if we have MSI-X, otherwise
798 * just one queue set per port.
799 */
800static int setup_sge_queues(struct adapter *adap)
801{
802 int err, msi_idx, i, j;
803 struct sge *s = &adap->sge;
804
805 bitmap_zero(s->starving_fl, MAX_EGRQ);
806 bitmap_zero(s->txq_maperr, MAX_EGRQ);
807
808 if (adap->flags & USING_MSIX)
809 msi_idx = 1; /* vector 0 is for non-queue interrupts */
810 else {
811 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
812 NULL, NULL);
813 if (err)
814 return err;
815 msi_idx = -((int)s->intrq.abs_id + 1);
816 }
817
818 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
819 msi_idx, NULL, fwevtq_handler);
820 if (err) {
821freeout: t4_free_sge_resources(adap);
822 return err;
823 }
824
825 for_each_port(adap, i) {
826 struct net_device *dev = adap->port[i];
827 struct port_info *pi = netdev_priv(dev);
828 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
829 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
830
831 for (j = 0; j < pi->nqsets; j++, q++) {
832 if (msi_idx > 0)
833 msi_idx++;
834 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
835 msi_idx, &q->fl,
836 t4_ethrx_handler);
837 if (err)
838 goto freeout;
839 q->rspq.idx = j;
840 memset(&q->stats, 0, sizeof(q->stats));
841 }
842 for (j = 0; j < pi->nqsets; j++, t++) {
843 err = t4_sge_alloc_eth_txq(adap, t, dev,
844 netdev_get_tx_queue(dev, j),
845 s->fw_evtq.cntxt_id);
846 if (err)
847 goto freeout;
848 }
849 }
850
851 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
852 for_each_ofldrxq(s, i) {
853 struct sge_ofld_rxq *q = &s->ofldrxq[i];
854 struct net_device *dev = adap->port[i / j];
855
856 if (msi_idx > 0)
857 msi_idx++;
858 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
859 &q->fl, uldrx_handler);
860 if (err)
861 goto freeout;
862 memset(&q->stats, 0, sizeof(q->stats));
863 s->ofld_rxq[i] = q->rspq.abs_id;
864 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
865 s->fw_evtq.cntxt_id);
866 if (err)
867 goto freeout;
868 }
869
870 for_each_rdmarxq(s, i) {
871 struct sge_ofld_rxq *q = &s->rdmarxq[i];
872
873 if (msi_idx > 0)
874 msi_idx++;
875 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
876 msi_idx, &q->fl, uldrx_handler);
877 if (err)
878 goto freeout;
879 memset(&q->stats, 0, sizeof(q->stats));
880 s->rdma_rxq[i] = q->rspq.abs_id;
881 }
882
883 for_each_port(adap, i) {
884 /*
885 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
886 * have RDMA queues, and that's the right value.
887 */
888 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
889 s->fw_evtq.cntxt_id,
890 s->rdmarxq[i].rspq.cntxt_id);
891 if (err)
892 goto freeout;
893 }
894
895 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
896 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
897 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
898 return 0;
899}
900
901/*
902 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
903 * started but failed, and a negative errno if flash load couldn't start.
904 */
905static int upgrade_fw(struct adapter *adap)
906{
907 int ret;
908 u32 vers;
909 const struct fw_hdr *hdr;
910 const struct firmware *fw;
911 struct device *dev = adap->pdev_dev;
912
913 ret = request_firmware(&fw, FW_FNAME, dev);
914 if (ret < 0) {
915 dev_err(dev, "unable to load firmware image " FW_FNAME
916 ", error %d\n", ret);
917 return ret;
918 }
919
920 hdr = (const struct fw_hdr *)fw->data;
921 vers = ntohl(hdr->fw_ver);
922 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
923 ret = -EINVAL; /* wrong major version, won't do */
924 goto out;
925 }
926
927 /*
928 * If the flash FW is unusable or we found something newer, load it.
929 */
930 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
931 vers > adap->params.fw_vers) {
932 dev_info(dev, "upgrading firmware ...\n");
933 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
934 /*force=*/false);
 935		if (!ret)
936 dev_info(dev, "firmware successfully upgraded to "
937 FW_FNAME " (%d.%d.%d.%d)\n",
938 FW_HDR_FW_VER_MAJOR_GET(vers),
939 FW_HDR_FW_VER_MINOR_GET(vers),
940 FW_HDR_FW_VER_MICRO_GET(vers),
941 FW_HDR_FW_VER_BUILD_GET(vers));
942 else
943 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
944 } else {
945 /*
946 * Tell our caller that we didn't upgrade the firmware.
947 */
948 ret = -EINVAL;
 949	}
 950
951out: release_firmware(fw);
952 return ret;
953}
954
955/*
956 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
957 * The allocated memory is cleared.
958 */
959void *t4_alloc_mem(size_t size)
960{
 961	void *p = kzalloc(size, GFP_KERNEL);
962
963 if (!p)
 964		p = vzalloc(size);
965 return p;
966}
967
968/*
969 * Free memory allocated through alloc_mem().
970 */
 971 static void t4_free_mem(void *addr)
972{
973 if (is_vmalloc_addr(addr))
974 vfree(addr);
975 else
976 kfree(addr);
977}
978
979static inline int is_offload(const struct adapter *adap)
980{
981 return adap->params.offload;
982}
983
984/*
985 * Implementation of ethtool operations.
986 */
987
988static u32 get_msglevel(struct net_device *dev)
989{
990 return netdev2adap(dev)->msg_enable;
991}
992
993static void set_msglevel(struct net_device *dev, u32 val)
994{
995 netdev2adap(dev)->msg_enable = val;
996}
997
998static char stats_strings[][ETH_GSTRING_LEN] = {
999 "TxOctetsOK ",
1000 "TxFramesOK ",
1001 "TxBroadcastFrames ",
1002 "TxMulticastFrames ",
1003 "TxUnicastFrames ",
1004 "TxErrorFrames ",
1005
1006 "TxFrames64 ",
1007 "TxFrames65To127 ",
1008 "TxFrames128To255 ",
1009 "TxFrames256To511 ",
1010 "TxFrames512To1023 ",
1011 "TxFrames1024To1518 ",
1012 "TxFrames1519ToMax ",
1013
1014 "TxFramesDropped ",
1015 "TxPauseFrames ",
1016 "TxPPP0Frames ",
1017 "TxPPP1Frames ",
1018 "TxPPP2Frames ",
1019 "TxPPP3Frames ",
1020 "TxPPP4Frames ",
1021 "TxPPP5Frames ",
1022 "TxPPP6Frames ",
1023 "TxPPP7Frames ",
1024
1025 "RxOctetsOK ",
1026 "RxFramesOK ",
1027 "RxBroadcastFrames ",
1028 "RxMulticastFrames ",
1029 "RxUnicastFrames ",
1030
1031 "RxFramesTooLong ",
1032 "RxJabberErrors ",
1033 "RxFCSErrors ",
1034 "RxLengthErrors ",
1035 "RxSymbolErrors ",
1036 "RxRuntFrames ",
1037
1038 "RxFrames64 ",
1039 "RxFrames65To127 ",
1040 "RxFrames128To255 ",
1041 "RxFrames256To511 ",
1042 "RxFrames512To1023 ",
1043 "RxFrames1024To1518 ",
1044 "RxFrames1519ToMax ",
1045
1046 "RxPauseFrames ",
1047 "RxPPP0Frames ",
1048 "RxPPP1Frames ",
1049 "RxPPP2Frames ",
1050 "RxPPP3Frames ",
1051 "RxPPP4Frames ",
1052 "RxPPP5Frames ",
1053 "RxPPP6Frames ",
1054 "RxPPP7Frames ",
1055
1056 "RxBG0FramesDropped ",
1057 "RxBG1FramesDropped ",
1058 "RxBG2FramesDropped ",
1059 "RxBG3FramesDropped ",
1060 "RxBG0FramesTrunc ",
1061 "RxBG1FramesTrunc ",
1062 "RxBG2FramesTrunc ",
1063 "RxBG3FramesTrunc ",
1064
1065 "TSO ",
1066 "TxCsumOffload ",
1067 "RxCsumGood ",
1068 "VLANextractions ",
1069 "VLANinsertions ",
1070 "GROpackets ",
1071 "GROmerged ",
1072};
1073
1074static int get_sset_count(struct net_device *dev, int sset)
1075{
1076 switch (sset) {
1077 case ETH_SS_STATS:
1078 return ARRAY_SIZE(stats_strings);
1079 default:
1080 return -EOPNOTSUPP;
1081 }
1082}
1083
1084#define T4_REGMAP_SIZE (160 * 1024)
1085
1086static int get_regs_len(struct net_device *dev)
1087{
1088 return T4_REGMAP_SIZE;
1089}
1090
1091static int get_eeprom_len(struct net_device *dev)
1092{
1093 return EEPROMSIZE;
1094}
1095
1096static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1097{
1098 struct adapter *adapter = netdev2adap(dev);
1099
1100 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1101 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1102 strlcpy(info->bus_info, pci_name(adapter->pdev),
1103 sizeof(info->bus_info));
 1104
 1105	if (adapter->params.fw_vers)
1106 snprintf(info->fw_version, sizeof(info->fw_version),
1107 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1108 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1109 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1110 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1111 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1112 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1113 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1114 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1115 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1116}
1117
1118static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1119{
1120 if (stringset == ETH_SS_STATS)
1121 memcpy(data, stats_strings, sizeof(stats_strings));
1122}
1123
1124/*
1125 * port stats maintained per queue of the port. They should be in the same
1126 * order as in stats_strings above.
1127 */
1128struct queue_port_stats {
1129 u64 tso;
1130 u64 tx_csum;
1131 u64 rx_csum;
1132 u64 vlan_ex;
1133 u64 vlan_ins;
1134 u64 gro_pkts;
1135 u64 gro_merged;
1136};
1137
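/*
 * Sum the software queue statistics of all queue sets belonging to a port.
 */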
1138static void collect_sge_port_stats(const struct adapter *adap,
1139 const struct port_info *p, struct queue_port_stats *s)
1140{
1141 int i;
1142 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1143 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1144
1145 memset(s, 0, sizeof(*s));
1146 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1147 s->tso += tx->tso;
1148 s->tx_csum += tx->tx_cso;
1149 s->rx_csum += rx->stats.rx_cso;
1150 s->vlan_ex += rx->stats.vlan_ex;
1151 s->vlan_ins += tx->vlan_ins;
1152 s->gro_pkts += rx->stats.lro_pkts;
1153 s->gro_merged += rx->stats.lro_merged;
1154 }
1155}
1156
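/*
 * ethtool get_ethtool_stats handler: hardware port statistics first,
 * followed by the per-queue software statistics gathered above.
 */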
1157static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1158 u64 *data)
1159{
1160 struct port_info *pi = netdev_priv(dev);
1161 struct adapter *adapter = pi->adapter;
1162
1163 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1164
1165 data += sizeof(struct port_stats) / sizeof(u64);
1166 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1167}
1168
1169/*
1170 * Return a version number to identify the type of adapter. The scheme is:
1171 * - bits 0..9: chip version
1172 * - bits 10..15: chip revision
 1173 * - bits 16..23: register dump version
1174 */
1175static inline unsigned int mk_adap_vers(const struct adapter *ap)
1176{
 1177	return 4 | (ap->params.rev << 10) | (1 << 16);
1178}
1179
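/*
 * Copy the registers in the range [start, end] into the dump buffer at the
 * offset corresponding to @start.
 */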
1180static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1181 unsigned int end)
1182{
1183 u32 *p = buf + start;
1184
1185 for ( ; start <= end; start += sizeof(u32))
1186 *p++ = t4_read_reg(ap, start);
1187}
1188
1189static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1190 void *buf)
1191{
1192 static const unsigned int reg_ranges[] = {
1193 0x1008, 0x1108,
1194 0x1180, 0x11b4,
1195 0x11fc, 0x123c,
1196 0x1300, 0x173c,
1197 0x1800, 0x18fc,
1198 0x3000, 0x30d8,
1199 0x30e0, 0x5924,
1200 0x5960, 0x59d4,
1201 0x5a00, 0x5af8,
1202 0x6000, 0x6098,
1203 0x6100, 0x6150,
1204 0x6200, 0x6208,
1205 0x6240, 0x6248,
1206 0x6280, 0x6338,
1207 0x6370, 0x638c,
1208 0x6400, 0x643c,
1209 0x6500, 0x6524,
1210 0x6a00, 0x6a38,
1211 0x6a60, 0x6a78,
1212 0x6b00, 0x6b84,
1213 0x6bf0, 0x6c84,
1214 0x6cf0, 0x6d84,
1215 0x6df0, 0x6e84,
1216 0x6ef0, 0x6f84,
1217 0x6ff0, 0x7084,
1218 0x70f0, 0x7184,
1219 0x71f0, 0x7284,
1220 0x72f0, 0x7384,
1221 0x73f0, 0x7450,
1222 0x7500, 0x7530,
1223 0x7600, 0x761c,
1224 0x7680, 0x76cc,
1225 0x7700, 0x7798,
1226 0x77c0, 0x77fc,
1227 0x7900, 0x79fc,
1228 0x7b00, 0x7c38,
1229 0x7d00, 0x7efc,
1230 0x8dc0, 0x8e1c,
1231 0x8e30, 0x8e78,
1232 0x8ea0, 0x8f6c,
1233 0x8fc0, 0x9074,
1234 0x90fc, 0x90fc,
1235 0x9400, 0x9458,
1236 0x9600, 0x96bc,
1237 0x9800, 0x9808,
1238 0x9820, 0x983c,
1239 0x9850, 0x9864,
1240 0x9c00, 0x9c6c,
1241 0x9c80, 0x9cec,
1242 0x9d00, 0x9d6c,
1243 0x9d80, 0x9dec,
1244 0x9e00, 0x9e6c,
1245 0x9e80, 0x9eec,
1246 0x9f00, 0x9f6c,
1247 0x9f80, 0x9fec,
1248 0xd004, 0xd03c,
1249 0xdfc0, 0xdfe0,
1250 0xe000, 0xea7c,
1251 0xf000, 0x11190,
1252 0x19040, 0x1906c,
1253 0x19078, 0x19080,
1254 0x1908c, 0x19124,
1255 0x19150, 0x191b0,
1256 0x191d0, 0x191e8,
1257 0x19238, 0x1924c,
1258 0x193f8, 0x19474,
1259 0x19490, 0x194f8,
1260 0x19800, 0x19f30,
1261 0x1a000, 0x1a06c,
1262 0x1a0b0, 0x1a120,
1263 0x1a128, 0x1a138,
1264 0x1a190, 0x1a1c4,
1265 0x1a1fc, 0x1a1fc,
1266 0x1e040, 0x1e04c,
 1267		0x1e284, 0x1e28c,
1268 0x1e2c0, 0x1e2c0,
1269 0x1e2e0, 0x1e2e0,
1270 0x1e300, 0x1e384,
1271 0x1e3c0, 0x1e3c8,
1272 0x1e440, 0x1e44c,
 1273		0x1e684, 0x1e68c,
1274 0x1e6c0, 0x1e6c0,
1275 0x1e6e0, 0x1e6e0,
1276 0x1e700, 0x1e784,
1277 0x1e7c0, 0x1e7c8,
1278 0x1e840, 0x1e84c,
 1279		0x1ea84, 0x1ea8c,
1280 0x1eac0, 0x1eac0,
1281 0x1eae0, 0x1eae0,
1282 0x1eb00, 0x1eb84,
1283 0x1ebc0, 0x1ebc8,
1284 0x1ec40, 0x1ec4c,
 1285		0x1ee84, 0x1ee8c,
1286 0x1eec0, 0x1eec0,
1287 0x1eee0, 0x1eee0,
1288 0x1ef00, 0x1ef84,
1289 0x1efc0, 0x1efc8,
1290 0x1f040, 0x1f04c,
 1291		0x1f284, 0x1f28c,
1292 0x1f2c0, 0x1f2c0,
1293 0x1f2e0, 0x1f2e0,
1294 0x1f300, 0x1f384,
1295 0x1f3c0, 0x1f3c8,
1296 0x1f440, 0x1f44c,
 1297		0x1f684, 0x1f68c,
1298 0x1f6c0, 0x1f6c0,
1299 0x1f6e0, 0x1f6e0,
1300 0x1f700, 0x1f784,
1301 0x1f7c0, 0x1f7c8,
1302 0x1f840, 0x1f84c,
 1303		0x1fa84, 0x1fa8c,
1304 0x1fac0, 0x1fac0,
1305 0x1fae0, 0x1fae0,
1306 0x1fb00, 0x1fb84,
1307 0x1fbc0, 0x1fbc8,
1308 0x1fc40, 0x1fc4c,
 1309		0x1fe84, 0x1fe8c,
1310 0x1fec0, 0x1fec0,
1311 0x1fee0, 0x1fee0,
1312 0x1ff00, 0x1ff84,
1313 0x1ffc0, 0x1ffc8,
1314 0x20000, 0x2002c,
1315 0x20100, 0x2013c,
1316 0x20190, 0x201c8,
1317 0x20200, 0x20318,
1318 0x20400, 0x20528,
1319 0x20540, 0x20614,
1320 0x21000, 0x21040,
1321 0x2104c, 0x21060,
1322 0x210c0, 0x210ec,
1323 0x21200, 0x21268,
1324 0x21270, 0x21284,
1325 0x212fc, 0x21388,
1326 0x21400, 0x21404,
1327 0x21500, 0x21518,
1328 0x2152c, 0x2153c,
1329 0x21550, 0x21554,
1330 0x21600, 0x21600,
1331 0x21608, 0x21628,
1332 0x21630, 0x2163c,
1333 0x21700, 0x2171c,
1334 0x21780, 0x2178c,
1335 0x21800, 0x21c38,
1336 0x21c80, 0x21d7c,
1337 0x21e00, 0x21e04,
1338 0x22000, 0x2202c,
1339 0x22100, 0x2213c,
1340 0x22190, 0x221c8,
1341 0x22200, 0x22318,
1342 0x22400, 0x22528,
1343 0x22540, 0x22614,
1344 0x23000, 0x23040,
1345 0x2304c, 0x23060,
1346 0x230c0, 0x230ec,
1347 0x23200, 0x23268,
1348 0x23270, 0x23284,
1349 0x232fc, 0x23388,
1350 0x23400, 0x23404,
1351 0x23500, 0x23518,
1352 0x2352c, 0x2353c,
1353 0x23550, 0x23554,
1354 0x23600, 0x23600,
1355 0x23608, 0x23628,
1356 0x23630, 0x2363c,
1357 0x23700, 0x2371c,
1358 0x23780, 0x2378c,
1359 0x23800, 0x23c38,
1360 0x23c80, 0x23d7c,
1361 0x23e00, 0x23e04,
1362 0x24000, 0x2402c,
1363 0x24100, 0x2413c,
1364 0x24190, 0x241c8,
1365 0x24200, 0x24318,
1366 0x24400, 0x24528,
1367 0x24540, 0x24614,
1368 0x25000, 0x25040,
1369 0x2504c, 0x25060,
1370 0x250c0, 0x250ec,
1371 0x25200, 0x25268,
1372 0x25270, 0x25284,
1373 0x252fc, 0x25388,
1374 0x25400, 0x25404,
1375 0x25500, 0x25518,
1376 0x2552c, 0x2553c,
1377 0x25550, 0x25554,
1378 0x25600, 0x25600,
1379 0x25608, 0x25628,
1380 0x25630, 0x2563c,
1381 0x25700, 0x2571c,
1382 0x25780, 0x2578c,
1383 0x25800, 0x25c38,
1384 0x25c80, 0x25d7c,
1385 0x25e00, 0x25e04,
1386 0x26000, 0x2602c,
1387 0x26100, 0x2613c,
1388 0x26190, 0x261c8,
1389 0x26200, 0x26318,
1390 0x26400, 0x26528,
1391 0x26540, 0x26614,
1392 0x27000, 0x27040,
1393 0x2704c, 0x27060,
1394 0x270c0, 0x270ec,
1395 0x27200, 0x27268,
1396 0x27270, 0x27284,
1397 0x272fc, 0x27388,
1398 0x27400, 0x27404,
1399 0x27500, 0x27518,
1400 0x2752c, 0x2753c,
1401 0x27550, 0x27554,
1402 0x27600, 0x27600,
1403 0x27608, 0x27628,
1404 0x27630, 0x2763c,
1405 0x27700, 0x2771c,
1406 0x27780, 0x2778c,
1407 0x27800, 0x27c38,
1408 0x27c80, 0x27d7c,
1409 0x27e00, 0x27e04
1410 };
1411
1412 int i;
1413 struct adapter *ap = netdev2adap(dev);
1414
1415 regs->version = mk_adap_vers(ap);
1416
1417 memset(buf, 0, T4_REGMAP_SIZE);
1418 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1419 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1420}
1421
1422static int restart_autoneg(struct net_device *dev)
1423{
1424 struct port_info *p = netdev_priv(dev);
1425
1426 if (!netif_running(dev))
1427 return -EAGAIN;
1428 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1429 return -EINVAL;
 1430	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1431 return 0;
1432}
1433
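/*
 * ethtool set_phys_id handler: ask the firmware to identify the port
 * (typically by blinking its LED).  0xffff starts identification, 0 stops it.
 */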
1434static int identify_port(struct net_device *dev,
1435 enum ethtool_phys_id_state state)
 1436{
 1437	unsigned int val;
1438 struct adapter *adap = netdev2adap(dev);
1439
1440 if (state == ETHTOOL_ID_ACTIVE)
1441 val = 0xffff;
1442 else if (state == ETHTOOL_ID_INACTIVE)
1443 val = 0;
1444 else
1445 return -EINVAL;
 1446
 1447	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1448}
1449
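/*
 * Translate a firmware port type and capability word into the corresponding
 * ethtool SUPPORTED_* flags.
 */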
1450static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1451{
1452 unsigned int v = 0;
1453
1454 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1455 type == FW_PORT_TYPE_BT_XAUI) {
1456 v |= SUPPORTED_TP;
1457 if (caps & FW_PORT_CAP_SPEED_100M)
1458 v |= SUPPORTED_100baseT_Full;
1459 if (caps & FW_PORT_CAP_SPEED_1G)
1460 v |= SUPPORTED_1000baseT_Full;
1461 if (caps & FW_PORT_CAP_SPEED_10G)
1462 v |= SUPPORTED_10000baseT_Full;
1463 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1464 v |= SUPPORTED_Backplane;
1465 if (caps & FW_PORT_CAP_SPEED_1G)
1466 v |= SUPPORTED_1000baseKX_Full;
1467 if (caps & FW_PORT_CAP_SPEED_10G)
1468 v |= SUPPORTED_10000baseKX4_Full;
1469 } else if (type == FW_PORT_TYPE_KR)
1470 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
 1471	else if (type == FW_PORT_TYPE_BP_AP)
1472 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1473 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1474 else if (type == FW_PORT_TYPE_BP4_AP)
1475 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1476 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1477 SUPPORTED_10000baseKX4_Full;
1478 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1479 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1480 v |= SUPPORTED_FIBRE;
1481
1482 if (caps & FW_PORT_CAP_ANEG)
1483 v |= SUPPORTED_Autoneg;
1484 return v;
1485}
1486
1487static unsigned int to_fw_linkcaps(unsigned int caps)
1488{
1489 unsigned int v = 0;
1490
1491 if (caps & ADVERTISED_100baseT_Full)
1492 v |= FW_PORT_CAP_SPEED_100M;
1493 if (caps & ADVERTISED_1000baseT_Full)
1494 v |= FW_PORT_CAP_SPEED_1G;
1495 if (caps & ADVERTISED_10000baseT_Full)
1496 v |= FW_PORT_CAP_SPEED_10G;
1497 return v;
1498}
1499
1500static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1501{
1502 const struct port_info *p = netdev_priv(dev);
1503
1504 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
 1505	    p->port_type == FW_PORT_TYPE_BT_XFI ||
1506 p->port_type == FW_PORT_TYPE_BT_XAUI)
1507 cmd->port = PORT_TP;
1508 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1509 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
 1510		cmd->port = PORT_FIBRE;
1511 else if (p->port_type == FW_PORT_TYPE_SFP) {
1512 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1513 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1514 cmd->port = PORT_DA;
1515 else
1516 cmd->port = PORT_FIBRE;
1517 } else
1518 cmd->port = PORT_OTHER;
1519
1520 if (p->mdio_addr >= 0) {
1521 cmd->phy_address = p->mdio_addr;
1522 cmd->transceiver = XCVR_EXTERNAL;
1523 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1524 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1525 } else {
1526 cmd->phy_address = 0; /* not really, but no better option */
1527 cmd->transceiver = XCVR_INTERNAL;
1528 cmd->mdio_support = 0;
1529 }
1530
1531 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1532 cmd->advertising = from_fw_linkcaps(p->port_type,
1533 p->link_cfg.advertising);
1534 ethtool_cmd_speed_set(cmd,
1535 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1536 cmd->duplex = DUPLEX_FULL;
1537 cmd->autoneg = p->link_cfg.autoneg;
1538 cmd->maxtxpkt = 0;
1539 cmd->maxrxpkt = 0;
1540 return 0;
1541}
1542
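/*
 * Map an ethtool speed value to the matching firmware port capability bit.
 */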
1543static unsigned int speed_to_caps(int speed)
1544{
1545 if (speed == SPEED_100)
1546 return FW_PORT_CAP_SPEED_100M;
1547 if (speed == SPEED_1000)
1548 return FW_PORT_CAP_SPEED_1G;
1549 if (speed == SPEED_10000)
1550 return FW_PORT_CAP_SPEED_10G;
1551 return 0;
1552}
1553
1554static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1555{
1556 unsigned int cap;
1557 struct port_info *p = netdev_priv(dev);
1558 struct link_config *lc = &p->link_cfg;
 1559	u32 speed = ethtool_cmd_speed(cmd);
1560
1561 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1562 return -EINVAL;
1563
1564 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1565 /*
1566 * PHY offers a single speed. See if that's what's
1567 * being requested.
1568 */
1569 if (cmd->autoneg == AUTONEG_DISABLE &&
1570 (lc->supported & speed_to_caps(speed)))
1571 return 0;
1572 return -EINVAL;
1573 }
1574
1575 if (cmd->autoneg == AUTONEG_DISABLE) {
 1576		cap = speed_to_caps(speed);
 1577
1578 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1579 (speed == SPEED_10000))
1580 return -EINVAL;
1581 lc->requested_speed = cap;
1582 lc->advertising = 0;
1583 } else {
1584 cap = to_fw_linkcaps(cmd->advertising);
1585 if (!(lc->supported & cap))
1586 return -EINVAL;
1587 lc->requested_speed = 0;
1588 lc->advertising = cap | FW_PORT_CAP_ANEG;
1589 }
1590 lc->autoneg = cmd->autoneg;
1591
1592 if (netif_running(dev))
1593 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1594 lc);
1595 return 0;
1596}
1597
1598static void get_pauseparam(struct net_device *dev,
1599 struct ethtool_pauseparam *epause)
1600{
1601 struct port_info *p = netdev_priv(dev);
1602
1603 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1604 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1605 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1606}
1607
1608static int set_pauseparam(struct net_device *dev,
1609 struct ethtool_pauseparam *epause)
1610{
1611 struct port_info *p = netdev_priv(dev);
1612 struct link_config *lc = &p->link_cfg;
1613
1614 if (epause->autoneg == AUTONEG_DISABLE)
1615 lc->requested_fc = 0;
1616 else if (lc->supported & FW_PORT_CAP_ANEG)
1617 lc->requested_fc = PAUSE_AUTONEG;
1618 else
1619 return -EINVAL;
1620
1621 if (epause->rx_pause)
1622 lc->requested_fc |= PAUSE_RX;
1623 if (epause->tx_pause)
1624 lc->requested_fc |= PAUSE_TX;
1625 if (netif_running(dev))
1626 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1627 lc);
1628 return 0;
1629}
1630
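/*
 * ethtool ring parameter handlers: report and resize the free list, response
 * queue, and Tx queue sizes of the port's queue sets.  Resizing is only
 * allowed before the adapter has been fully initialized.
 */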
1631static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1632{
1633 const struct port_info *pi = netdev_priv(dev);
1634 const struct sge *s = &pi->adapter->sge;
1635
1636 e->rx_max_pending = MAX_RX_BUFFERS;
1637 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1638 e->rx_jumbo_max_pending = 0;
1639 e->tx_max_pending = MAX_TXQ_ENTRIES;
1640
1641 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1642 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1643 e->rx_jumbo_pending = 0;
1644 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1645}
1646
1647static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1648{
1649 int i;
1650 const struct port_info *pi = netdev_priv(dev);
1651 struct adapter *adapter = pi->adapter;
1652 struct sge *s = &adapter->sge;
1653
1654 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1655 e->tx_pending > MAX_TXQ_ENTRIES ||
1656 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1657 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1658 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1659 return -EINVAL;
1660
1661 if (adapter->flags & FULL_INIT_DONE)
1662 return -EBUSY;
1663
1664 for (i = 0; i < pi->nqsets; ++i) {
1665 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1666 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1667 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1668 }
1669 return 0;
1670}
1671
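/*
 * Return the index of the SGE hold-off timer value closest to the requested
 * time (in us).
 */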
1672static int closest_timer(const struct sge *s, int time)
1673{
1674 int i, delta, match = 0, min_delta = INT_MAX;
1675
1676 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1677 delta = time - s->timer_val[i];
1678 if (delta < 0)
1679 delta = -delta;
1680 if (delta < min_delta) {
1681 min_delta = delta;
1682 match = i;
1683 }
1684 }
1685 return match;
1686}
1687
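/*
 * Likewise, return the index of the closest interrupt packet-count threshold.
 */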
1688static int closest_thres(const struct sge *s, int thres)
1689{
1690 int i, delta, match = 0, min_delta = INT_MAX;
1691
1692 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1693 delta = thres - s->counter_val[i];
1694 if (delta < 0)
1695 delta = -delta;
1696 if (delta < min_delta) {
1697 min_delta = delta;
1698 match = i;
1699 }
1700 }
1701 return match;
1702}
1703
1704/*
1705 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1706 */
1707static unsigned int qtimer_val(const struct adapter *adap,
1708 const struct sge_rspq *q)
1709{
1710 unsigned int idx = q->intr_params >> 1;
1711
1712 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1713}
1714
1715/**
1716 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1717 * @adap: the adapter
1718 * @q: the Rx queue
1719 * @us: the hold-off time in us, or 0 to disable timer
1720 * @cnt: the hold-off packet count, or 0 to disable counter
1721 *
1722 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1723 * one of the two needs to be enabled for the queue to generate interrupts.
1724 */
1725static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1726 unsigned int us, unsigned int cnt)
1727{
1728 if ((us | cnt) == 0)
1729 cnt = 1;
1730
1731 if (cnt) {
1732 int err;
1733 u32 v, new_idx;
1734
1735 new_idx = closest_thres(&adap->sge, cnt);
1736 if (q->desc && q->pktcnt_idx != new_idx) {
1737 /* the queue has already been created, update it */
1738 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1739 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1740 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1741 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1742 &new_idx);
1743 if (err)
1744 return err;
1745 }
1746 q->pktcnt_idx = new_idx;
1747 }
1748
1749 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1750 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1751 return 0;
1752}
1753
1754static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1755{
1756 const struct port_info *pi = netdev_priv(dev);
1757 struct adapter *adap = pi->adapter;
1758
1759 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1760 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1761}
1762
1763static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1764{
1765 const struct port_info *pi = netdev_priv(dev);
1766 const struct adapter *adap = pi->adapter;
1767 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1768
1769 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1770 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1771 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1772 return 0;
1773}
1774
1775/**
1776 * eeprom_ptov - translate a physical EEPROM address to virtual
1777 * @phys_addr: the physical EEPROM address
1778 * @fn: the PCI function number
1779 * @sz: size of function-specific area
1780 *
1781 * Translate a physical EEPROM address to virtual. The first 1K is
1782 * accessed through virtual addresses starting at 31K, the rest is
1783 * accessed through virtual addresses starting at 0.
1784 *
1785 * The mapping is as follows:
1786 * [0..1K) -> [31K..32K)
1787 * [1K..1K+A) -> [31K-A..31K)
1788 * [1K+A..ES) -> [0..ES-A-1K)
1789 *
1790 * where A = @fn * @sz, and ES = EEPROM size.
 1791 */
 1792static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
 1793{
 1794	fn *= sz;
1795 if (phys_addr < 1024)
1796 return phys_addr + (31 << 10);
1797 if (phys_addr < 1024 + fn)
1798 return 31744 - fn + phys_addr - 1024;
 1799	if (phys_addr < EEPROMSIZE)
 1800		return phys_addr - 1024 - fn;
1801 return -EINVAL;
1802}
1803
1804/*
1805 * The next two routines implement eeprom read/write from physical addresses.
1806 */
1807static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1808{
 1809	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1810
1811 if (vaddr >= 0)
1812 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1813 return vaddr < 0 ? vaddr : 0;
1814}
1815
1816static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1817{
 1818	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1819
1820 if (vaddr >= 0)
1821 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1822 return vaddr < 0 ? vaddr : 0;
1823}
1824
1825#define EEPROM_MAGIC 0x38E2F10C
1826
1827static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1828 u8 *data)
1829{
1830 int i, err = 0;
1831 struct adapter *adapter = netdev2adap(dev);
1832
1833 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1834 if (!buf)
1835 return -ENOMEM;
1836
1837 e->magic = EEPROM_MAGIC;
1838 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1839 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1840
1841 if (!err)
1842 memcpy(data, buf + e->offset, e->len);
1843 kfree(buf);
1844 return err;
1845}
1846
1847static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1848 u8 *data)
1849{
1850 u8 *buf;
1851 int err = 0;
1852 u32 aligned_offset, aligned_len, *p;
1853 struct adapter *adapter = netdev2adap(dev);
1854
1855 if (eeprom->magic != EEPROM_MAGIC)
1856 return -EINVAL;
1857
1858 aligned_offset = eeprom->offset & ~3;
1859 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1860
1861 if (adapter->fn > 0) {
1862 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1863
1864 if (aligned_offset < start ||
1865 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1866 return -EPERM;
1867 }
1868
1869 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1870 /*
1871 * RMW possibly needed for first or last words.
1872 */
1873 buf = kmalloc(aligned_len, GFP_KERNEL);
1874 if (!buf)
1875 return -ENOMEM;
1876 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1877 if (!err && aligned_len > 4)
1878 err = eeprom_rd_phys(adapter,
1879 aligned_offset + aligned_len - 4,
1880 (u32 *)&buf[aligned_len - 4]);
1881 if (err)
1882 goto out;
1883 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1884 } else
1885 buf = data;
1886
1887 err = t4_seeprom_wp(adapter, false);
1888 if (err)
1889 goto out;
1890
1891 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1892 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1893 aligned_offset += 4;
1894 }
1895
1896 if (!err)
1897 err = t4_seeprom_wp(adapter, true);
1898out:
1899 if (buf != data)
1900 kfree(buf);
1901 return err;
1902}
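/*
 * Illustrative sketch (not built): the alignment arithmetic above widens a
 * partial-word write so that whole 32-bit words can be written back.
 * Example values only: a 4-byte write at offset 6 becomes an 8-byte window
 * at offset 4, which is why both the first and last words are read first.
 */
#if 0
static void eeprom_align_example(void)
{
	u32 aligned_offset = 6 & ~3;			/* = 4 */
	u32 aligned_len = (4 + (6 & 3) + 3) & ~3;	/* = 8, spans two words */

	(void)aligned_offset;
	(void)aligned_len;
}
#endif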
1903
1904static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1905{
1906 int ret;
1907 const struct firmware *fw;
1908 struct adapter *adap = netdev2adap(netdev);
1909
1910 ef->data[sizeof(ef->data) - 1] = '\0';
1911 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1912 if (ret < 0)
1913 return ret;
1914
1915 ret = t4_load_fw(adap, fw->data, fw->size);
1916 release_firmware(fw);
1917 if (!ret)
1918 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1919 return ret;
1920}
1921
1922#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1923#define BCAST_CRC 0xa0ccc1a6
1924
1925static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1926{
1927 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1928 wol->wolopts = netdev2adap(dev)->wol;
1929 memset(&wol->sopass, 0, sizeof(wol->sopass));
1930}
1931
1932static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1933{
1934 int err = 0;
1935 struct port_info *pi = netdev_priv(dev);
1936
1937 if (wol->wolopts & ~WOL_SUPPORTED)
1938 return -EINVAL;
1939 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1940 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1941 if (wol->wolopts & WAKE_BCAST) {
1942 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1943 ~0ULL, 0, false);
1944 if (!err)
1945 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1946 ~6ULL, ~0ULL, BCAST_CRC, true);
1947 } else
1948 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1949 return err;
1950}
1951
c8f44aff 1952static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 1953{
2ed28baa 1954 const struct port_info *pi = netdev_priv(dev);
c8f44aff 1955 netdev_features_t changed = dev->features ^ features;
19ecae2c 1956 int err;
19ecae2c 1957
1958 if (!(changed & NETIF_F_HW_VLAN_RX))
1959 return 0;
19ecae2c 1960
1961 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1962 -1, -1, -1,
1963 !!(features & NETIF_F_HW_VLAN_RX), true);
1964 if (unlikely(err))
1965 dev->features = features ^ NETIF_F_HW_VLAN_RX;
19ecae2c 1966 return err;
1967}
1968
7850f63f 1969static u32 get_rss_table_size(struct net_device *dev)
1970{
1971 const struct port_info *pi = netdev_priv(dev);
671b0060 1972
1973 return pi->rss_size;
1974}
1975
1976static int get_rss_table(struct net_device *dev, u32 *p)
1977{
1978 const struct port_info *pi = netdev_priv(dev);
1979 unsigned int n = pi->rss_size;
1980
671b0060 1981 while (n--)
7850f63f 1982 p[n] = pi->rss[n];
1983 return 0;
1984}
1985
7850f63f 1986static int set_rss_table(struct net_device *dev, const u32 *p)
1987{
1988 unsigned int i;
1989 struct port_info *pi = netdev_priv(dev);
1990
1991 for (i = 0; i < pi->rss_size; i++)
1992 pi->rss[i] = p[i];
1993 if (pi->adapter->flags & FULL_INIT_DONE)
1994 return write_rss(pi, pi->rss);
1995 return 0;
1996}
1997
1998static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 1999 u32 *rules)
671b0060 2000{
2001 const struct port_info *pi = netdev_priv(dev);
2002
671b0060 2003 switch (info->cmd) {
2004 case ETHTOOL_GRXFH: {
2005 unsigned int v = pi->rss_mode;
2006
2007 info->data = 0;
2008 switch (info->flow_type) {
2009 case TCP_V4_FLOW:
2010 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2011 info->data = RXH_IP_SRC | RXH_IP_DST |
2012 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2013 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2014 info->data = RXH_IP_SRC | RXH_IP_DST;
2015 break;
2016 case UDP_V4_FLOW:
2017 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2018 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2019 info->data = RXH_IP_SRC | RXH_IP_DST |
2020 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2021 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2022 info->data = RXH_IP_SRC | RXH_IP_DST;
2023 break;
2024 case SCTP_V4_FLOW:
2025 case AH_ESP_V4_FLOW:
2026 case IPV4_FLOW:
2027 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2028 info->data = RXH_IP_SRC | RXH_IP_DST;
2029 break;
2030 case TCP_V6_FLOW:
2031 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2032 info->data = RXH_IP_SRC | RXH_IP_DST |
2033 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2034 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2035 info->data = RXH_IP_SRC | RXH_IP_DST;
2036 break;
2037 case UDP_V6_FLOW:
2038 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2039 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2040 info->data = RXH_IP_SRC | RXH_IP_DST |
2041 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2042 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2043 info->data = RXH_IP_SRC | RXH_IP_DST;
2044 break;
2045 case SCTP_V6_FLOW:
2046 case AH_ESP_V6_FLOW:
2047 case IPV6_FLOW:
2048 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2049 info->data = RXH_IP_SRC | RXH_IP_DST;
2050 break;
2051 }
2052 return 0;
2053 }
671b0060 2054 case ETHTOOL_GRXRINGS:
f796564a 2055 info->data = pi->nqsets;
2056 return 0;
2057 }
2058 return -EOPNOTSUPP;
2059}
2060
9b07be4b 2061static const struct ethtool_ops cxgb_ethtool_ops = {
2062 .get_settings = get_settings,
2063 .set_settings = set_settings,
2064 .get_drvinfo = get_drvinfo,
2065 .get_msglevel = get_msglevel,
2066 .set_msglevel = set_msglevel,
2067 .get_ringparam = get_sge_param,
2068 .set_ringparam = set_sge_param,
2069 .get_coalesce = get_coalesce,
2070 .set_coalesce = set_coalesce,
2071 .get_eeprom_len = get_eeprom_len,
2072 .get_eeprom = get_eeprom,
2073 .set_eeprom = set_eeprom,
2074 .get_pauseparam = get_pauseparam,
2075 .set_pauseparam = set_pauseparam,
2076 .get_link = ethtool_op_get_link,
2077 .get_strings = get_strings,
c5e06360 2078 .set_phys_id = identify_port,
2079 .nway_reset = restart_autoneg,
2080 .get_sset_count = get_sset_count,
2081 .get_ethtool_stats = get_stats,
2082 .get_regs_len = get_regs_len,
2083 .get_regs = get_regs,
2084 .get_wol = get_wol,
2085 .set_wol = set_wol,
671b0060 2086 .get_rxnfc = get_rxnfc,
7850f63f 2087 .get_rxfh_indir_size = get_rss_table_size,
2088 .get_rxfh_indir = get_rss_table,
2089 .set_rxfh_indir = set_rss_table,
2090 .flash_device = set_flash,
2091};
2092
2093/*
2094 * debugfs support
2095 */
2096static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2097 loff_t *ppos)
2098{
2099 loff_t pos = *ppos;
2100 loff_t avail = file->f_path.dentry->d_inode->i_size;
2101 unsigned int mem = (uintptr_t)file->private_data & 3;
2102 struct adapter *adap = file->private_data - mem;
2103
2104 if (pos < 0)
2105 return -EINVAL;
2106 if (pos >= avail)
2107 return 0;
2108 if (count > avail - pos)
2109 count = avail - pos;
2110
2111 while (count) {
2112 size_t len;
2113 int ret, ofst;
2114 __be32 data[16];
2115
2116 if (mem == MEM_MC)
2117 ret = t4_mc_read(adap, pos, data, NULL);
2118 else
2119 ret = t4_edc_read(adap, mem, pos, data, NULL);
2120 if (ret)
2121 return ret;
2122
2123 ofst = pos % sizeof(data);
2124 len = min(count, sizeof(data) - ofst);
2125 if (copy_to_user(buf, (u8 *)data + ofst, len))
2126 return -EFAULT;
2127
2128 buf += len;
2129 pos += len;
2130 count -= len;
2131 }
2132 count = pos - *ppos;
2133 *ppos = pos;
2134 return count;
2135}
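/*
 * Illustrative sketch (not built): add_debugfs_mem() below stores
 * "adap + idx" as the file's private data, relying on struct adapter being
 * at least 4-byte aligned so the low two bits are free to carry the memory
 * index (MEM_EDC0/MEM_EDC1/MEM_MC).  mem_read() above unpacks it the same
 * way this example does.
 */
#if 0
static void mem_file_tag_example(struct adapter *adap)
{
	void *priv = (void *)adap + MEM_MC;	/* pack index into low bits */
	unsigned int mem = (uintptr_t)priv & 3;	/* -> MEM_MC */
	struct adapter *a = priv - mem;		/* -> adap again */

	(void)a;
	(void)mem;
}
#endif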
2136
2137static const struct file_operations mem_debugfs_fops = {
2138 .owner = THIS_MODULE,
234e3405 2139 .open = simple_open,
b8ff05a9 2140 .read = mem_read,
6038f373 2141 .llseek = default_llseek,
2142};
2143
2144static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2145 unsigned int idx, unsigned int size_mb)
2146{
2147 struct dentry *de;
2148
2149 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2150 (void *)adap + idx, &mem_debugfs_fops);
2151 if (de && de->d_inode)
2152 de->d_inode->i_size = size_mb << 20;
2153}
2154
2155static int __devinit setup_debugfs(struct adapter *adap)
2156{
2157 int i;
2158
2159 if (IS_ERR_OR_NULL(adap->debugfs_root))
2160 return -1;
2161
2162 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2163 if (i & EDRAM0_ENABLE)
2164 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2165 if (i & EDRAM1_ENABLE)
2166 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2167 if (i & EXT_MEM_ENABLE)
2168 add_debugfs_mem(adap, "mc", MEM_MC,
2169 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2170 if (adap->l2t)
2171 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2172 &t4_l2t_fops);
2173 return 0;
2174}
2175
2176/*
2177 * upper-layer driver support
2178 */
2179
2180/*
2181 * Allocate an active-open TID and set it to the supplied value.
2182 */
2183int cxgb4_alloc_atid(struct tid_info *t, void *data)
2184{
2185 int atid = -1;
2186
2187 spin_lock_bh(&t->atid_lock);
2188 if (t->afree) {
2189 union aopen_entry *p = t->afree;
2190
2191 atid = p - t->atid_tab;
2192 t->afree = p->next;
2193 p->data = data;
2194 t->atids_in_use++;
2195 }
2196 spin_unlock_bh(&t->atid_lock);
2197 return atid;
2198}
2199EXPORT_SYMBOL(cxgb4_alloc_atid);
2200
2201/*
2202 * Release an active-open TID.
2203 */
2204void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2205{
2206 union aopen_entry *p = &t->atid_tab[atid];
2207
2208 spin_lock_bh(&t->atid_lock);
2209 p->next = t->afree;
2210 t->afree = p;
2211 t->atids_in_use--;
2212 spin_unlock_bh(&t->atid_lock);
2213}
2214EXPORT_SYMBOL(cxgb4_free_atid);
2215
2216/*
2217 * Allocate a server TID and set it to the supplied value.
2218 */
2219int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2220{
2221 int stid;
2222
2223 spin_lock_bh(&t->stid_lock);
2224 if (family == PF_INET) {
2225 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2226 if (stid < t->nstids)
2227 __set_bit(stid, t->stid_bmap);
2228 else
2229 stid = -1;
2230 } else {
2231 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2232 if (stid < 0)
2233 stid = -1;
2234 }
2235 if (stid >= 0) {
2236 t->stid_tab[stid].data = data;
2237 stid += t->stid_base;
2238 t->stids_in_use++;
2239 }
2240 spin_unlock_bh(&t->stid_lock);
2241 return stid;
2242}
2243EXPORT_SYMBOL(cxgb4_alloc_stid);
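/*
 * Reader's aid (not additional code): an IPv4 server consumes a single
 * stid, while the non-IPv4 path above grabs a naturally aligned block of
 * four entries (bitmap_find_free_region() with order 2), so one IPv6
 * server occupies four stid slots.
 */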
2244
2245/*
2246 * Release a server TID.
2247 */
2248void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2249{
2250 stid -= t->stid_base;
2251 spin_lock_bh(&t->stid_lock);
2252 if (family == PF_INET)
2253 __clear_bit(stid, t->stid_bmap);
2254 else
2255 bitmap_release_region(t->stid_bmap, stid, 2);
2256 t->stid_tab[stid].data = NULL;
2257 t->stids_in_use--;
2258 spin_unlock_bh(&t->stid_lock);
2259}
2260EXPORT_SYMBOL(cxgb4_free_stid);
2261
2262/*
2263 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2264 */
2265static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2266 unsigned int tid)
2267{
2268 struct cpl_tid_release *req;
2269
2270 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2271 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2272 INIT_TP_WR(req, tid);
2273 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2274}
2275
2276/*
2277 * Queue a TID release request and if necessary schedule a work queue to
2278 * process it.
2279 */
31b9c19b 2280static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2281 unsigned int tid)
2282{
2283 void **p = &t->tid_tab[tid];
2284 struct adapter *adap = container_of(t, struct adapter, tids);
2285
2286 spin_lock_bh(&adap->tid_release_lock);
2287 *p = adap->tid_release_head;
2288 /* Low 2 bits encode the Tx channel number */
2289 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2290 if (!adap->tid_release_task_busy) {
2291 adap->tid_release_task_busy = true;
3069ee9b 2292 queue_work(workq, &adap->tid_release_task);
2293 }
2294 spin_unlock_bh(&adap->tid_release_lock);
2295}
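/*
 * Illustrative sketch (not built): the deferred-release list above links
 * &t->tid_tab[tid] slots through the slots themselves and borrows the low
 * two bits of each link for the Tx channel; process_tid_release_list()
 * below recovers both pieces the same way this example does.
 */
#if 0
static unsigned int tid_release_decode(struct adapter *adap, unsigned int *chan)
{
	void **p = adap->tid_release_head;

	*chan = (uintptr_t)p & 3;		/* channel carried in low bits */
	p = (void *)p - *chan;			/* strip tag -> &tid_tab[tid] */
	return p - adap->tids.tid_tab;		/* slot index is the tid */
}
#endif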
2296
2297/*
2298 * Process the list of pending TID release requests.
2299 */
2300static void process_tid_release_list(struct work_struct *work)
2301{
2302 struct sk_buff *skb;
2303 struct adapter *adap;
2304
2305 adap = container_of(work, struct adapter, tid_release_task);
2306
2307 spin_lock_bh(&adap->tid_release_lock);
2308 while (adap->tid_release_head) {
2309 void **p = adap->tid_release_head;
2310 unsigned int chan = (uintptr_t)p & 3;
2311 p = (void *)p - chan;
2312
2313 adap->tid_release_head = *p;
2314 *p = NULL;
2315 spin_unlock_bh(&adap->tid_release_lock);
2316
2317 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2318 GFP_KERNEL)))
2319 schedule_timeout_uninterruptible(1);
2320
2321 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2322 t4_ofld_send(adap, skb);
2323 spin_lock_bh(&adap->tid_release_lock);
2324 }
2325 adap->tid_release_task_busy = false;
2326 spin_unlock_bh(&adap->tid_release_lock);
2327}
2328
2329/*
2330 * Release a TID and inform HW. If we are unable to allocate the release
2331 * message we defer to a work queue.
2332 */
2333void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2334{
2335 void *old;
2336 struct sk_buff *skb;
2337 struct adapter *adap = container_of(t, struct adapter, tids);
2338
2339 old = t->tid_tab[tid];
2340 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2341 if (likely(skb)) {
2342 t->tid_tab[tid] = NULL;
2343 mk_tid_release(skb, chan, tid);
2344 t4_ofld_send(adap, skb);
2345 } else
2346 cxgb4_queue_tid_release(t, chan, tid);
2347 if (old)
2348 atomic_dec(&t->tids_in_use);
2349}
2350EXPORT_SYMBOL(cxgb4_remove_tid);
2351
2352/*
2353 * Allocate and initialize the TID tables. Returns 0 on success.
2354 */
2355static int tid_init(struct tid_info *t)
2356{
2357 size_t size;
2358 unsigned int natids = t->natids;
2359
2360 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2361 t->nstids * sizeof(*t->stid_tab) +
2362 BITS_TO_LONGS(t->nstids) * sizeof(long);
2363 t->tid_tab = t4_alloc_mem(size);
2364 if (!t->tid_tab)
2365 return -ENOMEM;
2366
2367 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2368 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2369 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2370 spin_lock_init(&t->stid_lock);
2371 spin_lock_init(&t->atid_lock);
2372
2373 t->stids_in_use = 0;
2374 t->afree = NULL;
2375 t->atids_in_use = 0;
2376 atomic_set(&t->tids_in_use, 0);
2377
2378 /* Setup the free list for atid_tab and clear the stid bitmap. */
2379 if (natids) {
2380 while (--natids)
2381 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2382 t->afree = t->atid_tab;
2383 }
2384 bitmap_zero(t->stid_bmap, t->nstids);
2385 return 0;
2386}
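/*
 * Reader's aid (not additional code): tid_init() above carves all four
 * tables out of one t4_alloc_mem() block, laid out back to back as
 *
 *   tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap[]
 *
 * so freeing t->tid_tab later releases everything at once.
 */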
2387
2388/**
2389 * cxgb4_create_server - create an IP server
2390 * @dev: the device
2391 * @stid: the server TID
2392 * @sip: local IP address to bind server to
2393 * @sport: the server's TCP port
2394 * @queue: queue to direct messages from this server to
2395 *
2396 * Create an IP server for the given port and address.
2397 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2398 */
2399int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2400 __be32 sip, __be16 sport, unsigned int queue)
2401{
2402 unsigned int chan;
2403 struct sk_buff *skb;
2404 struct adapter *adap;
2405 struct cpl_pass_open_req *req;
2406
2407 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2408 if (!skb)
2409 return -ENOMEM;
2410
2411 adap = netdev2adap(dev);
2412 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2413 INIT_TP_WR(req, 0);
2414 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2415 req->local_port = sport;
2416 req->peer_port = htons(0);
2417 req->local_ip = sip;
2418 req->peer_ip = htonl(0);
e46dab4d 2419 chan = rxq_to_chan(&adap->sge, queue);
2420 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2421 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2422 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2423 return t4_mgmt_tx(adap, skb);
2424}
2425EXPORT_SYMBOL(cxgb4_create_server);
2426
2427/**
2428 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2429 * @mtus: the HW MTU table
2430 * @mtu: the target MTU
2431 * @idx: index of selected entry in the MTU table
2432 *
2433 * Returns the index and the value in the HW MTU table that is closest to
2434 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2435 * table, in which case that smallest available value is selected.
2436 */
2437unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2438 unsigned int *idx)
2439{
2440 unsigned int i = 0;
2441
2442 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2443 ++i;
2444 if (idx)
2445 *idx = i;
2446 return mtus[i];
2447}
2448EXPORT_SYMBOL(cxgb4_best_mtu);
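/*
 * Illustrative usage sketch (not built): an upper-layer driver would pick
 * the hardware MTU index for a path MTU roughly like this; 1460 is an
 * example value only.
 */
#if 0
static void best_mtu_example(struct adapter *adap)
{
	unsigned int idx;
	unsigned int hw_mtu = cxgb4_best_mtu(adap->params.mtus, 1460, &idx);

	/* hw_mtu <= 1460 unless every table entry exceeds 1460 */
	(void)hw_mtu;
}
#endif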
2449
2450/**
2451 * cxgb4_port_chan - get the HW channel of a port
2452 * @dev: the net device for the port
2453 *
2454 * Return the HW Tx channel of the given port.
2455 */
2456unsigned int cxgb4_port_chan(const struct net_device *dev)
2457{
2458 return netdev2pinfo(dev)->tx_chan;
2459}
2460EXPORT_SYMBOL(cxgb4_port_chan);
2461
2462unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2463{
2464 struct adapter *adap = netdev2adap(dev);
2465 u32 v;
2466
2467 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2468 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2469}
2470EXPORT_SYMBOL(cxgb4_dbfifo_count);
2471
2472/**
2473 * cxgb4_port_viid - get the VI id of a port
2474 * @dev: the net device for the port
2475 *
2476 * Return the VI id of the given port.
2477 */
2478unsigned int cxgb4_port_viid(const struct net_device *dev)
2479{
2480 return netdev2pinfo(dev)->viid;
2481}
2482EXPORT_SYMBOL(cxgb4_port_viid);
2483
2484/**
2485 * cxgb4_port_idx - get the index of a port
2486 * @dev: the net device for the port
2487 *
2488 * Return the index of the given port.
2489 */
2490unsigned int cxgb4_port_idx(const struct net_device *dev)
2491{
2492 return netdev2pinfo(dev)->port_id;
2493}
2494EXPORT_SYMBOL(cxgb4_port_idx);
2495
2496void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2497 struct tp_tcp_stats *v6)
2498{
2499 struct adapter *adap = pci_get_drvdata(pdev);
2500
2501 spin_lock(&adap->stats_lock);
2502 t4_tp_get_tcp_stats(adap, v4, v6);
2503 spin_unlock(&adap->stats_lock);
2504}
2505EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2506
2507void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2508 const unsigned int *pgsz_order)
2509{
2510 struct adapter *adap = netdev2adap(dev);
2511
2512 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2513 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2514 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2515 HPZ3(pgsz_order[3]));
2516}
2517EXPORT_SYMBOL(cxgb4_iscsi_init);
2518
2519int cxgb4_flush_eq_cache(struct net_device *dev)
2520{
2521 struct adapter *adap = netdev2adap(dev);
2522 int ret;
2523
2524 ret = t4_fwaddrspace_write(adap, adap->mbox,
2525 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
2526 return ret;
2527}
2528EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2529
2530static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2531{
2532 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
2533 __be64 indices;
2534 int ret;
2535
2536 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2537 if (!ret) {
2538 indices = be64_to_cpu(indices);
2539 *cidx = (indices >> 25) & 0xffff;
2540 *pidx = (indices >> 9) & 0xffff;
2541 }
2542 return ret;
2543}
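/*
 * Reader's aid (not additional code): the 64-bit EQ context word fetched
 * above carries the consumer index in bits 40:25 and the producer index in
 * bits 24:9, which is exactly what the two shift-and-mask lines extract.
 */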
2544
2545int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2546 u16 size)
2547{
2548 struct adapter *adap = netdev2adap(dev);
2549 u16 hw_pidx, hw_cidx;
2550 int ret;
2551
2552 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2553 if (ret)
2554 goto out;
2555
2556 if (pidx != hw_pidx) {
2557 u16 delta;
2558
2559 if (pidx >= hw_pidx)
2560 delta = pidx - hw_pidx;
2561 else
2562 delta = size - hw_pidx + pidx;
2563 wmb();
2564 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2565 QID(qid) | PIDX(delta));
2566 }
2567out:
2568 return ret;
2569}
2570EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
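/*
 * Reader's aid (not additional code), example numbers only: with a
 * 512-entry queue, a host pidx of 3 and a hardware pidx of 510, the host
 * has wrapped past the end of the ring, so the doorbell above advances
 * hardware by delta = size - hw_pidx + pidx = 512 - 510 + 3 = 5
 * descriptors instead of trying to move it backwards.
 */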
2571
2572static struct pci_driver cxgb4_driver;
2573
2574static void check_neigh_update(struct neighbour *neigh)
2575{
2576 const struct device *parent;
2577 const struct net_device *netdev = neigh->dev;
2578
2579 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2580 netdev = vlan_dev_real_dev(netdev);
2581 parent = netdev->dev.parent;
2582 if (parent && parent->driver == &cxgb4_driver.driver)
2583 t4_l2t_update(dev_get_drvdata(parent), neigh);
2584}
2585
2586static int netevent_cb(struct notifier_block *nb, unsigned long event,
2587 void *data)
2588{
2589 switch (event) {
2590 case NETEVENT_NEIGH_UPDATE:
2591 check_neigh_update(data);
2592 break;
2593 case NETEVENT_REDIRECT:
2594 default:
2595 break;
2596 }
2597 return 0;
2598}
2599
2600static bool netevent_registered;
2601static struct notifier_block cxgb4_netevent_nb = {
2602 .notifier_call = netevent_cb
2603};
2604
2605static void drain_db_fifo(struct adapter *adap, int usecs)
2606{
2607 u32 v;
2608
2609 do {
2610 set_current_state(TASK_UNINTERRUPTIBLE);
2611 schedule_timeout(usecs_to_jiffies(usecs));
2612 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2613 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2614 break;
2615 } while (1);
2616}
2617
2618static void disable_txq_db(struct sge_txq *q)
2619{
2620 spin_lock_irq(&q->db_lock);
2621 q->db_disabled = 1;
2622 spin_unlock_irq(&q->db_lock);
2623}
2624
2625static void enable_txq_db(struct sge_txq *q)
2626{
2627 spin_lock_irq(&q->db_lock);
2628 q->db_disabled = 0;
2629 spin_unlock_irq(&q->db_lock);
2630}
2631
2632static void disable_dbs(struct adapter *adap)
2633{
2634 int i;
2635
2636 for_each_ethrxq(&adap->sge, i)
2637 disable_txq_db(&adap->sge.ethtxq[i].q);
2638 for_each_ofldrxq(&adap->sge, i)
2639 disable_txq_db(&adap->sge.ofldtxq[i].q);
2640 for_each_port(adap, i)
2641 disable_txq_db(&adap->sge.ctrlq[i].q);
2642}
2643
2644static void enable_dbs(struct adapter *adap)
2645{
2646 int i;
2647
2648 for_each_ethrxq(&adap->sge, i)
2649 enable_txq_db(&adap->sge.ethtxq[i].q);
2650 for_each_ofldrxq(&adap->sge, i)
2651 enable_txq_db(&adap->sge.ofldtxq[i].q);
2652 for_each_port(adap, i)
2653 enable_txq_db(&adap->sge.ctrlq[i].q);
2654}
2655
2656static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2657{
2658 u16 hw_pidx, hw_cidx;
2659 int ret;
2660
2661 spin_lock_bh(&q->db_lock);
2662 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2663 if (ret)
2664 goto out;
2665 if (q->db_pidx != hw_pidx) {
2666 u16 delta;
2667
2668 if (q->db_pidx >= hw_pidx)
2669 delta = q->db_pidx - hw_pidx;
2670 else
2671 delta = q->size - hw_pidx + q->db_pidx;
2672 wmb();
2673 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2674 QID(q->cntxt_id) | PIDX(delta));
2675 }
2676out:
2677 q->db_disabled = 0;
2678 spin_unlock_bh(&q->db_lock);
2679 if (ret)
2680 CH_WARN(adap, "DB drop recovery failed.\n");
2681}
2682static void recover_all_queues(struct adapter *adap)
2683{
2684 int i;
2685
2686 for_each_ethrxq(&adap->sge, i)
2687 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2688 for_each_ofldrxq(&adap->sge, i)
2689 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2690 for_each_port(adap, i)
2691 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2692}
2693
2694static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2695{
2696 mutex_lock(&uld_mutex);
2697 if (adap->uld_handle[CXGB4_ULD_RDMA])
2698 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2699 cmd);
2700 mutex_unlock(&uld_mutex);
2701}
2702
2703static void process_db_full(struct work_struct *work)
2704{
2705 struct adapter *adap;
2706
2707 adap = container_of(work, struct adapter, db_full_task);
2708
881806bc 2709 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3069ee9b 2710 drain_db_fifo(adap, dbfifo_drain_delay);
2711 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2712 DBFIFO_HP_INT | DBFIFO_LP_INT,
2713 DBFIFO_HP_INT | DBFIFO_LP_INT);
881806bc 2714 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2715}
2716
2717static void process_db_drop(struct work_struct *work)
2718{
2719 struct adapter *adap;
881806bc 2720
3069ee9b 2721 adap = container_of(work, struct adapter, db_drop_task);
881806bc 2722
2723 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
2724 disable_dbs(adap);
881806bc 2725 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2726 drain_db_fifo(adap, 1);
2727 recover_all_queues(adap);
2728 enable_dbs(adap);
2729}
2730
2731void t4_db_full(struct adapter *adap)
2732{
2733 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2734 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3069ee9b 2735 queue_work(workq, &adap->db_full_task);
2736}
2737
2738void t4_db_dropped(struct adapter *adap)
2739{
3069ee9b 2740 queue_work(workq, &adap->db_drop_task);
2741}
2742
2743static void uld_attach(struct adapter *adap, unsigned int uld)
2744{
2745 void *handle;
2746 struct cxgb4_lld_info lli;
2747
2748 lli.pdev = adap->pdev;
2749 lli.l2t = adap->l2t;
2750 lli.tids = &adap->tids;
2751 lli.ports = adap->port;
2752 lli.vr = &adap->vres;
2753 lli.mtus = adap->params.mtus;
2754 if (uld == CXGB4_ULD_RDMA) {
2755 lli.rxq_ids = adap->sge.rdma_rxq;
2756 lli.nrxq = adap->sge.rdmaqs;
2757 } else if (uld == CXGB4_ULD_ISCSI) {
2758 lli.rxq_ids = adap->sge.ofld_rxq;
2759 lli.nrxq = adap->sge.ofldqsets;
2760 }
2761 lli.ntxq = adap->sge.ofldqsets;
2762 lli.nchan = adap->params.nports;
2763 lli.nports = adap->params.nports;
2764 lli.wr_cred = adap->params.ofldq_wr_cred;
2765 lli.adapter_type = adap->params.rev;
2766 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2767 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2768 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2769 (adap->fn * 4));
b8ff05a9 2770 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2771 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2772 (adap->fn * 4));
2773 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2774 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2775 lli.fw_vers = adap->params.fw_vers;
3069ee9b 2776 lli.dbfifo_int_thresh = dbfifo_int_thresh;
2777
2778 handle = ulds[uld].add(&lli);
2779 if (IS_ERR(handle)) {
2780 dev_warn(adap->pdev_dev,
2781 "could not attach to the %s driver, error %ld\n",
2782 uld_str[uld], PTR_ERR(handle));
2783 return;
2784 }
2785
2786 adap->uld_handle[uld] = handle;
2787
2788 if (!netevent_registered) {
2789 register_netevent_notifier(&cxgb4_netevent_nb);
2790 netevent_registered = true;
2791 }
2792
2793 if (adap->flags & FULL_INIT_DONE)
2794 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2795}
2796
2797static void attach_ulds(struct adapter *adap)
2798{
2799 unsigned int i;
2800
2801 mutex_lock(&uld_mutex);
2802 list_add_tail(&adap->list_node, &adapter_list);
2803 for (i = 0; i < CXGB4_ULD_MAX; i++)
2804 if (ulds[i].add)
2805 uld_attach(adap, i);
2806 mutex_unlock(&uld_mutex);
2807}
2808
2809static void detach_ulds(struct adapter *adap)
2810{
2811 unsigned int i;
2812
2813 mutex_lock(&uld_mutex);
2814 list_del(&adap->list_node);
2815 for (i = 0; i < CXGB4_ULD_MAX; i++)
2816 if (adap->uld_handle[i]) {
2817 ulds[i].state_change(adap->uld_handle[i],
2818 CXGB4_STATE_DETACH);
2819 adap->uld_handle[i] = NULL;
2820 }
2821 if (netevent_registered && list_empty(&adapter_list)) {
2822 unregister_netevent_notifier(&cxgb4_netevent_nb);
2823 netevent_registered = false;
2824 }
2825 mutex_unlock(&uld_mutex);
2826}
2827
2828static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2829{
2830 unsigned int i;
2831
2832 mutex_lock(&uld_mutex);
2833 for (i = 0; i < CXGB4_ULD_MAX; i++)
2834 if (adap->uld_handle[i])
2835 ulds[i].state_change(adap->uld_handle[i], new_state);
2836 mutex_unlock(&uld_mutex);
2837}
2838
2839/**
2840 * cxgb4_register_uld - register an upper-layer driver
2841 * @type: the ULD type
2842 * @p: the ULD methods
2843 *
2844 * Registers an upper-layer driver with this driver and notifies the ULD
2845 * about any presently available devices that support its type. Returns
2846 * %-EBUSY if a ULD of the same type is already registered.
2847 */
2848int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2849{
2850 int ret = 0;
2851 struct adapter *adap;
2852
2853 if (type >= CXGB4_ULD_MAX)
2854 return -EINVAL;
2855 mutex_lock(&uld_mutex);
2856 if (ulds[type].add) {
2857 ret = -EBUSY;
2858 goto out;
2859 }
2860 ulds[type] = *p;
2861 list_for_each_entry(adap, &adapter_list, list_node)
2862 uld_attach(adap, type);
2863out: mutex_unlock(&uld_mutex);
2864 return ret;
2865}
2866EXPORT_SYMBOL(cxgb4_register_uld);
2867
2868/**
2869 * cxgb4_unregister_uld - unregister an upper-layer driver
2870 * @type: the ULD type
2871 *
2872 * Unregisters an existing upper-layer driver.
2873 */
2874int cxgb4_unregister_uld(enum cxgb4_uld type)
2875{
2876 struct adapter *adap;
2877
2878 if (type >= CXGB4_ULD_MAX)
2879 return -EINVAL;
2880 mutex_lock(&uld_mutex);
2881 list_for_each_entry(adap, &adapter_list, list_node)
2882 adap->uld_handle[type] = NULL;
2883 ulds[type].add = NULL;
2884 mutex_unlock(&uld_mutex);
2885 return 0;
2886}
2887EXPORT_SYMBOL(cxgb4_unregister_uld);
2888
2889/**
2890 * cxgb_up - enable the adapter
2891 * @adap: adapter being enabled
2892 *
2893 * Called when the first port is enabled, this function performs the
2894 * actions necessary to make an adapter operational, such as completing
2895 * the initialization of HW modules, and enabling interrupts.
2896 *
2897 * Must be called with the rtnl lock held.
2898 */
2899static int cxgb_up(struct adapter *adap)
2900{
aaefae9b 2901 int err;
b8ff05a9 2902
2903 err = setup_sge_queues(adap);
2904 if (err)
2905 goto out;
2906 err = setup_rss(adap);
2907 if (err)
2908 goto freeq;
2909
2910 if (adap->flags & USING_MSIX) {
aaefae9b 2911 name_msix_vecs(adap);
2912 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2913 adap->msix_info[0].desc, adap);
2914 if (err)
2915 goto irq_err;
2916
2917 err = request_msix_queue_irqs(adap);
2918 if (err) {
2919 free_irq(adap->msix_info[0].vec, adap);
2920 goto irq_err;
2921 }
2922 } else {
2923 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2924 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 2925 adap->port[0]->name, adap);
2926 if (err)
2927 goto irq_err;
2928 }
2929 enable_rx(adap);
2930 t4_sge_start(adap);
2931 t4_intr_enable(adap);
aaefae9b 2932 adap->flags |= FULL_INIT_DONE;
2933 notify_ulds(adap, CXGB4_STATE_UP);
2934 out:
2935 return err;
2936 irq_err:
2937 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2938 freeq:
2939 t4_free_sge_resources(adap);
2940 goto out;
2941}
2942
2943static void cxgb_down(struct adapter *adapter)
2944{
2945 t4_intr_disable(adapter);
2946 cancel_work_sync(&adapter->tid_release_task);
2947 cancel_work_sync(&adapter->db_full_task);
2948 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 2949 adapter->tid_release_task_busy = false;
204dc3c0 2950 adapter->tid_release_head = NULL;
2951
2952 if (adapter->flags & USING_MSIX) {
2953 free_msix_queue_irqs(adapter);
2954 free_irq(adapter->msix_info[0].vec, adapter);
2955 } else
2956 free_irq(adapter->pdev->irq, adapter);
2957 quiesce_rx(adapter);
2958 t4_sge_stop(adapter);
2959 t4_free_sge_resources(adapter);
2960 adapter->flags &= ~FULL_INIT_DONE;
2961}
2962
2963/*
2964 * net_device operations
2965 */
2966static int cxgb_open(struct net_device *dev)
2967{
2968 int err;
2969 struct port_info *pi = netdev_priv(dev);
2970 struct adapter *adapter = pi->adapter;
2971
2972 netif_carrier_off(dev);
2973
2974 if (!(adapter->flags & FULL_INIT_DONE)) {
2975 err = cxgb_up(adapter);
2976 if (err < 0)
2977 return err;
2978 }
b8ff05a9 2979
2980 err = link_start(dev);
2981 if (!err)
2982 netif_tx_start_all_queues(dev);
2983 return err;
2984}
2985
2986static int cxgb_close(struct net_device *dev)
2987{
2988 struct port_info *pi = netdev_priv(dev);
2989 struct adapter *adapter = pi->adapter;
2990
2991 netif_tx_stop_all_queues(dev);
2992 netif_carrier_off(dev);
060e0c75 2993 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2994}
2995
2996static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2997 struct rtnl_link_stats64 *ns)
2998{
2999 struct port_stats stats;
3000 struct port_info *p = netdev_priv(dev);
3001 struct adapter *adapter = p->adapter;
3002
3003 spin_lock(&adapter->stats_lock);
3004 t4_get_port_stats(adapter, p->tx_chan, &stats);
3005 spin_unlock(&adapter->stats_lock);
3006
3007 ns->tx_bytes = stats.tx_octets;
3008 ns->tx_packets = stats.tx_frames;
3009 ns->rx_bytes = stats.rx_octets;
3010 ns->rx_packets = stats.rx_frames;
3011 ns->multicast = stats.rx_mcast_frames;
3012
3013 /* detailed rx_errors */
3014 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3015 stats.rx_runt;
3016 ns->rx_over_errors = 0;
3017 ns->rx_crc_errors = stats.rx_fcs_err;
3018 ns->rx_frame_errors = stats.rx_symbol_err;
3019 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
3020 stats.rx_ovflow2 + stats.rx_ovflow3 +
3021 stats.rx_trunc0 + stats.rx_trunc1 +
3022 stats.rx_trunc2 + stats.rx_trunc3;
3023 ns->rx_missed_errors = 0;
3024
3025 /* detailed tx_errors */
3026 ns->tx_aborted_errors = 0;
3027 ns->tx_carrier_errors = 0;
3028 ns->tx_fifo_errors = 0;
3029 ns->tx_heartbeat_errors = 0;
3030 ns->tx_window_errors = 0;
3031
3032 ns->tx_errors = stats.tx_error_frames;
3033 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3034 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3035 return ns;
3036}
3037
3038static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3039{
060e0c75 3040 unsigned int mbox;
3041 int ret = 0, prtad, devad;
3042 struct port_info *pi = netdev_priv(dev);
3043 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3044
3045 switch (cmd) {
3046 case SIOCGMIIPHY:
3047 if (pi->mdio_addr < 0)
3048 return -EOPNOTSUPP;
3049 data->phy_id = pi->mdio_addr;
3050 break;
3051 case SIOCGMIIREG:
3052 case SIOCSMIIREG:
3053 if (mdio_phy_id_is_c45(data->phy_id)) {
3054 prtad = mdio_phy_id_prtad(data->phy_id);
3055 devad = mdio_phy_id_devad(data->phy_id);
3056 } else if (data->phy_id < 32) {
3057 prtad = data->phy_id;
3058 devad = 0;
3059 data->reg_num &= 0x1f;
3060 } else
3061 return -EINVAL;
3062
060e0c75 3063 mbox = pi->adapter->fn;
b8ff05a9 3064 if (cmd == SIOCGMIIREG)
060e0c75 3065 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3066 data->reg_num, &data->val_out);
3067 else
060e0c75 3068 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3069 data->reg_num, data->val_in);
3070 break;
3071 default:
3072 return -EOPNOTSUPP;
3073 }
3074 return ret;
3075}
3076
3077static void cxgb_set_rxmode(struct net_device *dev)
3078{
3079 /* unfortunately we can't return errors to the stack */
3080 set_rxmode(dev, -1, false);
3081}
3082
3083static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3084{
3085 int ret;
3086 struct port_info *pi = netdev_priv(dev);
3087
3088 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
3089 return -EINVAL;
3090 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
3091 -1, -1, -1, true);
3092 if (!ret)
3093 dev->mtu = new_mtu;
3094 return ret;
3095}
3096
3097static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3098{
3099 int ret;
3100 struct sockaddr *addr = p;
3101 struct port_info *pi = netdev_priv(dev);
3102
3103 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 3104 return -EADDRNOTAVAIL;
b8ff05a9 3105
3106 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
3107 pi->xact_addr_filt, addr->sa_data, true, true);
3108 if (ret < 0)
3109 return ret;
3110
3111 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3112 pi->xact_addr_filt = ret;
3113 return 0;
3114}
3115
3116#ifdef CONFIG_NET_POLL_CONTROLLER
3117static void cxgb_netpoll(struct net_device *dev)
3118{
3119 struct port_info *pi = netdev_priv(dev);
3120 struct adapter *adap = pi->adapter;
3121
3122 if (adap->flags & USING_MSIX) {
3123 int i;
3124 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3125
3126 for (i = pi->nqsets; i; i--, rx++)
3127 t4_sge_intr_msix(0, &rx->rspq);
3128 } else
3129 t4_intr_handler(adap)(0, adap);
3130}
3131#endif
3132
3133static const struct net_device_ops cxgb4_netdev_ops = {
3134 .ndo_open = cxgb_open,
3135 .ndo_stop = cxgb_close,
3136 .ndo_start_xmit = t4_eth_xmit,
9be793bf 3137 .ndo_get_stats64 = cxgb_get_stats,
3138 .ndo_set_rx_mode = cxgb_set_rxmode,
3139 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 3140 .ndo_set_features = cxgb_set_features,
3141 .ndo_validate_addr = eth_validate_addr,
3142 .ndo_do_ioctl = cxgb_ioctl,
3143 .ndo_change_mtu = cxgb_change_mtu,
3144#ifdef CONFIG_NET_POLL_CONTROLLER
3145 .ndo_poll_controller = cxgb_netpoll,
3146#endif
3147};
3148
3149void t4_fatal_err(struct adapter *adap)
3150{
3151 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
3152 t4_intr_disable(adap);
3153 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3154}
3155
3156static void setup_memwin(struct adapter *adap)
3157{
3158 u32 bar0;
3159
3160 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
3161 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
3162 (bar0 + MEMWIN0_BASE) | BIR(0) |
3163 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
3164 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
3165 (bar0 + MEMWIN1_BASE) | BIR(0) |
3166 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
3167 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3168 (bar0 + MEMWIN2_BASE) | BIR(0) |
3169 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
3170}
3171
3172static void setup_memwin_rdma(struct adapter *adap)
3173{
3174 if (adap->vres.ocq.size) {
3175 unsigned int start, sz_kb;
3176
3177 start = pci_resource_start(adap->pdev, 2) +
3178 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3179 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3180 t4_write_reg(adap,
3181 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
3182 start | BIR(1) | WINDOW(ilog2(sz_kb)));
3183 t4_write_reg(adap,
3184 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
3185 adap->vres.ocq.start);
3186 t4_read_reg(adap,
3187 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
3188 }
3189}
3190
3191static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3192{
3193 u32 v;
3194 int ret;
3195
3196 /* get device capabilities */
3197 memset(c, 0, sizeof(*c));
3198 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3199 FW_CMD_REQUEST | FW_CMD_READ);
3200 c->retval_len16 = htonl(FW_LEN16(*c));
060e0c75 3201 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
3202 if (ret < 0)
3203 return ret;
3204
3205 /* select capabilities we'll be using */
3206 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3207 if (!vf_acls)
3208 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3209 else
3210 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3211 } else if (vf_acls) {
3212 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
3213 return ret;
3214 }
3215 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3216 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 3217 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
3218 if (ret < 0)
3219 return ret;
3220
060e0c75 3221 ret = t4_config_glbl_rss(adap, adap->fn,
3222 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3223 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3224 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
3225 if (ret < 0)
3226 return ret;
3227
3228 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
3229 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
3230 if (ret < 0)
3231 return ret;
3232
3233 t4_sge_init(adap);
3234
3235 /* tweak some settings */
3236 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
3237 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
3238 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
3239 v = t4_read_reg(adap, TP_PIO_DATA);
3240 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
3241
3242 /* get basic stuff going */
3243 return t4_early_init(adap, adap->fn);
3244}
3245
3246/*
3247 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3248 */
3249#define MAX_ATIDS 8192U
3250
3251/*
3252 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3253 *
3254 * If the firmware we're dealing with has Configuration File support, then
3255 * we use that to perform all configuration
3256 */
3257
3258/*
3259 * Tweak configuration based on module parameters, etc. Most of these have
3260 * defaults assigned to them by Firmware Configuration Files (if we're using
3261 * them) but need to be explicitly set if we're using hard-coded
3262 * initialization. But even in the case of using Firmware Configuration
3263 * Files, we'd like to expose the ability to change these via module
3264 * parameters so these are essentially common tweaks/settings for
3265 * Configuration Files and hard-coded initialization ...
3266 */
3267static int adap_init0_tweaks(struct adapter *adapter)
3268{
3269 /*
3270 * Fix up various Host-Dependent Parameters like Page Size, Cache
3271 * Line Size, etc. The firmware default is for a 4KB Page Size and
3272 * 64B Cache Line Size ...
3273 */
3274 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3275
3276 /*
3277 * Process module parameters which affect early initialization.
3278 */
3279 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3280 dev_err(&adapter->pdev->dev,
3281 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3282 rx_dma_offset);
3283 rx_dma_offset = 2;
3284 }
3285 t4_set_reg_field(adapter, SGE_CONTROL,
3286 PKTSHIFT_MASK,
3287 PKTSHIFT(rx_dma_offset));
3288
3289 /*
3290 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3291 * adds the pseudo header itself.
3292 */
3293 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
3294 CSUM_HAS_PSEUDO_HDR, 0);
3295
3296 return 0;
3297}
3298
3299/*
3300 * Attempt to initialize the adapter via a Firmware Configuration File.
3301 */
3302static int adap_init0_config(struct adapter *adapter, int reset)
3303{
3304 struct fw_caps_config_cmd caps_cmd;
3305 const struct firmware *cf;
3306 unsigned long mtype = 0, maddr = 0;
3307 u32 finiver, finicsum, cfcsum;
3308 int ret, using_flash;
3309
3310 /*
3311 * Reset device if necessary.
3312 */
3313 if (reset) {
3314 ret = t4_fw_reset(adapter, adapter->mbox,
3315 PIORSTMODE | PIORST);
3316 if (ret < 0)
3317 goto bye;
3318 }
3319
3320 /*
3321 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3322 * then use that. Otherwise, use the configuration file stored
3323 * in the adapter flash ...
3324 */
3325 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
3326 if (ret < 0) {
3327 using_flash = 1;
3328 mtype = FW_MEMTYPE_CF_FLASH;
3329 maddr = t4_flash_cfg_addr(adapter);
3330 } else {
3331 u32 params[7], val[7];
3332
3333 using_flash = 0;
3334 if (cf->size >= FLASH_CFG_MAX_SIZE)
3335 ret = -ENOMEM;
3336 else {
3337 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3338 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3339 ret = t4_query_params(adapter, adapter->mbox,
3340 adapter->fn, 0, 1, params, val);
3341 if (ret == 0) {
3342 /*
3343 * For t4_memory_write() below addresses and
3344 * sizes have to be in terms of multiples of 4
3345 * bytes. So, if the Configuration File isn't
3346 * a multiple of 4 bytes in length we'll have
3347 * to write that out separately since we can't
3348 * guarantee that the bytes following the
3349 * residual byte in the buffer returned by
3350 * request_firmware() are zeroed out ...
3351 */
3352 size_t resid = cf->size & 0x3;
3353 size_t size = cf->size & ~0x3;
3354 __be32 *data = (__be32 *)cf->data;
3355
3356 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
3357 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
3358
3359 ret = t4_memory_write(adapter, mtype, maddr,
3360 size, data);
3361 if (ret == 0 && resid != 0) {
3362 union {
3363 __be32 word;
3364 char buf[4];
3365 } last;
3366 int i;
3367
3368 last.word = data[size >> 2];
3369 for (i = resid; i < 4; i++)
3370 last.buf[i] = 0;
3371 ret = t4_memory_write(adapter, mtype,
3372 maddr + size,
3373 4, &last.word);
3374 }
3375 }
3376 }
3377
3378 release_firmware(cf);
3379 if (ret)
3380 goto bye;
3381 }
3382
3383 /*
3384 * Issue a Capability Configuration command to the firmware to get it
3385 * to parse the Configuration File. We don't use t4_fw_config_file()
3386 * because we want the ability to modify various features after we've
3387 * processed the configuration file ...
3388 */
3389 memset(&caps_cmd, 0, sizeof(caps_cmd));
3390 caps_cmd.op_to_write =
3391 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3392 FW_CMD_REQUEST |
3393 FW_CMD_READ);
3394 caps_cmd.retval_len16 =
3395 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
3396 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3397 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
3398 FW_LEN16(caps_cmd));
3399 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3400 &caps_cmd);
3401 if (ret < 0)
3402 goto bye;
3403
3404 finiver = ntohl(caps_cmd.finiver);
3405 finicsum = ntohl(caps_cmd.finicsum);
3406 cfcsum = ntohl(caps_cmd.cfcsum);
3407 if (finicsum != cfcsum)
3408 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3409 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3410 finicsum, cfcsum);
3411
3412 /*
3413 * If we're a pure NIC driver then disable all offloading facilities.
3414 * This will allow the firmware to optimize aspects of the hardware
3415 * configuration which will result in improved performance.
3416 */
3417 caps_cmd.ofldcaps = 0;
3418 caps_cmd.iscsicaps = 0;
3419 caps_cmd.rdmacaps = 0;
3420 caps_cmd.fcoecaps = 0;
3421
3422 /*
3423 * And now tell the firmware to use the configuration we just loaded.
3424 */
3425 caps_cmd.op_to_write =
3426 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3427 FW_CMD_REQUEST |
3428 FW_CMD_WRITE);
3429 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3430 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3431 NULL);
3432 if (ret < 0)
3433 goto bye;
3434
3435 /*
3436 * Tweak configuration based on system architecture, module
3437 * parameters, etc.
3438 */
3439 ret = adap_init0_tweaks(adapter);
3440 if (ret < 0)
3441 goto bye;
3442
3443 /*
3444 * And finally tell the firmware to initialize itself using the
3445 * parameters from the Configuration File.
3446 */
3447 ret = t4_fw_initialize(adapter, adapter->mbox);
3448 if (ret < 0)
3449 goto bye;
3450
3451 /*
3452 * Return successfully and note that we're operating with parameters
3453 * not supplied by the driver, rather than from hard-wired
 3454 * initialization constants buried in the driver.
3455 */
3456 adapter->flags |= USING_SOFT_PARAMS;
3457 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3458 "Configuration File %s, version %#x, computed checksum %#x\n",
3459 (using_flash
3460 ? "in device FLASH"
3461 : "/lib/firmware/" FW_CFNAME),
3462 finiver, cfcsum);
3463 return 0;
3464
3465 /*
3466 * Something bad happened. Return the error ... (If the "error"
 3467 * is that there's no Configuration File on the adapter, we don't
3468 * want to issue a warning since this is fairly common.)
3469 */
3470bye:
3471 if (ret != -ENOENT)
3472 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
3473 -ret);
3474 return ret;
3475}
3476
3477/*
3478 * Attempt to initialize the adapter via hard-coded, driver supplied
3479 * parameters ...
3480 */
3481static int adap_init0_no_config(struct adapter *adapter, int reset)
3482{
3483 struct sge *s = &adapter->sge;
3484 struct fw_caps_config_cmd caps_cmd;
3485 u32 v;
3486 int i, ret;
3487
3488 /*
3489 * Reset device if necessary
3490 */
3491 if (reset) {
3492 ret = t4_fw_reset(adapter, adapter->mbox,
3493 PIORSTMODE | PIORST);
3494 if (ret < 0)
3495 goto bye;
3496 }
3497
3498 /*
3499 * Get device capabilities and select which we'll be using.
3500 */
3501 memset(&caps_cmd, 0, sizeof(caps_cmd));
3502 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3503 FW_CMD_REQUEST | FW_CMD_READ);
3504 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3505 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3506 &caps_cmd);
3507 if (ret < 0)
3508 goto bye;
3509
3510#ifndef CONFIG_CHELSIO_T4_OFFLOAD
3511 /*
3512 * If we're a pure NIC driver then disable all offloading facilities.
3513 * This will allow the firmware to optimize aspects of the hardware
3514 * configuration which will result in improved performance.
3515 */
3516 caps_cmd.ofldcaps = 0;
3517 caps_cmd.iscsicaps = 0;
3518 caps_cmd.rdmacaps = 0;
3519 caps_cmd.fcoecaps = 0;
3520#endif
3521
3522 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3523 if (!vf_acls)
3524 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3525 else
3526 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3527 } else if (vf_acls) {
3528 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
3529 goto bye;
3530 }
3531 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3532 FW_CMD_REQUEST | FW_CMD_WRITE);
3533 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3534 NULL);
3535 if (ret < 0)
3536 goto bye;
3537
3538 /*
3539 * Tweak configuration based on system architecture, module
3540 * parameters, etc.
3541 */
3542 ret = adap_init0_tweaks(adapter);
3543 if (ret < 0)
3544 goto bye;
3545
3546 /*
3547 * Select RSS Global Mode we want to use. We use "Basic Virtual"
3548 * mode which maps each Virtual Interface to its own section of
3549 * the RSS Table and we turn on all map and hash enables ...
3550 */
3551 adapter->flags |= RSS_TNLALLLOOKUP;
3552 ret = t4_config_glbl_rss(adapter, adapter->mbox,
3553 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3554 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3555 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
3556 ((adapter->flags & RSS_TNLALLLOOKUP) ?
3557 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
3558 if (ret < 0)
3559 goto bye;
3560
3561 /*
3562 * Set up our own fundamental resource provisioning ...
3563 */
3564 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
3565 PFRES_NEQ, PFRES_NETHCTRL,
3566 PFRES_NIQFLINT, PFRES_NIQ,
3567 PFRES_TC, PFRES_NVI,
3568 FW_PFVF_CMD_CMASK_MASK,
3569 pfvfres_pmask(adapter, adapter->fn, 0),
3570 PFRES_NEXACTF,
3571 PFRES_R_CAPS, PFRES_WX_CAPS);
3572 if (ret < 0)
3573 goto bye;
3574
3575 /*
3576 * Perform low level SGE initialization. We need to do this before we
3577 * send the firmware the INITIALIZE command because that will cause
3578 * any other PF Drivers which are waiting for the Master
3579 * Initialization to proceed forward.
3580 */
3581 for (i = 0; i < SGE_NTIMERS - 1; i++)
3582 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
3583 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3584 s->counter_val[0] = 1;
3585 for (i = 1; i < SGE_NCOUNTERS; i++)
3586 s->counter_val[i] = min(intr_cnt[i - 1],
3587 THRESHOLD_0_GET(THRESHOLD_0_MASK));
3588 t4_sge_init(adapter);
3589
3590#ifdef CONFIG_PCI_IOV
3591 /*
3592 * Provision resource limits for Virtual Functions. We currently
3593 * grant them all the same static resource limits except for the Port
3594 * Access Rights Mask which we're assigning based on the PF. All of
3595 * the static provisioning stuff for both the PF and VF really needs
3596 * to be managed in a persistent manner for each device which the
3597 * firmware controls.
3598 */
3599 {
3600 int pf, vf;
3601
3602 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3603 if (num_vf[pf] <= 0)
3604 continue;
3605
3606 /* VF numbering starts at 1! */
3607 for (vf = 1; vf <= num_vf[pf]; vf++) {
3608 ret = t4_cfg_pfvf(adapter, adapter->mbox,
3609 pf, vf,
3610 VFRES_NEQ, VFRES_NETHCTRL,
3611 VFRES_NIQFLINT, VFRES_NIQ,
3612 VFRES_TC, VFRES_NVI,
3613 FW_PFVF_CMD_CMASK_GET(
3614 FW_PFVF_CMD_CMASK_MASK),
3615 pfvfres_pmask(
3616 adapter, pf, vf),
3617 VFRES_NEXACTF,
3618 VFRES_R_CAPS, VFRES_WX_CAPS);
3619 if (ret < 0)
3620 dev_warn(adapter->pdev_dev,
3621 "failed to "\
3622 "provision pf/vf=%d/%d; "
3623 "err=%d\n", pf, vf, ret);
3624 }
3625 }
3626 }
3627#endif
3628
3629 /*
3630 * Set up the default filter mode. Later we'll want to implement this
3631 * via a firmware command, etc. ... This needs to be done before the
 3632 * firmware initialization command ... If the selected set of fields
3633 * isn't equal to the default value, we'll need to make sure that the
3634 * field selections will fit in the 36-bit budget.
3635 */
3636 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
3637 int i, bits = 0;
3638
3639 for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
3640 switch (tp_vlan_pri_map & (1 << i)) {
3641 case 0:
3642 /* compressed filter field not enabled */
3643 break;
3644 case FCOE_MASK:
3645 bits += 1;
3646 break;
3647 case PORT_MASK:
3648 bits += 3;
3649 break;
3650 case VNIC_ID_MASK:
3651 bits += 17;
3652 break;
3653 case VLAN_MASK:
3654 bits += 17;
3655 break;
3656 case TOS_MASK:
3657 bits += 8;
3658 break;
3659 case PROTOCOL_MASK:
3660 bits += 8;
3661 break;
3662 case ETHERTYPE_MASK:
3663 bits += 16;
3664 break;
3665 case MACMATCH_MASK:
3666 bits += 9;
3667 break;
3668 case MPSHITTYPE_MASK:
3669 bits += 3;
3670 break;
3671 case FRAGMENTATION_MASK:
3672 bits += 1;
3673 break;
3674 }
3675
3676 if (bits > 36) {
3677 dev_err(adapter->pdev_dev,
3678 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
3679 " using %#x\n", tp_vlan_pri_map, bits,
3680 TP_VLAN_PRI_MAP_DEFAULT);
3681 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
3682 }
3683 }
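	/*
	 * Worked example (reader's aid only): selecting VLAN (17) +
	 * VNIC_ID (17) + PROTOCOL (8) would need 42 bits and be rejected
	 * above, while VLAN (17) + PORT (3) + PROTOCOL (8) +
	 * MPSHITTYPE (3) = 31 bits fits within the 36-bit budget.
	 */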
3684 v = tp_vlan_pri_map;
3685 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
3686 &v, 1, TP_VLAN_PRI_MAP);
3687
3688 /*
 3689 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
3690 * to support any of the compressed filter fields above. Newer
3691 * versions of the firmware do this automatically but it doesn't hurt
3692 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
3693 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
3694 * since the firmware automatically turns this on and off when we have
3695 * a non-zero number of filters active (since it does have a
3696 * performance impact).
3697 */
3698 if (tp_vlan_pri_map)
3699 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
3700 FIVETUPLELOOKUP_MASK,
3701 FIVETUPLELOOKUP_MASK);
3702
3703 /*
3704 * Tweak some settings.
3705 */
3706 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
3707 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
3708 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
3709 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
3710
3711 /*
3712 * Get basic stuff going by issuing the Firmware Initialize command.
3713 * Note that this _must_ be after all PFVF commands ...
3714 */
3715 ret = t4_fw_initialize(adapter, adapter->mbox);
3716 if (ret < 0)
3717 goto bye;
3718
3719 /*
3720 * Return successfully!
3721 */
3722 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
3723 "driver parameters\n");
3724 return 0;
3725
3726 /*
3727 * Something bad happened. Return the error ...
3728 */
3729bye:
3730 return ret;
3731}
3732
3733/*
3734 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3735 */
3736static int adap_init0(struct adapter *adap)
3737{
3738 int ret;
3739 u32 v, port_vec;
3740 enum dev_state state;
3741 u32 params[7], val[7];
636f9d37 3742 int reset = 1, j;
b8ff05a9 3743
3744 /*
3745 * Contact FW, advertising Master capability (and potentially forcing
3746 * ourselves as the Master PF if our module parameter force_init is
3747 * set).
3748 */
3749 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
3750 force_init ? MASTER_MUST : MASTER_MAY,
3751 &state);
3752 if (ret < 0) {
3753 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3754 ret);
3755 return ret;
3756 }
3757 if (ret == adap->mbox)
3758 adap->flags |= MASTER_PF;
3759 if (force_init && state == DEV_STATE_INIT)
3760 state = DEV_STATE_UNINIT;
b8ff05a9 3761
636f9d37
VP
3762 /*
3763 * If we're the Master PF Driver and the device is uninitialized,
3764 * then let's consider upgrading the firmware ... (We always want
3765 * to check the firmware version number in order to A. get it for
3766 * later reporting and B. to warn if the currently loaded firmware
3767 * is excessively mismatched relative to the driver.)
3768 */
3769 ret = t4_check_fw_version(adap);
3770 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3771 if (ret == -EINVAL || ret > 0) {
3772 if (upgrade_fw(adap) >= 0) {
3773 /*
3774 * Note that the chip was reset as part of the
3775 * firmware upgrade so we don't reset it again
3776 * below and grab the new firmware version.
3777 */
3778 reset = 0;
3779 ret = t4_check_fw_version(adap);
3780 }
3781 }
3782 if (ret < 0)
3783 return ret;
3784 }
b8ff05a9 3785
636f9d37
VP
3786 /*
3787 * Grab VPD parameters. This should be done after we establish a
3788 * connection to the firmware since some of the VPD parameters
3789 * (notably the Core Clock frequency) are retrieved via requests to
3790 * the firmware. On the other hand, we need these fairly early on
3791 * so we do this right after getting ahold of the firmware.
3792 */
3793 ret = get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
3794 if (ret < 0)
3795 goto bye;
a0881cab 3796
636f9d37 3797 /*
13ee15d3
VP
3798 * Find out what ports are available to us. Note that we need to do
3799 * this before calling adap_init0_no_config() since it needs nports
3800 * and portvec ...
636f9d37
VP
3801 */
3802 v =
3803 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3804 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
3805 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
a0881cab
DM
3806 if (ret < 0)
3807 goto bye;
3808
636f9d37
VP
3809 adap->params.nports = hweight32(port_vec);
3810 adap->params.portvec = port_vec;
3811
3812 /*
3813 * If the firmware is initialized already (and we're not forcing a
3814 * master initialization), note that we're living with existing
3815 * adapter parameters. Otherwise, it's time to try initializing the
3816 * adapter ...
3817 */
3818 if (state == DEV_STATE_INIT) {
3819 dev_info(adap->pdev_dev, "Coming up as %s: "\
3820 "Adapter already initialized\n",
3821 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3822 adap->flags |= USING_SOFT_PARAMS;
3823 } else {
3824 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3825 "Initializing adapter\n");
636f9d37
VP
3826
3827 /*
3828 * If the firmware doesn't support Configuration
3829 * Files, warn the user and continue.
3830 */
3831 if (ret < 0)
13ee15d3 3832 dev_warn(adap->pdev_dev, "Firmware doesn't support "
636f9d37 3833 "configuration file.\n");
13ee15d3
VP
3834 if (force_old_init)
3835 ret = adap_init0_no_config(adap, reset);
636f9d37
VP
3836 else {
3837 /*
13ee15d3
VP
3838 * Find out whether we're dealing with a version of
3839 * the firmware which has configuration file support.
636f9d37 3840 */
13ee15d3
VP
3841 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3842 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3843 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
3844 params, val);
636f9d37 3845
13ee15d3
VP
3846 /*
3847 * If the firmware doesn't support Configuration
3848 * Files, use the old Driver-based, hard-wired
3849 * initialization. Otherwise, try using the
3850 * Configuration File support and fall back to the
3851 * Driver-based initialization if there's no
3852 * Configuration File found.
3853 */
3854 if (ret < 0)
3855 ret = adap_init0_no_config(adap, reset);
3856 else {
3857 /*
3858 * The firmware provides us with a memory
3859 * buffer where we can load a Configuration
3860 * File from the host if we want to override
3861 * the Configuration File in flash.
3862 */
3863
3864 ret = adap_init0_config(adap, reset);
3865 if (ret == -ENOENT) {
3866 dev_info(adap->pdev_dev,
3867 "No Configuration File present "
3868 "on adapter. Using hard-wired "
3869 "configuration parameters.\n");
3870 ret = adap_init0_no_config(adap, reset);
3871 }
636f9d37
VP
3872 }
3873 }
3874 if (ret < 0) {
3875 dev_err(adap->pdev_dev,
3876 "could not initialize adapter, error %d\n",
3877 -ret);
3878 goto bye;
3879 }
3880 }
3881
3882 /*
3883 * If we're living with non-hard-coded parameters (either from a
3884 * Firmware Configuration File or values programmed by a different PF
3885 * Driver), give the SGE code a chance to pull in anything that it
3886 * needs ... Note that this must be called after we retrieve our VPD
3887 * parameters in order to know how to convert core ticks to seconds.
3888 */
3889 if (adap->flags & USING_SOFT_PARAMS) {
3890 ret = t4_sge_init(adap);
3891 if (ret < 0)
3892 goto bye;
3893 }
3894
3895 /*
3896 * Grab some of our basic fundamental operating parameters.
3897 */
3898#define FW_PARAM_DEV(param) \
3899 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3900 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3901
b8ff05a9 3902#define FW_PARAM_PFVF(param) \
636f9d37
VP
3903 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3904 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
3905 FW_PARAMS_PARAM_Y(0) | \
3906 FW_PARAMS_PARAM_Z(0)
b8ff05a9 3907
636f9d37 3908 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
3909 params[1] = FW_PARAM_PFVF(L2T_START);
3910 params[2] = FW_PARAM_PFVF(L2T_END);
3911 params[3] = FW_PARAM_PFVF(FILTER_START);
3912 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 3913 params[5] = FW_PARAM_PFVF(IQFLINT_START);
636f9d37 3914 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
b8ff05a9
DM
3915 if (ret < 0)
3916 goto bye;
636f9d37
VP
3917 adap->sge.egr_start = val[0];
3918 adap->l2t_start = val[1];
3919 adap->l2t_end = val[2];
b8ff05a9
DM
3920 adap->tids.ftid_base = val[3];
3921 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 3922 adap->sge.ingr_start = val[5];
b8ff05a9 3923
636f9d37
VP
3924 /* query params related to active filter region */
3925 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3926 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3927 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3928 /* If the active filter region is non-empty, enable establishing
3929 * offload connections through firmware work requests.
3930 */
3931 if ((val[0] != val[1]) && (ret >= 0)) {
3932 adap->flags |= FW_OFLD_CONN;
3933 adap->tids.aftid_base = val[0];
3934 adap->tids.aftid_end = val[1];
3935 }
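	/*
	 * Note (added for clarity): if the query fails or the region is empty
	 * (val[0] == val[1]), FW_OFLD_CONN stays clear and the work-request
	 * based connection setup described above is simply not used.
	 */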
3936
3937#ifdef CONFIG_CHELSIO_T4_OFFLOAD
3938 /*
3939 * Get device capabilities so we can determine what resources we need
3940 * to manage.
3941 */
3942 memset(&caps_cmd, 0, sizeof(caps_cmd));
3943 caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
13ee15d3
VP
3944 FW_CMD_REQUEST | FW_CMD_READ);
3945 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
3946 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3947 &caps_cmd);
3948 if (ret < 0)
3949 goto bye;
3950
13ee15d3 3951 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
3952 /* query offload-related parameters */
3953 params[0] = FW_PARAM_DEV(NTID);
3954 params[1] = FW_PARAM_PFVF(SERVER_START);
3955 params[2] = FW_PARAM_PFVF(SERVER_END);
3956 params[3] = FW_PARAM_PFVF(TDDP_START);
3957 params[4] = FW_PARAM_PFVF(TDDP_END);
3958 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
636f9d37
VP
3959 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3960 params, val);
b8ff05a9
DM
3961 if (ret < 0)
3962 goto bye;
3963 adap->tids.ntids = val[0];
3964 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3965 adap->tids.stid_base = val[1];
3966 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37
VP
3967 /*
3968 * Set up the server filter region. Divide the available filter
3969 * region into two parts: regular filters get 1/3rd and server
3970 * filters get the remaining 2/3rd. This split is only done when
3971 * the workaround path is enabled.
3972 * 1. Regular filters.
3973 * 2. Server filters: these are special filters used to
3974 * redirect SYN packets to the offload queue.
3975 */
3976 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3977 adap->tids.sftid_base = adap->tids.ftid_base +
3978 DIV_ROUND_UP(adap->tids.nftids, 3);
3979 adap->tids.nsftids = adap->tids.nftids -
3980 DIV_ROUND_UP(adap->tids.nftids, 3);
3981 adap->tids.nftids = adap->tids.sftid_base -
3982 adap->tids.ftid_base;
3983 }
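		/*
		 * Worked example (added for illustration): with nftids == 99,
		 * DIV_ROUND_UP(99, 3) == 33, so the regular filter region
		 * keeps the first 33 IDs and the remaining 66 IDs become
		 * server filters starting at sftid_base.
		 */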
b8ff05a9
DM
3984 adap->vres.ddp.start = val[3];
3985 adap->vres.ddp.size = val[4] - val[3] + 1;
3986 adap->params.ofldq_wr_cred = val[5];
636f9d37
VP
3987
3988 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
3989 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
3990 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3991 params, val);
3992 if ((val[0] != val[1]) && (ret >= 0)) {
3993 adap->tids.uotid_base = val[0];
3994 adap->tids.nuotids = val[1] - val[0] + 1;
3995 }
3996
b8ff05a9
DM
3997 adap->params.offload = 1;
3998 }
636f9d37 3999 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
4000 params[0] = FW_PARAM_PFVF(STAG_START);
4001 params[1] = FW_PARAM_PFVF(STAG_END);
4002 params[2] = FW_PARAM_PFVF(RQ_START);
4003 params[3] = FW_PARAM_PFVF(RQ_END);
4004 params[4] = FW_PARAM_PFVF(PBL_START);
4005 params[5] = FW_PARAM_PFVF(PBL_END);
636f9d37
VP
4006 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4007 params, val);
b8ff05a9
DM
4008 if (ret < 0)
4009 goto bye;
4010 adap->vres.stag.start = val[0];
4011 adap->vres.stag.size = val[1] - val[0] + 1;
4012 adap->vres.rq.start = val[2];
4013 adap->vres.rq.size = val[3] - val[2] + 1;
4014 adap->vres.pbl.start = val[4];
4015 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
4016
4017 params[0] = FW_PARAM_PFVF(SQRQ_START);
4018 params[1] = FW_PARAM_PFVF(SQRQ_END);
4019 params[2] = FW_PARAM_PFVF(CQ_START);
4020 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
4021 params[4] = FW_PARAM_PFVF(OCQ_START);
4022 params[5] = FW_PARAM_PFVF(OCQ_END);
636f9d37 4023 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
a0881cab
DM
4024 if (ret < 0)
4025 goto bye;
4026 adap->vres.qp.start = val[0];
4027 adap->vres.qp.size = val[1] - val[0] + 1;
4028 adap->vres.cq.start = val[2];
4029 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
4030 adap->vres.ocq.start = val[4];
4031 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9 4032 }
636f9d37 4033 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
4034 params[0] = FW_PARAM_PFVF(ISCSI_START);
4035 params[1] = FW_PARAM_PFVF(ISCSI_END);
636f9d37
VP
4036 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
4037 params, val);
b8ff05a9
DM
4038 if (ret < 0)
4039 goto bye;
4040 adap->vres.iscsi.start = val[0];
4041 adap->vres.iscsi.size = val[1] - val[0] + 1;
4042 }
4043#undef FW_PARAM_PFVF
4044#undef FW_PARAM_DEV
636f9d37 4045#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
b8ff05a9 4046
636f9d37
VP
4047 /*
4048 * These are finalized by FW initialization, load their values now.
4049 */
b8ff05a9
DM
4050 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
4051 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
636f9d37 4052 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
b8ff05a9
DM
4053 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4054 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4055 adap->params.b_wnd);
7ee9ff94 4056
636f9d37
VP
4057 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4058 for (j = 0; j < NCHAN; j++)
4059 adap->params.tp.tx_modq[j] = j;
7ee9ff94 4060
636f9d37 4061 adap->flags |= FW_OK;
b8ff05a9
DM
4062 return 0;
4063
4064 /*
636f9d37
VP
4065 * Something bad happened. If a command timed out or failed with EIO,
4066 * the FW is not operating within its spec or something catastrophic
4067 * happened to the HW/FW, so stop issuing commands.
b8ff05a9 4068 */
636f9d37
VP
4069bye:
4070 if (ret != -ETIMEDOUT && ret != -EIO)
4071 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
4072 return ret;
4073}
4074
204dc3c0
DM
4075/* EEH callbacks */
4076
4077static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4078 pci_channel_state_t state)
4079{
4080 int i;
4081 struct adapter *adap = pci_get_drvdata(pdev);
4082
4083 if (!adap)
4084 goto out;
4085
4086 rtnl_lock();
4087 adap->flags &= ~FW_OK;
4088 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4089 for_each_port(adap, i) {
4090 struct net_device *dev = adap->port[i];
4091
4092 netif_device_detach(dev);
4093 netif_carrier_off(dev);
4094 }
4095 if (adap->flags & FULL_INIT_DONE)
4096 cxgb_down(adap);
4097 rtnl_unlock();
4098 pci_disable_device(pdev);
4099out: return state == pci_channel_io_perm_failure ?
4100 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4101}
4102
4103static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4104{
4105 int i, ret;
4106 struct fw_caps_config_cmd c;
4107 struct adapter *adap = pci_get_drvdata(pdev);
4108
4109 if (!adap) {
4110 pci_restore_state(pdev);
4111 pci_save_state(pdev);
4112 return PCI_ERS_RESULT_RECOVERED;
4113 }
4114
4115 if (pci_enable_device(pdev)) {
4116 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
4117 return PCI_ERS_RESULT_DISCONNECT;
4118 }
4119
4120 pci_set_master(pdev);
4121 pci_restore_state(pdev);
4122 pci_save_state(pdev);
4123 pci_cleanup_aer_uncorrect_error_status(pdev);
4124
4125 if (t4_wait_dev_ready(adap) < 0)
4126 return PCI_ERS_RESULT_DISCONNECT;
060e0c75 4127 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
204dc3c0
DM
4128 return PCI_ERS_RESULT_DISCONNECT;
4129 adap->flags |= FW_OK;
4130 if (adap_init1(adap, &c))
4131 return PCI_ERS_RESULT_DISCONNECT;
4132
4133 for_each_port(adap, i) {
4134 struct port_info *p = adap2pinfo(adap, i);
4135
060e0c75
DM
4136 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
4137 NULL, NULL);
204dc3c0
DM
4138 if (ret < 0)
4139 return PCI_ERS_RESULT_DISCONNECT;
4140 p->viid = ret;
4141 p->xact_addr_filt = -1;
4142 }
4143
4144 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4145 adap->params.b_wnd);
1ae970e0 4146 setup_memwin(adap);
204dc3c0
DM
4147 if (cxgb_up(adap))
4148 return PCI_ERS_RESULT_DISCONNECT;
4149 return PCI_ERS_RESULT_RECOVERED;
4150}
4151
4152static void eeh_resume(struct pci_dev *pdev)
4153{
4154 int i;
4155 struct adapter *adap = pci_get_drvdata(pdev);
4156
4157 if (!adap)
4158 return;
4159
4160 rtnl_lock();
4161 for_each_port(adap, i) {
4162 struct net_device *dev = adap->port[i];
4163
4164 if (netif_running(dev)) {
4165 link_start(dev);
4166 cxgb_set_rxmode(dev);
4167 }
4168 netif_device_attach(dev);
4169 }
4170 rtnl_unlock();
4171}
4172
3646f0e5 4173static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
4174 .error_detected = eeh_err_detected,
4175 .slot_reset = eeh_slot_reset,
4176 .resume = eeh_resume,
4177};
4178
b8ff05a9
DM
4179static inline bool is_10g_port(const struct link_config *lc)
4180{
4181 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
4182}
4183
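/*
 * Note (added for clarity): for init_rspq() below, timer_idx selects one of
 * the SGE interrupt hold-off timers, a pkt_cnt_idx below SGE_NCOUNTERS also
 * arms the packet-count interrupt threshold, and size/iqe_size give the queue
 * length in entries and the size of each entry in bytes.
 */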
4184static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
4185 unsigned int size, unsigned int iqe_size)
4186{
4187 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
4188 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
4189 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
4190 q->iqe_len = iqe_size;
4191 q->size = size;
4192}
4193
4194/*
4195 * Perform default configuration of DMA queues depending on the number and type
4196 * of ports we found and the number of available CPUs. Most settings can be
4197 * modified by the admin prior to actual use.
4198 */
4199static void __devinit cfg_queues(struct adapter *adap)
4200{
4201 struct sge *s = &adap->sge;
4202 int i, q10g = 0, n10g = 0, qidx = 0;
4203
4204 for_each_port(adap, i)
4205 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
4206
4207 /*
4208 * We default to 1 queue set per non-10G port and give each 10G port
4209 * up to the default RSS queue count (at most one per online CPU).
4210 */
4211 if (n10g)
4212 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
4213 if (q10g > netif_get_num_default_rss_queues())
4214 q10g = netif_get_num_default_rss_queues();
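	/*
	 * Illustrative numbers (MAX_ETH_QSETS value assumed here, not taken
	 * from this excerpt): with 4 ports of which 2 are 10G and
	 * MAX_ETH_QSETS of 32, q10g = (32 - 2) / 2 = 15 queue sets per 10G
	 * port, then capped to netif_get_num_default_rss_queues(), which is
	 * typically the smaller of 8 and the number of online CPUs.
	 */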
b8ff05a9
DM
4215
4216 for_each_port(adap, i) {
4217 struct port_info *pi = adap2pinfo(adap, i);
4218
4219 pi->first_qset = qidx;
4220 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
4221 qidx += pi->nqsets;
4222 }
4223
4224 s->ethqsets = qidx;
4225 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4226
4227 if (is_offload(adap)) {
4228 /*
4229 * For offload we use 1 queue/channel if all ports are up to 1G,
4230 * otherwise we divide all available queues amongst the channels
4231 * capped by the number of available cores.
4232 */
4233 if (n10g) {
4234 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
4235 num_online_cpus());
4236 s->ofldqsets = roundup(i, adap->params.nports);
4237 } else
4238 s->ofldqsets = adap->params.nports;
4239 /* For RDMA one Rx queue per channel suffices */
4240 s->rdmaqs = adap->params.nports;
4241 }
4242
4243 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4244 struct sge_eth_rxq *r = &s->ethrxq[i];
4245
4246 init_rspq(&r->rspq, 0, 0, 1024, 64);
4247 r->fl.size = 72;
4248 }
4249
4250 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4251 s->ethtxq[i].q.size = 1024;
4252
4253 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4254 s->ctrlq[i].q.size = 512;
4255
4256 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4257 s->ofldtxq[i].q.size = 1024;
4258
4259 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
4260 struct sge_ofld_rxq *r = &s->ofldrxq[i];
4261
4262 init_rspq(&r->rspq, 0, 0, 1024, 64);
4263 r->rspq.uld = CXGB4_ULD_ISCSI;
4264 r->fl.size = 72;
4265 }
4266
4267 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4268 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4269
4270 init_rspq(&r->rspq, 0, 0, 511, 64);
4271 r->rspq.uld = CXGB4_ULD_RDMA;
4272 r->fl.size = 72;
4273 }
4274
4275 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
4276 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
4277}
4278
4279/*
4280 * Reduce the number of Ethernet queues across all ports to at most n.
4281 * n provides at least one queue per port.
4282 */
4283static void __devinit reduce_ethqs(struct adapter *adap, int n)
4284{
4285 int i;
4286 struct port_info *pi;
4287
4288 while (n < adap->sge.ethqsets)
4289 for_each_port(adap, i) {
4290 pi = adap2pinfo(adap, i);
4291 if (pi->nqsets > 1) {
4292 pi->nqsets--;
4293 adap->sge.ethqsets--;
4294 if (adap->sge.ethqsets <= n)
4295 break;
4296 }
4297 }
4298
4299 n = 0;
4300 for_each_port(adap, i) {
4301 pi = adap2pinfo(adap, i);
4302 pi->first_qset = n;
4303 n += pi->nqsets;
4304 }
4305}
4306
4307/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4308#define EXTRA_VECS 2
4309
4310static int __devinit enable_msix(struct adapter *adap)
4311{
4312 int ofld_need = 0;
4313 int i, err, want, need;
4314 struct sge *s = &adap->sge;
4315 unsigned int nchan = adap->params.nports;
4316 struct msix_entry entries[MAX_INGQ + 1];
4317
4318 for (i = 0; i < ARRAY_SIZE(entries); ++i)
4319 entries[i].entry = i;
4320
4321 want = s->max_ethqsets + EXTRA_VECS;
4322 if (is_offload(adap)) {
4323 want += s->rdmaqs + s->ofldqsets;
4324 /* need nchan for each possible ULD */
4325 ofld_need = 2 * nchan;
4326 }
4327 need = adap->params.nports + EXTRA_VECS + ofld_need;
4328
4329 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
4330 want = err;
4331
4332 if (!err) {
4333 /*
4334 * Distribute available vectors to the various queue groups.
4335 * Every group gets its minimum requirement and NIC gets top
4336 * priority for leftovers.
4337 */
4338 i = want - EXTRA_VECS - ofld_need;
4339 if (i < s->max_ethqsets) {
4340 s->max_ethqsets = i;
4341 if (i < s->ethqsets)
4342 reduce_ethqs(adap, i);
4343 }
4344 if (is_offload(adap)) {
4345 i = want - EXTRA_VECS - s->max_ethqsets;
4346 i -= ofld_need - nchan;
4347 s->ofldqsets = (i / nchan) * nchan; /* round down */
4348 }
4349 for (i = 0; i < want; ++i)
4350 adap->msix_info[i].vec = entries[i].vector;
4351 } else if (err > 0)
4352 dev_info(adap->pdev_dev,
4353 "only %d MSI-X vectors left, not using MSI-X\n", err);
4354 return err;
4355}
4356
4357#undef EXTRA_VECS
4358
671b0060
DM
4359static int __devinit init_rss(struct adapter *adap)
4360{
4361 unsigned int i, j;
4362
4363 for_each_port(adap, i) {
4364 struct port_info *pi = adap2pinfo(adap, i);
4365
4366 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4367 if (!pi->rss)
4368 return -ENOMEM;
4369 for (j = 0; j < pi->rss_size; j++)
278bc429 4370 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
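		/*
		 * Note (added for clarity): ethtool_rxfh_indir_default(j, n)
		 * evaluates to j % n, so the default indirection table maps
		 * RSS hash bucket j to queue set j % nqsets for this port.
		 */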
671b0060
DM
4371 }
4372 return 0;
4373}
4374
118969ed 4375static void __devinit print_port_info(const struct net_device *dev)
b8ff05a9
DM
4376{
4377 static const char *base[] = {
a0881cab 4378 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
7d5e77aa 4379 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
b8ff05a9
DM
4380 };
4381
b8ff05a9 4382 char buf[80];
118969ed 4383 char *bufp = buf;
f1a051b9 4384 const char *spd = "";
118969ed
DM
4385 const struct port_info *pi = netdev_priv(dev);
4386 const struct adapter *adap = pi->adapter;
f1a051b9
DM
4387
4388 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4389 spd = " 2.5 GT/s";
4390 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4391 spd = " 5 GT/s";
b8ff05a9 4392
118969ed
DM
4393 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4394 bufp += sprintf(bufp, "100/");
4395 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4396 bufp += sprintf(bufp, "1000/");
4397 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4398 bufp += sprintf(bufp, "10G/");
4399 if (bufp != buf)
4400 --bufp;
4401 sprintf(bufp, "BASE-%s", base[pi->port_type]);
4402
4403 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4404 adap->params.vpd.id, adap->params.rev, buf,
4405 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4406 (adap->flags & USING_MSIX) ? " MSI-X" :
4407 (adap->flags & USING_MSI) ? " MSI" : "");
4408 netdev_info(dev, "S/N: %s, E/C: %s\n",
4409 adap->params.vpd.sn, adap->params.vpd.ec);
b8ff05a9
DM
4410}
4411
ef306b50
DM
4412static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
4413{
e5c8ae5f 4414 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
4415}
4416
06546391
DM
4417/*
4418 * Free the following resources:
4419 * - memory used for tables
4420 * - MSI/MSI-X
4421 * - net devices
4422 * - resources FW is holding for us
4423 */
4424static void free_some_resources(struct adapter *adapter)
4425{
4426 unsigned int i;
4427
4428 t4_free_mem(adapter->l2t);
4429 t4_free_mem(adapter->tids.tid_tab);
4430 disable_msi(adapter);
4431
4432 for_each_port(adapter, i)
671b0060
DM
4433 if (adapter->port[i]) {
4434 kfree(adap2pinfo(adapter, i)->rss);
06546391 4435 free_netdev(adapter->port[i]);
671b0060 4436 }
06546391 4437 if (adapter->flags & FW_OK)
060e0c75 4438 t4_fw_bye(adapter, adapter->fn);
06546391
DM
4439}
4440
2ed28baa 4441#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 4442#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9
DM
4443 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4444
4445static int __devinit init_one(struct pci_dev *pdev,
4446 const struct pci_device_id *ent)
4447{
4448 int func, i, err;
4449 struct port_info *pi;
c8f44aff 4450 bool highdma = false;
b8ff05a9
DM
4451 struct adapter *adapter = NULL;
4452
4453 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4454
4455 err = pci_request_regions(pdev, KBUILD_MODNAME);
4456 if (err) {
4457 /* Just info, some other driver may have claimed the device. */
4458 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4459 return err;
4460 }
4461
060e0c75 4462 /* We control everything through one PF */
b8ff05a9 4463 func = PCI_FUNC(pdev->devfn);
060e0c75 4464 if (func != ent->driver_data) {
204dc3c0 4465 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 4466 goto sriov;
204dc3c0 4467 }
b8ff05a9
DM
4468
4469 err = pci_enable_device(pdev);
4470 if (err) {
4471 dev_err(&pdev->dev, "cannot enable PCI device\n");
4472 goto out_release_regions;
4473 }
4474
4475 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 4476 highdma = true;
b8ff05a9
DM
4477 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4478 if (err) {
4479 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
4480 "coherent allocations\n");
4481 goto out_disable_device;
4482 }
4483 } else {
4484 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4485 if (err) {
4486 dev_err(&pdev->dev, "no usable DMA configuration\n");
4487 goto out_disable_device;
4488 }
4489 }
4490
4491 pci_enable_pcie_error_reporting(pdev);
ef306b50 4492 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
4493 pci_set_master(pdev);
4494 pci_save_state(pdev);
4495
4496 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4497 if (!adapter) {
4498 err = -ENOMEM;
4499 goto out_disable_device;
4500 }
4501
4502 adapter->regs = pci_ioremap_bar(pdev, 0);
4503 if (!adapter->regs) {
4504 dev_err(&pdev->dev, "cannot map device registers\n");
4505 err = -ENOMEM;
4506 goto out_free_adapter;
4507 }
4508
4509 adapter->pdev = pdev;
4510 adapter->pdev_dev = &pdev->dev;
3069ee9b 4511 adapter->mbox = func;
060e0c75 4512 adapter->fn = func;
b8ff05a9
DM
4513 adapter->msg_enable = dflt_msg_enable;
4514 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4515
4516 spin_lock_init(&adapter->stats_lock);
4517 spin_lock_init(&adapter->tid_release_lock);
4518
4519 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
4520 INIT_WORK(&adapter->db_full_task, process_db_full);
4521 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
4522
4523 err = t4_prep_adapter(adapter);
4524 if (err)
4525 goto out_unmap_bar;
636f9d37 4526 setup_memwin(adapter);
b8ff05a9 4527 err = adap_init0(adapter);
636f9d37 4528 setup_memwin_rdma(adapter);
b8ff05a9
DM
4529 if (err)
4530 goto out_unmap_bar;
4531
4532 for_each_port(adapter, i) {
4533 struct net_device *netdev;
4534
4535 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4536 MAX_ETH_QSETS);
4537 if (!netdev) {
4538 err = -ENOMEM;
4539 goto out_free_dev;
4540 }
4541
4542 SET_NETDEV_DEV(netdev, &pdev->dev);
4543
4544 adapter->port[i] = netdev;
4545 pi = netdev_priv(netdev);
4546 pi->adapter = adapter;
4547 pi->xact_addr_filt = -1;
b8ff05a9 4548 pi->port_id = i;
b8ff05a9
DM
4549 netdev->irq = pdev->irq;
4550
2ed28baa
MM
4551 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4552 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4553 NETIF_F_RXCSUM | NETIF_F_RXHASH |
4554 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
c8f44aff
MM
4555 if (highdma)
4556 netdev->hw_features |= NETIF_F_HIGHDMA;
4557 netdev->features |= netdev->hw_features;
b8ff05a9
DM
4558 netdev->vlan_features = netdev->features & VLAN_FEAT;
4559
01789349
JP
4560 netdev->priv_flags |= IFF_UNICAST_FLT;
4561
b8ff05a9
DM
4562 netdev->netdev_ops = &cxgb4_netdev_ops;
4563 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
4564 }
4565
4566 pci_set_drvdata(pdev, adapter);
4567
4568 if (adapter->flags & FW_OK) {
060e0c75 4569 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
4570 if (err)
4571 goto out_free_dev;
4572 }
4573
4574 /*
4575 * Configure queues and allocate tables now, they can be needed as
4576 * soon as the first register_netdev completes.
4577 */
4578 cfg_queues(adapter);
4579
4580 adapter->l2t = t4_init_l2t();
4581 if (!adapter->l2t) {
4582 /* We tolerate a lack of L2T, giving up some functionality */
4583 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4584 adapter->params.offload = 0;
4585 }
4586
4587 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4588 dev_warn(&pdev->dev, "could not allocate TID table, "
4589 "continuing\n");
4590 adapter->params.offload = 0;
4591 }
4592
f7cabcdd
DM
4593 /* See what interrupts we'll be using */
4594 if (msi > 1 && enable_msix(adapter) == 0)
4595 adapter->flags |= USING_MSIX;
4596 else if (msi > 0 && pci_enable_msi(pdev) == 0)
4597 adapter->flags |= USING_MSI;
4598
671b0060
DM
4599 err = init_rss(adapter);
4600 if (err)
4601 goto out_free_dev;
4602
b8ff05a9
DM
4603 /*
4604 * The card is now ready to go. If any errors occur during device
4605 * registration we do not fail the whole card but rather proceed only
4606 * with the ports we manage to register successfully. However we must
4607 * register at least one net device.
4608 */
4609 for_each_port(adapter, i) {
a57cabe0
DM
4610 pi = adap2pinfo(adapter, i);
4611 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
4612 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
4613
b8ff05a9
DM
4614 err = register_netdev(adapter->port[i]);
4615 if (err)
b1a3c2b6 4616 break;
b1a3c2b6
DM
4617 adapter->chan_map[pi->tx_chan] = i;
4618 print_port_info(adapter->port[i]);
b8ff05a9 4619 }
b1a3c2b6 4620 if (i == 0) {
b8ff05a9
DM
4621 dev_err(&pdev->dev, "could not register any net devices\n");
4622 goto out_free_dev;
4623 }
b1a3c2b6
DM
4624 if (err) {
4625 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
4626 err = 0;
6403eab1 4627 }
b8ff05a9
DM
4628
4629 if (cxgb4_debugfs_root) {
4630 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
4631 cxgb4_debugfs_root);
4632 setup_debugfs(adapter);
4633 }
4634
6482aa7c
DLR
4635 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4636 pdev->needs_freset = 1;
4637
b8ff05a9
DM
4638 if (is_offload(adapter))
4639 attach_ulds(adapter);
4640
b8ff05a9
DM
4641sriov:
4642#ifdef CONFIG_PCI_IOV
4643 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
4644 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
4645 dev_info(&pdev->dev,
4646 "instantiated %u virtual functions\n",
4647 num_vf[func]);
4648#endif
4649 return 0;
4650
4651 out_free_dev:
06546391 4652 free_some_resources(adapter);
b8ff05a9
DM
4653 out_unmap_bar:
4654 iounmap(adapter->regs);
4655 out_free_adapter:
4656 kfree(adapter);
4657 out_disable_device:
4658 pci_disable_pcie_error_reporting(pdev);
4659 pci_disable_device(pdev);
4660 out_release_regions:
4661 pci_release_regions(pdev);
4662 pci_set_drvdata(pdev, NULL);
4663 return err;
4664}
4665
4666static void __devexit remove_one(struct pci_dev *pdev)
4667{
4668 struct adapter *adapter = pci_get_drvdata(pdev);
4669
636f9d37 4670#ifdef CONFIG_PCI_IOV
b8ff05a9
DM
4671 pci_disable_sriov(pdev);
4672
636f9d37
VP
4673#endif
4674
b8ff05a9
DM
4675 if (adapter) {
4676 int i;
4677
4678 if (is_offload(adapter))
4679 detach_ulds(adapter);
4680
4681 for_each_port(adapter, i)
8f3a7676 4682 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
4683 unregister_netdev(adapter->port[i]);
4684
4685 if (adapter->debugfs_root)
4686 debugfs_remove_recursive(adapter->debugfs_root);
4687
aaefae9b
DM
4688 if (adapter->flags & FULL_INIT_DONE)
4689 cxgb_down(adapter);
b8ff05a9 4690
06546391 4691 free_some_resources(adapter);
b8ff05a9
DM
4692 iounmap(adapter->regs);
4693 kfree(adapter);
4694 pci_disable_pcie_error_reporting(pdev);
4695 pci_disable_device(pdev);
4696 pci_release_regions(pdev);
4697 pci_set_drvdata(pdev, NULL);
a069ec91 4698 } else
b8ff05a9
DM
4699 pci_release_regions(pdev);
4700}
4701
4702static struct pci_driver cxgb4_driver = {
4703 .name = KBUILD_MODNAME,
4704 .id_table = cxgb4_pci_tbl,
4705 .probe = init_one,
4706 .remove = __devexit_p(remove_one),
204dc3c0 4707 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
4708};
4709
4710static int __init cxgb4_init_module(void)
4711{
4712 int ret;
4713
3069ee9b
VP
4714 workq = create_singlethread_workqueue("cxgb4");
4715 if (!workq)
4716 return -ENOMEM;
4717
b8ff05a9
DM
4718 /* Debugfs support is optional, just warn if this fails */
4719 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
4720 if (!cxgb4_debugfs_root)
4721 pr_warning("could not create debugfs entry, continuing\n");
4722
4723 ret = pci_register_driver(&cxgb4_driver);
4724 if (ret < 0)
4725 debugfs_remove(cxgb4_debugfs_root);
4726 return ret;
4727}
4728
4729static void __exit cxgb4_cleanup_module(void)
4730{
4731 pci_unregister_driver(&cxgb4_driver);
4732 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3069ee9b
VP
4733 flush_workqueue(workq);
4734 destroy_workqueue(workq);
b8ff05a9
DM
4735}
4736
4737module_init(cxgb4_init_module);
4738module_exit(cxgb4_cleanup_module);