/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4_dcb.h"

#include <../drivers/net/bonding/bonding.h>
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free memory.
 */
#define MAX_SGE_TIMERVAL 200U
/*
 * Physical Function provisioning constants.
 */
enum {
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,
};
#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress
 * Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 * neither will have Free Lists associated with them.  For each
 * Ethernet/Control Egress Queue and for each Free List, we need an
 * Egress Context.
 */
enum {
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
#endif
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	while (1) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));

		if (portn == 0)
			return pmask;

		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}
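/*
 * A quick sketch of the lowest-set-bit isolation used above (a standard bit
 * trick, shown with illustrative values): for portvec = 0b1010,
 * portvec - 1 = 0b1001, portvec & (portvec - 1) = 0b1000, so
 * portvec ^ (portvec & (portvec - 1)) = 0b0010 -- exactly the lowest set
 * bit.  Each loop iteration peels one active port off the vector this way.
 */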
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static const struct pci_device_id cxgb4_pci_tbl[] = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x400d, -1),
	CH_DEVICE(0x400e, -1),
	CH_DEVICE(0x4080, -1),
	CH_DEVICE(0x4081, -1),
	CH_DEVICE(0x4082, -1),
	CH_DEVICE(0x4083, -1),
	CH_DEVICE(0x4084, -1),
	CH_DEVICE(0x4085, -1),
	CH_DEVICE(0x4086, -1),
	CH_DEVICE(0x4087, -1),
	CH_DEVICE(0x4088, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x4480, 4),
	CH_DEVICE(0x4481, 4),
	CH_DEVICE(0x4482, 4),
	CH_DEVICE(0x4483, 4),
	CH_DEVICE(0x4484, 4),
	CH_DEVICE(0x4485, 4),
	CH_DEVICE(0x4486, 4),
	CH_DEVICE(0x4487, 4),
	CH_DEVICE(0x4488, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5014, 4),
	CH_DEVICE(0x5015, 4),
	CH_DEVICE(0x5080, 4),
	CH_DEVICE(0x5081, 4),
	CH_DEVICE(0x5082, 4),
	CH_DEVICE(0x5083, 4),
	CH_DEVICE(0x5084, 4),
	CH_DEVICE(0x5085, 4),
	CH_DEVICE(0x5086, 4),
	CH_DEVICE(0x5087, 4),
	CH_DEVICE(0x5088, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	CH_DEVICE(0x5414, 4),
	CH_DEVICE(0x5415, 4),
	CH_DEVICE(0x5480, 4),
	CH_DEVICE(0x5481, 4),
	CH_DEVICE(0x5482, 4),
	CH_DEVICE(0x5483, 4),
	CH_DEVICE(0x5484, 4),
	CH_DEVICE(0x5485, 4),
	CH_DEVICE(0x5486, 4),
	CH_DEVICE(0x5487, 4),
	CH_DEVICE(0x5488, 4),
	{ 0, }
};
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
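/*
 * Worked example for the default offset of 2: a received frame begins with a
 * 14-byte Ethernet header, so placing the frame at buffer offset 2 puts the
 * IP header at offset 16, a 4-byte-aligned address.  With an offset of 0 the
 * IP header would land at offset 14 and every 32-bit header field access
 * would be misaligned.
 */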
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
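/*
 * Note on the batching above: addr[] holds only ARRAY_SIZE(addr) pointers,
 * so the secondary-unicast and multicast lists are shipped to the firmware
 * in small batches; "free" is true only for the first batch so that the
 * previously programmed filters are released exactly once.
 */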
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_GET(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_rdmaciq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmaciq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_ciq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL :
				MPS_T5_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
	} else {
		memset(data, 0, 2 * sizeof(u64));
	}
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
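/*
 * For example (hypothetical chip value): a version-5 part at release 1
 * would report 5 | (1 << 10) | (1 << 16) = 0x10405, i.e. chip version 5,
 * revision 1, register dump version 1.
 */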
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		/* ... T4 register address range pairs elided ... */
	};

	static const unsigned int t5_reg_ranges[] = {
		/* ... T5 register address range pairs elided ... */
	};

	int i;
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}
static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
		v |= SUPPORTED_FIBRE;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_BP40_BA)
		v |= SUPPORTED_40000baseSR4_Full;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	if (caps & ADVERTISED_40000baseSR4_Full)
		v |= FW_PORT_CAP_SPEED_40G;
	return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	return 0;
}
static unsigned int speed_to_caps(int speed)
{
	if (speed == 100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == 1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == 10000)
		return FW_PORT_CAP_SPEED_10G;
	if (speed == 40000)
		return FW_PORT_CAP_SPEED_40G;
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) ||
		    (speed == 1000) ||
		    (speed == 10000) ||
		    (speed == 40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 *	set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
				unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
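/* Illustrative encoding example (assuming the usual QINTR_TIMER_IDX(x) =
 * (x) << 1 and QINTR_CNT_EN = 1 definitions): with a timer_val[] table of
 * {1, 5, 10, 50, 100, 200} us, a request of us = 100 and cnt = 32 yields
 * closest_timer() == 4, so
 *
 *	q->intr_params = (4 << 1) | QINTR_CNT_EN = 0x9
 *
 * and qtimer_val() above recovers the hold-off time as
 * timer_val[0x9 >> 1] = 100.
 */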
/**
 *	set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 *	@dev: the network device
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
{
	int i, err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = set_rspq_intr_params(&q->rspq, us, cnt);
		if (err)
			return err;
	}
	return 0;
}
static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
{
	int i;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++)
		q->rspq.adaptive_rx = adaptive_rx;

	return 0;
}
static int get_adaptive_rx_setting(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	return q->rspq.adaptive_rx;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
				  c->rx_max_coalesced_frames);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
	return 0;
}
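/* These two callbacks back the standard ethtool coalescing interface; for
 * example (illustrative):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 adaptive-rx on
 *
 * maps rx-usecs to the hold-off timer, rx-frames to the packet-count
 * threshold, and adaptive-rx to q->rspq.adaptive_rx.
 */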
/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
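/* Worked example for eeprom_ptov() (hypothetical values): with @fn = 2 and
 * @sz = EEPROMPFSIZE = 1K, A = 2K.  Then:
 *
 *	phys 0x100  -> virt 0x100 + 31K = 0x7D00    ([0..1K) region)
 *	phys 0x410  -> virt 31744 - 2048 + 16 = 29712  ([1K..1K+A) region)
 *	phys 4K     -> virt 4K - 1K - 2K = 1K       ([1K+A..ES) region)
 */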
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
	 */
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s,"
			 " reload cxgb4 driver\n", ef->data);
	return ret;
}
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh	   = get_rss_table,
	.set_rxfh	   = set_rss_table,
	.flash_device      = set_flash,
};
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file_inode(file)->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;
	__be32 *data;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	data = t4_alloc_mem(count);
	if (!data)
		return -ENOMEM;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (ret) {
		t4_free_mem(data);
		return ret;
	}
	ret = copy_to_user(buf, data, count);

	t4_free_mem(data);
	if (ret)
		return -EFAULT;

	*ppos = pos + count;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = mem_read,
	.llseek  = default_llseek,
};
static void add_debugfs_mem(struct adapter *adap, const char *name,
			    unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

static int setup_debugfs(struct adapter *adap)
{
	int i;
	u32 size;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE) {
		size = t4_read_reg(adap, MA_EDRAM0_BAR);
		add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
	}
	if (i & EDRAM1_ENABLE) {
		size = t4_read_reg(adap, MA_EDRAM1_BAR);
		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
	}
	if (is_t4(adap->params.chip)) {
		size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
		if (i & EXT_MEM_ENABLE)
			add_debugfs_mem(adap, "mc", MEM_MC,
					EXT_MEM_SIZE_GET(size));
	} else {
		if (i & EXT_MEM_ENABLE) {
			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
			add_debugfs_mem(adap, "mc0", MEM_MC0,
					EXT_MEM_SIZE_GET(size));
		}
		if (i & EXT_MEM1_ENABLE) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
			add_debugfs_mem(adap, "mc1", MEM_MC1,
					EXT_MEM_SIZE_GET(size));
		}
	}
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
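/* Illustrative accounting: an IPv4 server consumes a single stid bit and
 * bumps stids_in_use by one, while an IPv6 server reserves a naturally
 * aligned region of four bits (bitmap_find_free_region() with order 2, i.e.
 * 2^2 bits) and bumps stids_in_use by four, matching the four-TID TCAM
 * footprint noted in the comment above.
 */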
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
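/* The deferred-release list is an intrusive singly linked list threaded
 * through tid_tab[] itself: each pending slot stores the pointer to the next
 * pending slot.  Because those slots are pointer-aligned, the low two bits
 * of the head pointer are free and are borrowed to carry the Tx channel
 * number.  Illustrative decode (mirroring process_tid_release_list() below):
 *
 *	void **p = adap->tid_release_head;
 *	unsigned int chan = (uintptr_t)p & 3;	// recover the channel
 *	p = (void *)p - chan;			// recover the slot address
 */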
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
		__set_bit(0, t->stid_bmap);

	return 0;
}
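/* Layout of the single t4_alloc_mem() block carved up above (in order):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[]    | ftid_tab[nftids + nsftids]
 *
 * One allocation keeps the tables contiguous, so each pointer is derived
 * from the end of the previous table.
 */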
int cxgb4_clip_get(const struct net_device *dev,
		   const struct in6_addr *lip)
{
	struct adapter *adap;
	struct fw_clip_cmd c;

	adap = netdev2adap(dev);
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
	c.ip_hi = *(__be64 *)(lip->s6_addr);
	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
EXPORT_SYMBOL(cxgb4_clip_get);
int cxgb4_clip_release(const struct net_device *dev,
		       const struct in6_addr *lip)
{
	struct adapter *adap;
	struct fw_clip_cmd c;

	adap = netdev2adap(dev);
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
			      FW_CMD_REQUEST | FW_CMD_READ);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
	c.ip_hi = *(__be64 *)(lip->s6_addr);
	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
EXPORT_SYMBOL(cxgb4_clip_release);
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
				LISTSVR_IPV6(0)) | QUEUENO(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
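/* Worked example (hypothetical MTU table): with mtus[] starting
 * {88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ...} and @mtu = 1400,
 * the scan stops at 1280 -- the largest entry <= 1400 -- so *idx = 6 and
 * 1280 is returned.  For @mtu = 64, smaller than every entry, index 0 (88)
 * is returned.
 */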
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
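/* Worked example (hypothetical values, using the same table as above): with
 * header_size = 52 (IPv4 + TCP with timestamps), data_size_max = 1448 and
 * data_size_align = 8, max_mtu = 1500.  The 1488 entry gives a data segment
 * of 1436 (not 8-aligned) while 1500 gives 1448 (8-aligned), so
 * aligned_mtu_idx and mtu_idx coincide and 1500 is returned.  Had the
 * aligned candidate been more than one entry below the closest MTU, the
 * closest MTU would have won instead.
 */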
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
	if (is_t4(adap->params.chip)) {
		lp_count = G_LP_COUNT(v1);
		hp_count = G_HP_COUNT(v1);
	} else {
		lp_count = G_LP_COUNT_T5(v1);
		hp_count = G_HP_COUNT_T5(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	int ret;

	ret = t4_fwaddrspace_write(adap, adap->mbox,
				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
	return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
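/* Each egress queue has a 24-byte entry in the DBQ context table; the read
 * above fetches the 8 bytes at offset 8 of that entry.  Per the shifts and
 * masks used, the consumer index occupies bits [40:25] of that word and the
 * producer index bits [24:9], each 16 bits wide.
 */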
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(qid) | PIDX(delta));
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
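/* Illustrative wrap-around case: for a queue of size 1024 with hw_pidx =
 * 1000 and a caller pidx of 8, the producer has wrapped, so delta =
 * 1024 - 1000 + 8 = 32 descriptors are pushed with a single doorbell write.
 */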
void cxgb4_disable_db_coalescing(struct net_device *dev)
{
	struct adapter *adap;

	adap = netdev2adap(dev);
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
			 F_NOCOALESCE);
}
EXPORT_SYMBOL(cxgb4_disable_db_coalescing);

void cxgb4_enable_db_coalescing(struct net_device *dev)
{
	struct adapter *adap;

	adap = netdev2adap(dev);
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
}
EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
	edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
	mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t4(adap->params.chip)) {
			/* T4 only has a single memory channel */
			goto err;
		} else {
			mc1_size = EXT_MEM_SIZE_GET(
					t4_read_reg(adap,
						    MA_EXT_MEMORY1_BAR)) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
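/* Illustrative address resolution (hypothetical sizes): with 256 MB of EDC0,
 * 256 MB of EDC1 and 2 GB of MC0, an offset of 640 MB is past edc1_end
 * (512 MB) but below mc0_end, so it resolves to MEM_MC0 at memaddr =
 * 640 MB - 512 MB = 128 MB.
 */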
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
	hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
		if (is_t4(adap->params.chip)) {
			lp_count = G_LP_COUNT(v1);
			hp_count = G_HP_COUNT(v1);
		} else {
			lp_count = G_LP_COUNT_T5(v1);
			hp_count = G_HP_COUNT_T5(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	t4_set_reg_field(adap, SGE_INT_ENABLE3,
			 DBFIFO_HP_INT | DBFIFO_LP_INT,
			 DBFIFO_HP_INT | DBFIFO_LP_INT);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(delta));
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		unsigned int s_qpp;
		unsigned short udb_density;
		unsigned long qpshift;
		int page;
		u32 udb;

		dev_warn(adap->pdev_dev,
			 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
			 dropped_db, qid,
			 (dropped_db >> 14) & 1,
			 (dropped_db >> 13) & 1,
			 pidx_inc);

		drain_db_fifo(adap, 1);

		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
		udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		qpshift = PAGE_SHIFT - ilog2(udb_density);
		udb = qid << qpshift;
		udb &= PAGE_MASK;
		page = udb / PAGE_SIZE;
		udb += (qid - (page * udb_density)) * 128;

		writel(PIDX(pidx_inc), adap->bar2 + udb + 8);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
}
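/* Worked example of the T5 doorbell address math above (hypothetical
 * values): with a 4 KB page size and udb_density = 16 queues per page,
 * qpshift = 12 - 4 = 8.  For qid = 35, udb = 35 << 8 = 0x2300, which after
 * PAGE_MASK'ing becomes 0x2000 (page 2), and the final user-doorbell offset
 * is 0x2000 + (35 - 32) * 128 = 0x2180; the lost pidx increment is then
 * replayed at +8 within that doorbell.
 */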
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3,
				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->fn;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
/* Check if netdev on which event has occurred belongs to us or not. Return
 * success (true) if it belongs otherwise failure (false).
 * Called with rcu_read_lock() held.
 */
#if IS_ENABLED(CONFIG_IPV6)
static bool cxgb4_netdev(const struct net_device *netdev)
{
	struct adapter *adap;
	int i;

	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
		for (i = 0; i < MAX_NPORTS; i++)
			if (adap->port[i] == netdev)
				return true;
	return false;
}

static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
		    unsigned long event)
{
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	if (cxgb4_netdev(event_dev)) {
		switch (event) {
		case NETDEV_UP:
			ret = cxgb4_clip_get(event_dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
			if (ret < 0) {
				rcu_read_unlock();
				return ret;
			}
			ret = NOTIFY_OK;
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
			ret = NOTIFY_OK;
			break;
		default:
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev;
	int ret = NOTIFY_DONE;
	struct bonding *bond = netdev_priv(ifa->idev->dev);
	struct list_head *iter;
	struct slave *slave;
	struct pci_dev *first_pdev = NULL;

	if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
		event_dev = vlan_dev_real_dev(ifa->idev->dev);
		ret = clip_add(event_dev, ifa, event);
	} else if (ifa->idev->dev->flags & IFF_MASTER) {
		/* It is possible that two different adapters are bonded in one
		 * bond. We need to find such different adapters and add clip
		 * in all of them only once.
		 */
		bond_for_each_slave(bond, slave, iter) {
			if (!first_pdev) {
				ret = clip_add(slave->dev, ifa, event);
				/* If clip_add is success then only initialize
				 * first_pdev since it means it is our device
				 */
				if (ret == NOTIFY_OK)
					first_pdev = to_pci_dev(
							slave->dev->dev.parent);
			} else if (first_pdev !=
				   to_pci_dev(slave->dev->dev.parent))
					ret = clip_add(slave->dev, ifa, event);
		}
	} else
		ret = clip_add(ifa->idev->dev, ifa, event);

	return ret;
}

static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};
/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}
static int update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}
	return ret;
}
static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
*adapter
)
4602 t4_intr_disable(adapter
);
4603 cancel_work_sync(&adapter
->tid_release_task
);
4604 cancel_work_sync(&adapter
->db_full_task
);
4605 cancel_work_sync(&adapter
->db_drop_task
);
4606 adapter
->tid_release_task_busy
= false;
4607 adapter
->tid_release_head
= NULL
;
4609 if (adapter
->flags
& USING_MSIX
) {
4610 free_msix_queue_irqs(adapter
);
4611 free_irq(adapter
->msix_info
[0].vec
, adapter
);
4613 free_irq(adapter
->pdev
->irq
, adapter
);
4614 quiesce_rx(adapter
);
4615 t4_sge_stop(adapter
);
4616 t4_free_sge_resources(adapter
);
4617 adapter
->flags
&= ~FULL_INIT_DONE
;
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  The checks for all
 * the common problems with doing this like the filter being locked, currently
 * pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & F_PORT) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
/* Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
	struct fw_ldst_cmd ldst_cmd;
	u32 val;
	int ret;

	/* Construct and send the Firmware LDST Command to retrieve the
	 * specified PCI-E Configuration Space register.
	 */
	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		htonl(FW_CMD_OP(FW_LDST_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ |
		      FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
	ldst_cmd.u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
	ldst_cmd.u.pcie.r = reg;
	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
			 &ldst_cmd);

	/* If the LDST Command succeeded, extract the returned register
	 * value.  Otherwise read it directly ourselves.
	 */
	if (ret == 0)
		val = ntohl(ldst_cmd.u.pcie.data[0]);
	else
		t4_hw_pci_read_cfg4(adap, reg, &val);

	return val;
}
static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;

	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/* Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
		adap->t4_bar0 = bar0;

		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     mem_win0_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     mem_win1_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     mem_win2_base | BIR(0) |
		     WINDOW(ilog2(mem_win2_aperture) - 10));
	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
}
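
/* A minimal decode of the WINDOW() encoding used above, kept under #if 0
 * since it is illustrative only: the hardware expects log2 of the aperture
 * in KB units, which is why the driver writes ilog2(aperture) - 10.  The
 * 64KB figure below is an assumption chosen for the example, not a claim
 * about any particular MEMWINn_APERTURE value.
 */
#if 0
static unsigned int memwin_window_field_example(u32 aperture_bytes)
{
	/* ilog2(x) - 10 == log2(x / 1024); e.g. 65536 -> 16 - 10 == 6 */
	return ilog2(aperture_bytes) - 10;
}
#endif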
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		unsigned int start, sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return ret;
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_HDR);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_FIFO);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
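
/* The tx_modq_map value 0xE4 programmed above reads as four 2-bit fields,
 * binary 11 10 01 00: Tx modulation queue 0 maps to channel 0, queue 1 to
 * channel 1, and so on -- i.e. the identity mapping.  A quick decode of
 * that encoding, kept under #if 0 as illustration only:
 */
#if 0
static unsigned int tx_modq_channel_example(unsigned int q)
{
	return (0xE4 >> (2 * q)) & 3;	/* 0->0, 1->1, 2->2, 3->3 */
}
#endif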
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL,
			 PKTSHIFT_MASK,
			 PKTSHIFT(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
			       CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST |
			      FW_CMD_READ);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	adapter->flags |= USING_SOFT_PARAMS;
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
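
/* A self-contained illustration of the residual-byte handling in
 * adap_init0_config() above: t4_memory_rw() moves whole 32-bit words, so a
 * Configuration File of, say, 1027 bytes is written as 1024 aligned bytes
 * plus one final word whose 3 trailing bytes are explicitly zeroed.  This
 * sketch uses memcpy() rather than the driver's direct word read; kept
 * under #if 0 since it is illustrative only.
 */
#if 0
static void cf_residual_example(const u8 *buf, size_t len)
{
	size_t resid = len & 0x3;		/* 1027 & 3  == 3 */
	size_t size  = len & ~(size_t)0x3;	/* 1027 & ~3 == 1024 */
	union { __be32 word; char b[4]; } last = { .word = 0 };
	int i;

	memcpy(&last.word, buf + size, resid);	/* copy the residual bytes */
	for (i = resid; i < 4; i++)
		last.b[i] = 0;			/* zero the padding tail */
	/* ... write 'size' bytes, then the single padded word 'last' ... */
}
#endif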
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
	struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * Get device capabilities and select which we'll be using.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
	 * mode which maps each Virtual Interface to its own section of
	 * the RSS Table and we turn on all map and hash enables ...
	 */
	adapter->flags |= RSS_TNLALLLOOKUP;
	ret = t4_config_glbl_rss(adapter, adapter->mbox,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
	if (ret < 0)
		goto bye;

	/*
	 * Set up our own fundamental resource provisioning ...
	 */
	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
			  PFRES_NEQ, PFRES_NETHCTRL,
			  PFRES_NIQFLINT, PFRES_NIQ,
			  PFRES_TC, PFRES_NVI,
			  FW_PFVF_CMD_CMASK_MASK,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;

	/*
	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will cause
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed forward.
	 */
	for (i = 0; i < SGE_NTIMERS - 1; i++)
		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	s->counter_val[0] = 1;
	for (i = 1; i < SGE_NCOUNTERS; i++)
		s->counter_val[i] = min(intr_cnt[i - 1],
					THRESHOLD_0_GET(THRESHOLD_0_MASK));
	t4_sge_init(adapter);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(
						  adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to "
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif

	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			case FCOE_MASK:
				bits +=  1;
				break;
			case PORT_MASK:
				bits +=  3;
				break;
			case VNIC_ID_MASK:
				bits += 17;
				break;
			case VLAN_MASK:
				bits += 17;
				break;
			case TOS_MASK:
				bits +=  8;
				break;
			case PROTOCOL_MASK:
				bits +=  8;
				break;
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MACMATCH_MASK:
				bits +=  9;
				break;
			case MPSHITTYPE_MASK:
				bits +=  3;
				break;
			case FRAGMENTATION_MASK:
				bits +=  1;
				break;
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
				" using %#x\n", tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);

	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
	 * to support any of the compressed filter fields above.  Newer
	 * versions of the firmware do this automatically but it doesn't hurt
	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
	 * since the firmware automatically turns this on and off when we have
	 * a non-zero number of filters active (since it does have a
	 * performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);

	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
		 "driver parameters\n");
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		if (fw != NULL)
			release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files warn user and exit,
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
					    "No Configuration File present "
					    "on adapter. Using hard-wired "
					    "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if workaround
		 * path is enabled.
		 * 1. For regular filters.
		 * 2. Server filter: These are special filters which are used
		 * to redirect SYN packets to offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
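
/* Worked numbers for the 1492 -> 1488 MTU substitution above: TCP payload
 * is MTU minus 40 bytes of IPv4+TCP headers, so an MTU of 1500 yields 1460
 * bytes (not a multiple of 8) while 1488 yields 1448 (8 * 181).  With the
 * 12-byte TCP Timestamp option negotiated, 1500 - 40 - 12 == 1448 as well,
 * so both common cases land on a payload divisible by 8.
 */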
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		s->rdmaciqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
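
/* cfg_queues() sizing in concrete (assumed) numbers: on a 4-port adapter
 * with two 10G and two 1G ports and MAX_ETH_QSETS == 32, each 1G port gets
 * one queue set and q10g = (32 - (4 - 2)) / 2 == 15, which is then capped
 * by netif_get_num_default_rss_queues() (typically 8).  Result: 8 + 8 + 1 +
 * 1 == 18 Ethernet queue sets before any MSI-X trimming.
 */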
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
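
/* ethtool_rxfh_indir_default(j, n) reduces to j % n, so the init_rss() loop
 * above just spreads the RSS indirection table round-robin across the
 * port's queue sets.  A hedged equivalent of the loop body, #if 0 only:
 */
#if 0
	for (j = 0; j < pi->rss_size; j++)
		pi->rss[j] = j % pi->nqsets;	/* round-robin spread */
#endif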
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less no of segments that can be accommodated in
		 * a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif

	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);