/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>
#include <linux/crash_dump.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter. */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
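/*
 * Illustrative sketch (not part of the driver): with the macros above in
 * scope, t4_pci_id_tbl.h expands into an ordinary PCI device ID table.
 * Using a hypothetical device ID 0x4000 purely for illustration, the
 * preprocessed result looks roughly like:
 *
 *	static const struct pci_device_id cxgb4_pci_tbl[] = {
 *		{PCI_VDEVICE(CHELSIO, (0x4000)), 4},
 *		...
 *		{ 0, }
 *	};
 *
 * so one shared header can emit per-driver tables by redefining these macros.
 */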
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
		 "deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
		 "deprecated parameter");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
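/*
 * Worked example of the comment above: with rx_dma_offset = 2 the chip DMAs
 * the 14-byte Ethernet header to buffer offset 2, so the IP header starts at
 * offset 2 + 14 = 16, a 4-byte boundary; with rx_dma_offset = 0 it would
 * start at offset 14 and every 4-byte IP header field would be misaligned.
 * This is the same trick as the stack's NET_IP_ALIGN/skb_reserve() idiom:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);	// NET_IP_ALIGN == 2 on most arches
 */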
#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf,
		 "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			s = "";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_state_init(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev =
				q->adap->port[q->adap->chan_map[port]];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	if (ulds[q->uld].lro_flush)
		ulds[q->uld].lro_flush(&q->lro_mgr);
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
						  rsp, gl, &q->lro_mgr,
						  &q->napi);
	else
		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
					      rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_iscsirxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
			 adap->port[0]->name, i);

	for_each_iscsitrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int iscsitqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsirxq(s, iscsiqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsirxq[iscsiqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsitrxq(s, iscsitqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsitrxq[iscsitqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--iscsitqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsitrxq[iscsitqidx].rspq);
	while (--iscsiqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsirxq[iscsiqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_iscsirxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsirxq[i].rspq);
	for_each_iscsitrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsitrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);

	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids, bool lro)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following which need to be
	 * synchronized to any changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
	for_each_iscsirxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
	if (err) \
		goto freeout; \
	if (msi_idx > 0) \
		msi_idx += nq; \
} while (0)

	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
void t4_free_mem(void *addr)
{
	kvfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
1320 static int del_filter_wr(struct adapter
*adapter
, int fidx
)
1322 struct filter_entry
*f
= &adapter
->tids
.ftid_tab
[fidx
];
1323 struct sk_buff
*skb
;
1324 struct fw_filter_wr
*fwr
;
1325 unsigned int len
, ftid
;
1328 ftid
= adapter
->tids
.ftid_base
+ fidx
;
1330 skb
= alloc_skb(len
, GFP_KERNEL
);
1334 fwr
= (struct fw_filter_wr
*)__skb_put(skb
, len
);
1335 t4_mk_filtdelwr(ftid
, fwr
, adapter
->sge
.fw_evtq
.abs_id
);
1337 /* Mark the filter as "pending" and ship off the Filter Work Request.
1338 * When we get the Work Request Reply we'll clear the pending status.
1341 t4_mgmt_tx(adapter
, skb
);
1345 static u16
cxgb_select_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1346 void *accel_priv
, select_queue_fallback_t fallback
)
1350 #ifdef CONFIG_CHELSIO_T4_DCB
1351 /* If a Data Center Bridging has been successfully negotiated on this
1352 * link then we'll use the skb's priority to map it to a TX Queue.
1353 * The skb's priority is determined via the VLAN Tag Priority Code
1356 if (cxgb4_dcb_enabled(dev
)) {
1360 err
= vlan_get_tag(skb
, &vlan_tci
);
1361 if (unlikely(err
)) {
1362 if (net_ratelimit())
1364 "TX Packet without VLAN Tag on DCB Link\n");
1367 txq
= (vlan_tci
& VLAN_PRIO_MASK
) >> VLAN_PRIO_SHIFT
;
1368 #ifdef CONFIG_CHELSIO_T4_FCOE
1369 if (skb
->protocol
== htons(ETH_P_FCOE
))
1370 txq
= skb
->priority
& 0x7;
1371 #endif /* CONFIG_CHELSIO_T4_FCOE */
1375 #endif /* CONFIG_CHELSIO_T4_DCB */
1378 txq
= (skb_rx_queue_recorded(skb
)
1379 ? skb_get_rx_queue(skb
)
1380 : smp_processor_id());
1382 while (unlikely(txq
>= dev
->real_num_tx_queues
))
1383 txq
-= dev
->real_num_tx_queues
;
1388 return fallback(dev
, skb
) % dev
->real_num_tx_queues
;
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
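/*
 * Usage sketch (hypothetical values): a caller that wants an Rx queue to
 * interrupt after at most 5 microseconds or 8 accumulated packets,
 * whichever comes first, would do:
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 5, 8);
 *
 * The requested values are mapped onto the nearest entries of the adapter's
 * sge.timer_val[] and sge.counter_val[] tables by closest_timer() and
 * closest_thres() above, since the hardware only supports those discrete
 * hold-off settings.
 */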
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
*adap
)
1483 if (IS_ERR_OR_NULL(adap
->debugfs_root
))
1486 #ifdef CONFIG_DEBUG_FS
1487 t4_setup_debugfs(adap
);
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 2;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
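/*
 * Usage sketch (hypothetical ULD caller): allocate a server TID for an IPv4
 * listener, remembering that the return value is already offset by
 * stid_base and that a negative value means the table is full:
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, my_listen_ctx);
 *	if (stid < 0)
 *		return -ENOMEM;
 *	...
 *	cxgb4_free_stid(&adap->tids, stid, PF_INET);
 *
 * "my_listen_ctx" is a placeholder for whatever per-server state the ULD
 * wants handed back on incoming CPLs.  Note the asymmetric accounting: an
 * IPv6 server grabs an order-1 (two-bit) bitmap region and counts as two
 * stids, matching the extra TCAM cells an IPv6 address consumes.
 */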
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 2;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
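/*
 * Sketch of the pointer-tagging trick above: tid_tab entries are pointers
 * and therefore at least 4-byte aligned, so the low 2 bits of a
 * &t->tid_tab[tid] address are always zero and free to carry the Tx
 * channel number (0-3).  Encoding and decoding look like:
 *
 *	head = (void **)((uintptr_t)p | chan);		// encode
 *	chan = (uintptr_t)head & 3;			// recover channel
 *	p    = (void *)head - chan;			// recover pointer
 *
 * process_tid_release_list() below performs exactly this decode.
 */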
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
		__set_bit(0, t->stid_bmap);

	return 0;
}
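/*
 * Layout sketch: tid_init() makes a single t4_alloc_mem() allocation and
 * carves it into consecutive regions, in this order:
 *
 *	tid_tab[ntids]  atid_tab[natids]  stid_tab[nstids + nsftids]
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)]  ftid_tab[nftids + nsftids]
 *
 * so freeing t->tid_tab releases every table at once.
 */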
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
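/*
 * Usage sketch with hypothetical table contents: if the HW MTU table held
 * { 88, 576, 1500, 9000 }, then
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(mtus, 1400, &idx);
 *
 * would return 576 with idx == 1 (the largest entry not exceeding 1400),
 * while cxgb4_best_mtu(mtus, 40, &idx) would return 88, the smallest entry.
 */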
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
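/*
 * Worked example (hypothetical numbers): with header_size = 40,
 * data_size_max = 1460, data_size_align = 512 and a table containing
 * adjacent entries { ..., 1064, 1500, ... }, the scan stops at 1500 (the
 * largest MTU <= 40 + 1460).  1500 - 40 = 1460 is not a multiple of 512,
 * but 1064 - 40 = 1024 is, and its index is only one away from 1500's, so
 * 1064 is returned in preference to 1500.
 */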
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
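/*
 * Worked example: on a T5 adapter, VI id 0x85 maps to SMT index
 * ((0x85 & 0x7f) << 1) == 0x0a, i.e. row 5 of the 128 x 2 table; on a T6
 * the same VI id maps directly to row (0x85 & 0x7f) == 0x05 of the
 * 256 x 1 table.
 */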
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
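
/* Each egress queue context occupies 24 bytes in EDC0; the producer and
 * consumer indices live in the doubleword at offset 8, CIDX above PIDX
 * (hence the 25- and 9-bit shifts above).  For example, a host-order
 * value of 0x80000400 decodes to cidx = 0x40 and pidx = 0x2.
 */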
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
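
/* The PIDX delta above is computed modulo the ring size.  E.g. with a
 * 1024-entry queue whose hardware PIDX has reached 1020, advancing the
 * host PIDX to 4 yields delta = 1024 - 1020 + 4 = 8 doorbell increments.
 */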
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
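
/* Memory layout example (the sizes here are hypothetical): with 128MB of
 * EDC0 and 128MB of EDC1, an offset of 300MB falls past edc1_end = 256MB
 * and therefore resolves to MC0 at memaddr = 300MB - 256MB = 44MB.
 */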
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}
static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
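
/* Example decode of the T5 dropped-doorbell register above: a raw value
 * of 0x01234567 yields qid = (0x01234567 >> 15) & 0x1ffff = 0x246 and
 * pidx_inc = 0x01234567 & 0x1fff = 0x567, which is then replayed through
 * that queue's BAR2 doorbell.
 */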
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}
void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->pf;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.iscsi_rxq;
		lli.nrxq = adap->sge.iscsiqsets;
	} else if (uld == CXGB4_ULD_ISCSIT) {
		lli.rxq_ids = adap->sge.iscsit_rxq;
		lli.nrxq = adap->sge.niscsitq;
	}
	lli.ntxq = adap->sge.iscsiqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lli.iscsi_ppm = &adap->iscsi_ppm;
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lli.nodeid = dev_to_node(adap->pdev_dev);

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
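
/* Illustrative ULD registration sketch (not from this file; the "demo"
 * names are hypothetical).  A minimal upper-layer driver might do:
 *
 *	static void *demo_add(const struct cxgb4_lld_info *lld)
 *	{
 *		return demo_alloc_state(lld);	// becomes our handle
 *	}
 *
 *	static int demo_state_change(void *handle, enum cxgb4_state state)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxgb4_uld_info demo_uld_info = {
 *		.name = "demo",
 *		.add = demo_add,
 *		.state_change = demo_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &demo_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *
 * uld_attach() above then calls demo_add() once per adapter with that
 * adapter's cxgb4_lld_info.
 */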
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};
static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list*/
	INIT_LIST_HEAD(&adap->mac_hlist);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}
/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, like the filter being locked,
 * currently pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
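
/* Example of the server TID to filter index translation above (the
 * numbers are hypothetical): with sftid_base = 200 and nftids = 96, a
 * server filter at stid = 210 lands at ftid_tab[210 - 200 + 96] =
 * ftid_tab[106], i.e. in the upper (server) part of the filter table.
 */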
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}

		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll        = cxgb_busy_poll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
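
/* Worked example of the lookup chain above (the bytes are hypothetical):
 * if the three little-endian bytes at 0x8 hold 0x000001, the primary
 * image starts at 0x001 << 12 = 0x1000; if the three bytes at
 * 0x1000 + 0xa hold 0x002000, the DRAM segment is at 0x2000; the version
 * is then the big-endian halfword at 0x2000 + 0x27e.
 */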
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
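
/* Residual-write example for the 4-byte rule above: a 1031-byte
 * Configuration File is written as 1028 bytes (1031 & ~0x3) followed by
 * one final word built from data[257] with byte 3 zeroed
 * (resid = 1031 & 0x3 = 3), so no stale bytes past the end of the file
 * reach adapter memory.
 */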
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
3717 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3719 static int adap_init0(struct adapter
*adap
)
3723 enum dev_state state
;
3724 u32 params
[7], val
[7];
3725 struct fw_caps_config_cmd caps_cmd
;
3728 /* Grab Firmware Device Log parameters as early as possible so we have
3729 * access to it for debugging, etc.
3731 ret
= t4_init_devlog_params(adap
);
3735 /* Contact FW, advertising Master capability */
3736 ret
= t4_fw_hello(adap
, adap
->mbox
, adap
->mbox
,
3737 is_kdump_kernel() ? MASTER_MUST
: MASTER_MAY
, &state
);
3739 dev_err(adap
->pdev_dev
, "could not connect to FW, error %d\n",
3743 if (ret
== adap
->mbox
)
3744 adap
->flags
|= MASTER_PF
;
3747 * If we're the Master PF Driver and the device is uninitialized,
3748 * then let's consider upgrading the firmware ... (We always want
3749 * to check the firmware version number in order to A. get it for
3750 * later reporting and B. to warn if the currently loaded firmware
3751 * is excessively mismatched relative to the driver.)
3753 t4_get_fw_version(adap
, &adap
->params
.fw_vers
);
3754 t4_get_bs_version(adap
, &adap
->params
.bs_vers
);
3755 t4_get_tp_version(adap
, &adap
->params
.tp_vers
);
3756 t4_get_exprom_version(adap
, &adap
->params
.er_vers
);
3758 ret
= t4_check_fw_version(adap
);
3759 /* If firmware is too old (not supported by driver) force an update. */
3761 state
= DEV_STATE_UNINIT
;
3762 if ((adap
->flags
& MASTER_PF
) && state
!= DEV_STATE_INIT
) {
3763 struct fw_info
*fw_info
;
3764 struct fw_hdr
*card_fw
;
3765 const struct firmware
*fw
;
3766 const u8
*fw_data
= NULL
;
3767 unsigned int fw_size
= 0;
3769 /* This is the firmware whose headers the driver was compiled
3772 fw_info
= find_fw_info(CHELSIO_CHIP_VERSION(adap
->params
.chip
));
3773 if (fw_info
== NULL
) {
3774 dev_err(adap
->pdev_dev
,
3775 "unable to get firmware info for chip %d.\n",
3776 CHELSIO_CHIP_VERSION(adap
->params
.chip
));
3780 /* allocate memory to read the header of the firmware on the
3783 card_fw
= t4_alloc_mem(sizeof(*card_fw
));
3785 /* Get FW from from /lib/firmware/ */
3786 ret
= request_firmware(&fw
, fw_info
->fw_mod_name
,
3789 dev_err(adap
->pdev_dev
,
3790 "unable to load firmware image %s, error %d\n",
3791 fw_info
->fw_mod_name
, ret
);
3797 /* upgrade FW logic */
3798 ret
= t4_prep_fw(adap
, fw_info
, fw_data
, fw_size
, card_fw
,
3802 release_firmware(fw
);
3803 t4_free_mem(card_fw
);
3810 * Grab VPD parameters. This should be done after we establish a
3811 * connection to the firmware since some of the VPD parameters
3812 * (notably the Core Clock frequency) are retrieved via requests to
3813 * the firmware. On the other hand, we need these fairly early on
3814 * so we do this right after getting ahold of the firmware.
3816 ret
= t4_get_vpd_params(adap
, &adap
->params
.vpd
);
3821 * Find out what ports are available to us. Note that we need to do
3822 * this before calling adap_init0_no_config() since it needs nports
3826 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
3827 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC
);
3828 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 1, &v
, &port_vec
);
3832 adap
->params
.nports
= hweight32(port_vec
);
3833 adap
->params
.portvec
= port_vec
;
3835 /* If the firmware is initialized already, emit a simply note to that
3836 * effect. Otherwise, it's time to try initializing the adapter.
3838 if (state
== DEV_STATE_INIT
) {
3839 dev_info(adap
->pdev_dev
, "Coming up as %s: "\
3840 "Adapter already initialized\n",
3841 adap
->flags
& MASTER_PF
? "MASTER" : "SLAVE");
3843 dev_info(adap
->pdev_dev
, "Coming up as MASTER: "\
3844 "Initializing adapter\n");
3846 /* Find out whether we're dealing with a version of the
3847 * firmware which has configuration file support.
3849 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
3850 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF
));
3851 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 1,
3854 /* If the firmware doesn't support Configuration Files,
3858 dev_err(adap
->pdev_dev
, "firmware doesn't support "
3859 "Firmware Configuration Files\n");
3863 /* The firmware provides us with a memory buffer where we can
3864 * load a Configuration File from the host if we want to
3865 * override the Configuration File in flash.
3867 ret
= adap_init0_config(adap
, reset
);
3868 if (ret
== -ENOENT
) {
3869 dev_err(adap
->pdev_dev
, "no Configuration File "
3870 "present on adapter.\n");
3874 dev_err(adap
->pdev_dev
, "could not initialize "
3875 "adapter, error %d\n", -ret
);
3880 /* Give the SGE code a chance to pull in anything that it needs ...
3881 * Note that this must be called after we retrieve our VPD parameters
3882 * in order to know how to convert core ticks to seconds, etc.
3884 ret
= t4_sge_init(adap
);
3888 if (is_bypass_device(adap
->pdev
->device
))
3889 adap
->params
.bypass
= 1;
3892 * Grab some of our basic fundamental operating parameters.
3894 #define FW_PARAM_DEV(param) \
3895 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3896 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3898 #define FW_PARAM_PFVF(param) \
3899 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3900 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3901 FW_PARAMS_PARAM_Y_V(0) | \
3902 FW_PARAMS_PARAM_Z_V(0)
3904 params
[0] = FW_PARAM_PFVF(EQ_START
);
3905 params
[1] = FW_PARAM_PFVF(L2T_START
);
3906 params
[2] = FW_PARAM_PFVF(L2T_END
);
3907 params
[3] = FW_PARAM_PFVF(FILTER_START
);
3908 params
[4] = FW_PARAM_PFVF(FILTER_END
);
3909 params
[5] = FW_PARAM_PFVF(IQFLINT_START
);
3910 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6, params
, val
);
3913 adap
->sge
.egr_start
= val
[0];
3914 adap
->l2t_start
= val
[1];
3915 adap
->l2t_end
= val
[2];
3916 adap
->tids
.ftid_base
= val
[3];
3917 adap
->tids
.nftids
= val
[4] - val
[3] + 1;
3918 adap
->sge
.ingr_start
= val
[5];
3920 /* qids (ingress/egress) returned from firmware can be anywhere
3921 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3922 * Hence driver needs to allocate memory for this range to
3923 * store the queue info. Get the highest IQFLINT/EQ index returned
3924 * in FW_EQ_*_CMD.alloc command.
3926 params
[0] = FW_PARAM_PFVF(EQ_END
);
3927 params
[1] = FW_PARAM_PFVF(IQFLINT_END
);
3928 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
, val
);
3931 adap
->sge
.egr_sz
= val
[0] - adap
->sge
.egr_start
+ 1;
3932 adap
->sge
.ingr_sz
= val
[1] - adap
->sge
.ingr_start
+ 1;
3934 adap
->sge
.egr_map
= kcalloc(adap
->sge
.egr_sz
,
3935 sizeof(*adap
->sge
.egr_map
), GFP_KERNEL
);
3936 if (!adap
->sge
.egr_map
) {
3941 adap
->sge
.ingr_map
= kcalloc(adap
->sge
.ingr_sz
,
3942 sizeof(*adap
->sge
.ingr_map
), GFP_KERNEL
);
3943 if (!adap
->sge
.ingr_map
) {
3948 /* Allocate the memory for the vaious egress queue bitmaps
3949 * ie starving_fl, txq_maperr and blocked_fl.
3951 adap
->sge
.starving_fl
= kcalloc(BITS_TO_LONGS(adap
->sge
.egr_sz
),
3952 sizeof(long), GFP_KERNEL
);
3953 if (!adap
->sge
.starving_fl
) {
3958 adap
->sge
.txq_maperr
= kcalloc(BITS_TO_LONGS(adap
->sge
.egr_sz
),
3959 sizeof(long), GFP_KERNEL
);
3960 if (!adap
->sge
.txq_maperr
) {
3965 #ifdef CONFIG_DEBUG_FS
3966 adap
->sge
.blocked_fl
= kcalloc(BITS_TO_LONGS(adap
->sge
.egr_sz
),
3967 sizeof(long), GFP_KERNEL
);
3968 if (!adap
->sge
.blocked_fl
) {
3974 params
[0] = FW_PARAM_PFVF(CLIP_START
);
3975 params
[1] = FW_PARAM_PFVF(CLIP_END
);
3976 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
, val
);
3979 adap
->clipt_start
= val
[0];
3980 adap
->clipt_end
= val
[1];
3982 /* query params related to active filter region */
3983 params
[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START
);
3984 params
[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END
);
3985 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
, val
);
3986 /* If Active filter size is set we enable establishing
3987 * offload connection through firmware work request
3989 if ((val
[0] != val
[1]) && (ret
>= 0)) {
3990 adap
->flags
|= FW_OFLD_CONN
;
3991 adap
->tids
.aftid_base
= val
[0];
3992 adap
->tids
.aftid_end
= val
[1];
3995 /* If we're running on newer firmware, let it know that we're
3996 * prepared to deal with encapsulated CPL messages. Older
3997 * firmware won't understand this and we'll just get
3998 * unencapsulated messages ...
4000 params
[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP
);
4002 (void)t4_set_params(adap
, adap
->mbox
, adap
->pf
, 0, 1, params
, val
);
4005 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4006 * capability. Earlier versions of the firmware didn't have the
4007 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4008 * permission to use ULPTX MEMWRITE DSGL.
4010 if (is_t4(adap
->params
.chip
)) {
4011 adap
->params
.ulptx_memwrite_dsgl
= false;
4013 params
[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL
);
4014 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0,
4016 adap
->params
.ulptx_memwrite_dsgl
= (ret
== 0 && val
[0] != 0);
4020 * Get device capabilities so we can determine what resources we need
4023 memset(&caps_cmd
, 0, sizeof(caps_cmd
));
4024 caps_cmd
.op_to_write
= htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD
) |
4025 FW_CMD_REQUEST_F
| FW_CMD_READ_F
);
4026 caps_cmd
.cfvalid_to_len16
= htonl(FW_LEN16(caps_cmd
));
4027 ret
= t4_wr_mbox(adap
, adap
->mbox
, &caps_cmd
, sizeof(caps_cmd
),
4032 if (caps_cmd
.ofldcaps
) {
4033 /* query offload-related parameters */
4034 params
[0] = FW_PARAM_DEV(NTID
);
4035 params
[1] = FW_PARAM_PFVF(SERVER_START
);
4036 params
[2] = FW_PARAM_PFVF(SERVER_END
);
4037 params
[3] = FW_PARAM_PFVF(TDDP_START
);
4038 params
[4] = FW_PARAM_PFVF(TDDP_END
);
4039 params
[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ
);
4040 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6,
4044 adap
->tids
.ntids
= val
[0];
4045 adap
->tids
.natids
= min(adap
->tids
.ntids
/ 2, MAX_ATIDS
);
4046 adap
->tids
.stid_base
= val
[1];
4047 adap
->tids
.nstids
= val
[2] - val
[1] + 1;
4049 * Setup server filter region. Divide the available filter
4050 * region into two parts. Regular filters get 1/3rd and server
4051 * filters get 2/3rd part. This is only enabled if workarond
4053 * 1. For regular filters.
4054 * 2. Server filter: This are special filters which are used
4055 * to redirect SYN packets to offload queue.
4057 if (adap
->flags
& FW_OFLD_CONN
&& !is_bypass(adap
)) {
4058 adap
->tids
.sftid_base
= adap
->tids
.ftid_base
+
4059 DIV_ROUND_UP(adap
->tids
.nftids
, 3);
4060 adap
->tids
.nsftids
= adap
->tids
.nftids
-
4061 DIV_ROUND_UP(adap
->tids
.nftids
, 3);
4062 adap
->tids
.nftids
= adap
->tids
.sftid_base
-
4063 adap
->tids
.ftid_base
;
4065 adap
->vres
.ddp
.start
= val
[3];
4066 adap
->vres
.ddp
.size
= val
[4] - val
[3] + 1;
4067 adap
->params
.ofldq_wr_cred
= val
[5];
4069 adap
->params
.offload
= 1;
4071 if (caps_cmd
.rdmacaps
) {
4072 params
[0] = FW_PARAM_PFVF(STAG_START
);
4073 params
[1] = FW_PARAM_PFVF(STAG_END
);
4074 params
[2] = FW_PARAM_PFVF(RQ_START
);
4075 params
[3] = FW_PARAM_PFVF(RQ_END
);
4076 params
[4] = FW_PARAM_PFVF(PBL_START
);
4077 params
[5] = FW_PARAM_PFVF(PBL_END
);
4078 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6,
4082 adap
->vres
.stag
.start
= val
[0];
4083 adap
->vres
.stag
.size
= val
[1] - val
[0] + 1;
4084 adap
->vres
.rq
.start
= val
[2];
4085 adap
->vres
.rq
.size
= val
[3] - val
[2] + 1;
4086 adap
->vres
.pbl
.start
= val
[4];
4087 adap
->vres
.pbl
.size
= val
[5] - val
[4] + 1;
4089 params
[0] = FW_PARAM_PFVF(SQRQ_START
);
4090 params
[1] = FW_PARAM_PFVF(SQRQ_END
);
4091 params
[2] = FW_PARAM_PFVF(CQ_START
);
4092 params
[3] = FW_PARAM_PFVF(CQ_END
);
4093 params
[4] = FW_PARAM_PFVF(OCQ_START
);
4094 params
[5] = FW_PARAM_PFVF(OCQ_END
);
4095 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6, params
,
4099 adap
->vres
.qp
.start
= val
[0];
4100 adap
->vres
.qp
.size
= val
[1] - val
[0] + 1;
4101 adap
->vres
.cq
.start
= val
[2];
4102 adap
->vres
.cq
.size
= val
[3] - val
[2] + 1;
4103 adap
->vres
.ocq
.start
= val
[4];
4104 adap
->vres
.ocq
.size
= val
[5] - val
[4] + 1;
4106 params
[0] = FW_PARAM_DEV(MAXORDIRD_QP
);
4107 params
[1] = FW_PARAM_DEV(MAXIRD_ADAPTER
);
4108 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
,
4111 adap
->params
.max_ordird_qp
= 8;
4112 adap
->params
.max_ird_adapter
= 32 * adap
->tids
.ntids
;
4115 adap
->params
.max_ordird_qp
= val
[0];
4116 adap
->params
.max_ird_adapter
= val
[1];
4118 dev_info(adap
->pdev_dev
,
4119 "max_ordird_qp %d max_ird_adapter %d\n",
4120 adap
->params
.max_ordird_qp
,
4121 adap
->params
.max_ird_adapter
);
4123 if (caps_cmd
.iscsicaps
) {
4124 params
[0] = FW_PARAM_PFVF(ISCSI_START
);
4125 params
[1] = FW_PARAM_PFVF(ISCSI_END
);
4126 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2,
4130 adap
->vres
.iscsi
.start
= val
[0];
4131 adap
->vres
.iscsi
.size
= val
[1] - val
[0] + 1;
4133 #undef FW_PARAM_PFVF
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
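	/* Worked example of the arithmetic above (illustrative numbers
	 * only): payload = MTU - 20 (IP) - 20 (TCP) - options.  With no
	 * options, 1488 - 40 = 1448, a multiple of 8; with 12-byte
	 * timestamps, 1500 - 52 = 1448 as well, so the 1488/1500 pair
	 * covers both cases.
	 */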
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap);
	return 0;
	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}
	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);
	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
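/* Note on the recovery flow (standard pci_error_handlers semantics, not
 * cxgb4-specific): eeh_err_detected() quiesces the adapter and tells the
 * PCI core whether a slot reset is worth attempting, eeh_slot_reset()
 * re-enables the device and re-establishes contact with the firmware,
 * and eeh_resume() restarts whatever interfaces were running when the
 * error was detected.
 */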
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	/* Reduce memory usage in kdump environment, disable all offload.
	 */
	if (is_kdump_kernel())
		adap->params.offload = 0;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
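	/* Example with assumed values (MAX_ETH_QSETS is taken to be 32
	 * here, not read from the build): a 4-port card with two 10G and
	 * two 1G ports gives q10g = (32 - 2) / 2 = 15, capped by
	 * netif_get_num_default_rss_queues(); each 1G port keeps a single
	 * queue set, so qidx ends up at 2 * q10g + 2.
	 */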
	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->iscsirxq),
				  num_online_cpus());
			s->iscsiqsets = roundup(i, adap->params.nports);
		} else
			s->iscsiqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		/* Try and allow at least 1 CIQ per cpu rounding down
		 * to the number of ports, with a minimum of 1 per port.
		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
		 */
		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
				adap->params.nports;
		s->rdmaciqs = max_t(int, s->rdmaciqs,
				    adap->params.nports);

		if (!is_t4(adap->params.chip))
			s->niscsitq = s->iscsiqsets;
	}
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;
	for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
		struct sge_ofld_rxq *r = &s->iscsirxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}
	if (!is_t4(adap->params.chip)) {
		for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
			struct sge_ofld_rxq *r = &s->iscsitrxq[i];

			init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
			r->rspq.uld = CXGB4_ULD_ISCSIT;
			r->fl.size = 72;
		}
	}
	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}
	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}
	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
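/* A minimal sketch of the per-10G-port split computed in cfg_queues()
 * above; the helper below is illustrative only and not part of the
 * driver (hence not compiled).  Non-10G ports get one queue set each and
 * the remaining Ethernet queue sets are divided evenly among the 10G
 * ports.
 */
#if 0
static int example_q10g(int max_qsets, int nports, int n10g)
{
	return n10g ? (max_qsets - (nports - n10g)) / n10g : 0;
}
#endif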
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;

	entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < MAX_INGQ + 1; ++i)
		entries[i].entry = i;
	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
			s->niscsitq;
		/* need nchan for each possible ULD */
		if (is_t4(adap->params.chip))
			ofld_need = 3 * nchan;
		else
			ofld_need = 4 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}
	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		if (allocated < want) {
			s->rdmaqs = nchan;
			s->rdmaciqs = nchan;

			if (!is_t4(adap->params.chip))
				s->niscsitq = nchan;
		}

		/* leftovers go to OFLD */
		i = allocated - EXTRA_VECS - s->max_ethqsets -
		    s->rdmaqs - s->rdmaciqs - s->niscsitq;
		s->iscsiqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < allocated; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
		 allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
		 s->rdmaciqs);

	kfree(entries);
	return 0;
}
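/* Worked example of the vector budget above (counts assumed, not read
 * from hardware): a 4-port T5 with offload enabled has
 * ofld_need = 4 * nchan = 16, so without DCB the minimum is
 * need = 4 + EXTRA_VECS + 16 = 22 vectors, while "want" additionally
 * covers every configured NIC, iSCSI and RDMA queue set.
 */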
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}
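/* Each port's pi->rss is its RSS indirection table: rss_size u16 slots
 * that are filled with ingress queue indices and written to the hardware
 * when the interface is brought up.
 */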
static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4   /* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}

	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;
	return 0;
}
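/* Example decode (register value assumed for illustration): an LNKCAP
 * word of 0x0083 has an MLW field of (0x0083 & PCI_EXP_LNKCAP_MLW) >> 4
 * == 8, i.e. an x8-capable link; on an r3.0 device the speed then comes
 * from LNKCAP2, e.g. PCI_EXP_LNKCAP2_SLS_8_0GB set maps to
 * PCIE_SPEED_8_0GT above.
 */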
static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}
#ifdef CONFIG_PCI_IOV
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;
	void __iomem *regs;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		return -ENOMEM;
	}

	pcie_fw = readl(regs + PCIE_FW_A);
	iounmap(regs);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F) ||
	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
		return num_vfs;
	}

	/* Disable SRIOV when zero is passed.
	 * One needs to disable SRIOV before modifying it, else
	 * stack throws the below warning:
	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		return num_vfs;
	}

	if (num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err)
			return err;
	}
	return num_vfs;
}
#endif
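/* With this callback wired up as .sriov_configure below, VFs are
 * managed through the standard sysfs interface, e.g. (device address
 * illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:02:00.4/sriov_numvfs
 *
 * and writing 0 tears the VFs back down, as handled above.
 */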
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;
	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}
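	/* Worked example of the check above: with 4KB pages,
	 * num_seg = 4096 / SEGMENT_SIZE = 32, so write coalescing is only
	 * left enabled when at most 32 egress queues share a page for
	 * this PF.
	 */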
	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}
	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}
	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_adapter_info(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
		dev_warn(&pdev->dev,
			 "Enabling SR-IOV VFs using the num_vf module "
			 "parameter is deprecated - please use the pci sysfs "
			 "interface instead.\n");
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
	}
#endif
	return 0;
 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);