/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>
#include <linux/crash_dump.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
		 "deprecated parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
		 "deprecated parameter");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_state_init(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev =
				q->adap->port[q->adap->chan_map[port]];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	if (ulds[q->uld].lro_flush)
		ulds[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
						  rsp, gl, &q->lro_mgr,
						  &q->napi);
	else
		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
					      rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_iscsirxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
			 adap->port[0]->name, i);

	for_each_iscsitrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int iscsitqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsirxq(s, iscsiqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsirxq[iscsiqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsitrxq(s, iscsitqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsitrxq[iscsitqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--iscsitqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsitrxq[iscsitqidx].rspq);
	while (--iscsiqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsirxq[iscsiqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_iscsirxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsirxq[i].rspq);
	for_each_iscsitrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsitrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids, bool lro)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following which need to be
	 * synchronized to and changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
	for_each_iscsirxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \
	if (err) \
		goto freeout; \
	if (adap->msi_idx > 0) \
		adap->msi_idx += nq; \
} while (0)

	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
void t4_free_mem(void *addr)
{
	kvfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
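
/* Usage sketch (hypothetical caller, not part of this file): a ULD wanting
 * at most one interrupt per 5 us or per 8 packets on a response queue "q"
 * it owns could issue:
 *
 *	err = cxgb4_set_rspq_intr_params(q, 5, 8);
 *
 * Note that passing (0, 0) does not disable interrupts: the code above
 * falls back to cnt = 1, so the queue still interrupts on every packet.
 */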
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
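
/* Usage sketch (hypothetical ULD caller): active-open TIDs are allocated
 * against the adapter's tid_info and must be paired with cxgb4_free_atid():
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, ctx);
 *	if (atid < 0)
 *		return -ENOMEM;		-- free list exhausted
 *	...
 *	cxgb4_free_atid(&adap->tids, atid);
 */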
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 2;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 2;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
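
/* Worked example of the pointer tagging above (illustrative): the entries
 * of tid_tab are pointer-aligned, so the low 2 bits of p are always zero.
 * With chan = 2 and p ending in ...c38, the list head becomes ...c3a; the
 * work function recovers chan = (uintptr_t)head & 3 and p = head - chan.
 */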
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
		__set_bit(0, t->stid_bmap);

	return 0;
}
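
/* Memory layout carved out of the single t4_alloc_mem() block above
 * (illustrative):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] |
 *	ftid_tab[nftids + nsftids]
 */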
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
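
/* Usage sketch (hypothetical ULD caller): binding a listening server to
 * TCP port 80 on all local addresses, steering its messages to the ingress
 * queue "rxq_id":
 *
 *	ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *				  htons(80), 0, rxq_id);
 *	if (ret < 0)
 *		...		-- allocation/transmit error
 */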
/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
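
/* Worked example (illustrative): with a HW MTU table such as
 * { 88, ..., 1500, ..., 9600 }, cxgb4_best_mtu(mtus, 1500, &idx) advances
 * while the next entry still fits and returns 1500 with *idx set to its
 * position; asking for 1400 returns the largest entry <= 1400, and only
 * when @mtu is below the first entry does the smallest entry come back.
 */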
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
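
/* Usage sketch (hypothetical caller): picking an MTU whose data segment
 * (the MTU minus a 40-byte TCP/IP header) is 8-byte aligned, capped at a
 * 9000-byte payload:
 *
 *	unsigned int mtu_idx;
 *	unsigned int mtu = cxgb4_best_aligned_mtu(adap->params.mtus,
 *						  40, 9000, 8, &mtu_idx);
 */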
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
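
/* Worked example (illustrative): on a T5 adapter, viid 0x85 maps to SMT
 * row (0x85 & 0x7f) << 1 = 0x0a, since each of the 128 rows holds two
 * SMAC entries; on T6 the same viid maps directly to row 0x05.
 */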
2000 * cxgb4_port_chan - get the HW channel of a port
2001 * @dev: the net device for the port
2003 * Return the HW Tx channel of the given port.
2005 unsigned int cxgb4_port_chan(const struct net_device
*dev
)
2007 return netdev2pinfo(dev
)->tx_chan
;
2009 EXPORT_SYMBOL(cxgb4_port_chan
);
2011 unsigned int cxgb4_dbfifo_count(const struct net_device
*dev
, int lpfifo
)
2013 struct adapter
*adap
= netdev2adap(dev
);
2014 u32 v1
, v2
, lp_count
, hp_count
;
2016 v1
= t4_read_reg(adap
, SGE_DBFIFO_STATUS_A
);
2017 v2
= t4_read_reg(adap
, SGE_DBFIFO_STATUS2_A
);
2018 if (is_t4(adap
->params
.chip
)) {
2019 lp_count
= LP_COUNT_G(v1
);
2020 hp_count
= HP_COUNT_G(v1
);
2022 lp_count
= LP_COUNT_T5_G(v1
);
2023 hp_count
= HP_COUNT_T5_G(v2
);
2025 return lpfifo
? lp_count
: hp_count
;
2027 EXPORT_SYMBOL(cxgb4_dbfifo_count
);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);
/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx,
			   u16 *cidx)
{
	__be64 indices;
	int ret;
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->pf;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.iscsi_rxq;
		lli.nrxq = adap->sge.iscsiqsets;
	} else if (uld == CXGB4_ULD_ISCSIT) {
		lli.rxq_ids = adap->sge.iscsit_rxq;
		lli.nrxq = adap->sge.niscsitq;
	}
	lli.ntxq = adap->sge.iscsiqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lli.iscsi_ppm = &adap->iscsi_ppm;
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lli.nodeid = dev_to_node(adap->pdev_dev);

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle) {
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);
			adap->uld[i].handle = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
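/* A minimal sketch of the registration flow from an upper-layer driver's
 * point of view (hypothetical ULD shown for illustration only; the field
 * names follow struct cxgb4_uld_info):
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lli)
 *	{
 *		struct my_uld_dev *udev = my_uld_alloc(lli);
 *
 *		return udev ? udev : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name		= "my_uld",
 *		.add		= my_uld_add,
 *		.rx_handler	= my_uld_rx_handler,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */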
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adap->mac_hlist);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this like the filter being locked, currently
 * pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}

		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
#ifdef CONFIG_PCI_IOV
static int dummy_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}

/* Fill MAC address that will be assigned by the FW */
static void fill_vf_station_mac_addr(struct adapter *adap)
{
	unsigned int i;
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	int err;
	u8 *na;
	u16 a, b;

	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (!err) {
		na = adap->params.vpd.na;
		for (i = 0; i < ETH_ALEN; i++)
			hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
				      hex2val(na[2 * i + 1]));
		a = (hw_addr[0] << 8) | hw_addr[1];
		b = (hw_addr[1] << 8) | hw_addr[2];
		a ^= b;
		a |= 0x0200;    /* locally assigned Ethernet MAC address */
		a &= ~0x0100;   /* not a multicast Ethernet MAC address */
		macaddr[0] = a >> 8;
		macaddr[1] = a & 0xff;

		for (i = 2; i < 5; i++)
			macaddr[i] = hw_addr[i + 1];

		for (i = 0; i < adap->num_vfs; i++) {
			macaddr[5] = adap->pf * 16 + i;
			ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
		}
	}
}
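/* Worked example of the VF MAC derivation above (hypothetical VPD node
 * address "000743123456"): a = 0x0007 ^ 0x0743 = 0x0744; forcing the
 * locally-administered bit (|= 0x0200) and clearing the multicast bit
 * (&= ~0x0100) gives 0x0644, so VF 0 of PF 4 would be assigned
 * 06:44:12:34:56:40 (last octet = pf * 16 + vf index).
 */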
static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}

static int cxgb_get_vf_config(struct net_device *dev,
			      int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
	return 0;
}
#endif
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate << 10;

	/* Max rate is 10 Gbps */
	if (req_rate >= SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Gbps\n",
			rate, SCHED_MAX_RATE_KBPS);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll        = cxgb_busy_poll,
#endif
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
};

#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open             = dummy_open,
	.ndo_set_vf_mac       = cxgb_set_vf_mac,
	.ndo_get_vf_config    = cxgb_get_vf_config,
};
#endif
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo       = get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
3906 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3908 static int adap_init0(struct adapter
*adap
)
3912 enum dev_state state
;
3913 u32 params
[7], val
[7];
3914 struct fw_caps_config_cmd caps_cmd
;
3917 /* Grab Firmware Device Log parameters as early as possible so we have
3918 * access to it for debugging, etc.
3920 ret
= t4_init_devlog_params(adap
);
3924 /* Contact FW, advertising Master capability */
3925 ret
= t4_fw_hello(adap
, adap
->mbox
, adap
->mbox
,
3926 is_kdump_kernel() ? MASTER_MUST
: MASTER_MAY
, &state
);
3928 dev_err(adap
->pdev_dev
, "could not connect to FW, error %d\n",
3932 if (ret
== adap
->mbox
)
3933 adap
->flags
|= MASTER_PF
;
3936 * If we're the Master PF Driver and the device is uninitialized,
3937 * then let's consider upgrading the firmware ... (We always want
3938 * to check the firmware version number in order to A. get it for
3939 * later reporting and B. to warn if the currently loaded firmware
3940 * is excessively mismatched relative to the driver.)
3942 t4_get_fw_version(adap
, &adap
->params
.fw_vers
);
3943 t4_get_bs_version(adap
, &adap
->params
.bs_vers
);
3944 t4_get_tp_version(adap
, &adap
->params
.tp_vers
);
3945 t4_get_exprom_version(adap
, &adap
->params
.er_vers
);
3947 ret
= t4_check_fw_version(adap
);
3948 /* If firmware is too old (not supported by driver) force an update. */
3950 state
= DEV_STATE_UNINIT
;
3951 if ((adap
->flags
& MASTER_PF
) && state
!= DEV_STATE_INIT
) {
3952 struct fw_info
*fw_info
;
3953 struct fw_hdr
*card_fw
;
3954 const struct firmware
*fw
;
3955 const u8
*fw_data
= NULL
;
3956 unsigned int fw_size
= 0;
3958 /* This is the firmware whose headers the driver was compiled
3961 fw_info
= find_fw_info(CHELSIO_CHIP_VERSION(adap
->params
.chip
));
3962 if (fw_info
== NULL
) {
3963 dev_err(adap
->pdev_dev
,
3964 "unable to get firmware info for chip %d.\n",
3965 CHELSIO_CHIP_VERSION(adap
->params
.chip
));
3969 /* allocate memory to read the header of the firmware on the
3972 card_fw
= t4_alloc_mem(sizeof(*card_fw
));
3974 /* Get FW from from /lib/firmware/ */
3975 ret
= request_firmware(&fw
, fw_info
->fw_mod_name
,
3978 dev_err(adap
->pdev_dev
,
3979 "unable to load firmware image %s, error %d\n",
3980 fw_info
->fw_mod_name
, ret
);
3986 /* upgrade FW logic */
3987 ret
= t4_prep_fw(adap
, fw_info
, fw_data
, fw_size
, card_fw
,
3991 release_firmware(fw
);
3992 t4_free_mem(card_fw
);
3999 * Grab VPD parameters. This should be done after we establish a
4000 * connection to the firmware since some of the VPD parameters
4001 * (notably the Core Clock frequency) are retrieved via requests to
4002 * the firmware. On the other hand, we need these fairly early on
4003 * so we do this right after getting ahold of the firmware.
4005 ret
= t4_get_vpd_params(adap
, &adap
->params
.vpd
);
4010 * Find out what ports are available to us. Note that we need to do
4011 * this before calling adap_init0_no_config() since it needs nports
4015 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
4016 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC
);
4017 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 1, &v
, &port_vec
);
4021 adap
->params
.nports
= hweight32(port_vec
);
4022 adap
->params
.portvec
= port_vec
;
4024 /* If the firmware is initialized already, emit a simply note to that
4025 * effect. Otherwise, it's time to try initializing the adapter.
4027 if (state
== DEV_STATE_INIT
) {
4028 dev_info(adap
->pdev_dev
, "Coming up as %s: "\
4029 "Adapter already initialized\n",
4030 adap
->flags
& MASTER_PF
? "MASTER" : "SLAVE");
4032 dev_info(adap
->pdev_dev
, "Coming up as MASTER: "\
4033 "Initializing adapter\n");
4035 /* Find out whether we're dealing with a version of the
4036 * firmware which has configuration file support.
4038 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
4039 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF
));
4040 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 1,
4043 /* If the firmware doesn't support Configuration Files,
4047 dev_err(adap
->pdev_dev
, "firmware doesn't support "
4048 "Firmware Configuration Files\n");
4052 /* The firmware provides us with a memory buffer where we can
4053 * load a Configuration File from the host if we want to
4054 * override the Configuration File in flash.
4056 ret
= adap_init0_config(adap
, reset
);
4057 if (ret
== -ENOENT
) {
4058 dev_err(adap
->pdev_dev
, "no Configuration File "
4059 "present on adapter.\n");
4063 dev_err(adap
->pdev_dev
, "could not initialize "
4064 "adapter, error %d\n", -ret
);
4069 /* Give the SGE code a chance to pull in anything that it needs ...
4070 * Note that this must be called after we retrieve our VPD parameters
4071 * in order to know how to convert core ticks to seconds, etc.
4073 ret
= t4_sge_init(adap
);
4077 if (is_bypass_device(adap
->pdev
->device
))
4078 adap
->params
.bypass
= 1;
4081 * Grab some of our basic fundamental operating parameters.
4083 #define FW_PARAM_DEV(param) \
4084 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
4085 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
4087 #define FW_PARAM_PFVF(param) \
4088 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
4089 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
4090 FW_PARAMS_PARAM_Y_V(0) | \
4091 FW_PARAMS_PARAM_Z_V(0)
4093 params
[0] = FW_PARAM_PFVF(EQ_START
);
4094 params
[1] = FW_PARAM_PFVF(L2T_START
);
4095 params
[2] = FW_PARAM_PFVF(L2T_END
);
4096 params
[3] = FW_PARAM_PFVF(FILTER_START
);
4097 params
[4] = FW_PARAM_PFVF(FILTER_END
);
4098 params
[5] = FW_PARAM_PFVF(IQFLINT_START
);
4099 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6, params
, val
);
4102 adap
->sge
.egr_start
= val
[0];
4103 adap
->l2t_start
= val
[1];
4104 adap
->l2t_end
= val
[2];
4105 adap
->tids
.ftid_base
= val
[3];
4106 adap
->tids
.nftids
= val
[4] - val
[3] + 1;
4107 adap
->sge
.ingr_start
= val
[5];
4109 /* qids (ingress/egress) returned from firmware can be anywhere
4110 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
4111 * Hence driver needs to allocate memory for this range to
4112 * store the queue info. Get the highest IQFLINT/EQ index returned
4113 * in FW_EQ_*_CMD.alloc command.
4115 params
[0] = FW_PARAM_PFVF(EQ_END
);
4116 params
[1] = FW_PARAM_PFVF(IQFLINT_END
);
4117 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
, val
);
4120 adap
->sge
.egr_sz
= val
[0] - adap
->sge
.egr_start
+ 1;
4121 adap
->sge
.ingr_sz
= val
[1] - adap
->sge
.ingr_start
+ 1;
4123 adap
->sge
.egr_map
= kcalloc(adap
->sge
.egr_sz
,
4124 sizeof(*adap
->sge
.egr_map
), GFP_KERNEL
);
4125 if (!adap
->sge
.egr_map
) {
4130 adap
->sge
.ingr_map
= kcalloc(adap
->sge
.ingr_sz
,
4131 sizeof(*adap
->sge
.ingr_map
), GFP_KERNEL
);
4132 if (!adap
->sge
.ingr_map
) {
4137 /* Allocate the memory for the vaious egress queue bitmaps
4138 * ie starving_fl, txq_maperr and blocked_fl.
4140 adap
->sge
.starving_fl
= kcalloc(BITS_TO_LONGS(adap
->sge
.egr_sz
),
4141 sizeof(long), GFP_KERNEL
);
4142 if (!adap
->sge
.starving_fl
) {
4147 adap
->sge
.txq_maperr
= kcalloc(BITS_TO_LONGS(adap
->sge
.egr_sz
),
4148 sizeof(long), GFP_KERNEL
);
4149 if (!adap
->sge
.txq_maperr
) {
4154 #ifdef CONFIG_DEBUG_FS
4155 adap
->sge
.blocked_fl
= kcalloc(BITS_TO_LONGS(adap
->sge
.egr_sz
),
4156 sizeof(long), GFP_KERNEL
);
4157 if (!adap
->sge
.blocked_fl
) {
4163 params
[0] = FW_PARAM_PFVF(CLIP_START
);
4164 params
[1] = FW_PARAM_PFVF(CLIP_END
);
4165 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
, val
);
4168 adap
->clipt_start
= val
[0];
4169 adap
->clipt_end
= val
[1];
4171 /* We don't yet have a PARAMs calls to retrieve the number of Traffic
4172 * Classes supported by the hardware/firmware so we hard code it here
4175 adap
->params
.nsched_cls
= is_t4(adap
->params
.chip
) ? 15 : 16;
4177 /* query params related to active filter region */
4178 params
[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START
);
4179 params
[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END
);
4180 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 2, params
, val
);
4181 /* If Active filter size is set we enable establishing
4182 * offload connection through firmware work request
4184 if ((val
[0] != val
[1]) && (ret
>= 0)) {
4185 adap
->flags
|= FW_OFLD_CONN
;
4186 adap
->tids
.aftid_base
= val
[0];
4187 adap
->tids
.aftid_end
= val
[1];
4190 /* If we're running on newer firmware, let it know that we're
4191 * prepared to deal with encapsulated CPL messages. Older
4192 * firmware won't understand this and we'll just get
4193 * unencapsulated messages ...
4195 params
[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP
);
4197 (void)t4_set_params(adap
, adap
->mbox
, adap
->pf
, 0, 1, params
, val
);
	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability. Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}
	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;
) {
4228 /* query offload-related parameters */
4229 params
[0] = FW_PARAM_DEV(NTID
);
4230 params
[1] = FW_PARAM_PFVF(SERVER_START
);
4231 params
[2] = FW_PARAM_PFVF(SERVER_END
);
4232 params
[3] = FW_PARAM_PFVF(TDDP_START
);
4233 params
[4] = FW_PARAM_PFVF(TDDP_END
);
4234 params
[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ
);
4235 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6,
4239 adap
->tids
.ntids
= val
[0];
4240 adap
->tids
.natids
= min(adap
->tids
.ntids
/ 2, MAX_ATIDS
);
4241 adap
->tids
.stid_base
= val
[1];
4242 adap
->tids
.nstids
= val
[2] - val
[1] + 1;
4244 * Setup server filter region. Divide the available filter
4245 * region into two parts. Regular filters get 1/3rd and server
4246 * filters get 2/3rd part. This is only enabled if workarond
4248 * 1. For regular filters.
4249 * 2. Server filter: This are special filters which are used
4250 * to redirect SYN packets to offload queue.
4252 if (adap
->flags
& FW_OFLD_CONN
&& !is_bypass(adap
)) {
4253 adap
->tids
.sftid_base
= adap
->tids
.ftid_base
+
4254 DIV_ROUND_UP(adap
->tids
.nftids
, 3);
4255 adap
->tids
.nsftids
= adap
->tids
.nftids
-
4256 DIV_ROUND_UP(adap
->tids
.nftids
, 3);
4257 adap
->tids
.nftids
= adap
->tids
.sftid_base
-
4258 adap
->tids
.ftid_base
;
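		/* A worked example of the split above, with illustrative
		 * numbers: for nftids = 496 and ftid_base = 0x200 before the
		 * split, DIV_ROUND_UP(496, 3) = 166, so
		 *
		 *	sftid_base = 0x200 + 166  (server filters start here)
		 *	nsftids    = 496 - 166 = 330  (~2/3rd, server filters)
		 *	nftids     = 166              (~1/3rd, regular filters)
		 */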
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
	if (caps_cmd.cryptocaps) {
		/* Should query params here...TODO */
		adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
		adap->num_uld += 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/* The MTU/MSS Table is initialized by now, so load their values. If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons. For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header. In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8. So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8. On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap);
	return 0;
	/*
	 * Something bad happened. If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
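/* For reference, every firmware parameter handshake in adap_init0() above
 * follows the same pattern: pack FW_PARAMS_PARAM_* mnemonics into params[],
 * mail them to the firmware with t4_query_params(), and read the replies out
 * of val[] in the same order. A minimal sketch of one round trip (error
 * handling trimmed):
 *
 *	params[0] = FW_PARAM_PFVF(EQ_START);
 *	params[1] = FW_PARAM_PFVF(EQ_END);
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
 *	if (ret < 0)
 *		goto bye;
 *
 * On success, val[0] and val[1] hold the firmware's answers, in order.
 */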
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs. Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	/* Reduce memory usage in kdump environment, disable all offload.
	 */
	if (is_kdump_kernel()) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	} else if (adap->num_uld && uld_mem_alloc(adap)) {
		adap->params.crypto = 0;
	}

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */
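	/* Worked example of the q10g sizing above (illustrative numbers,
	 * assuming MAX_ETH_QSETS is 32): a 4-port card with n10g = 2 ports
	 * at 10G or better gets
	 *
	 *	q10g = (32 - (4 - 2)) / 2 = 15
	 *
	 * which the clamp then reduces to netif_get_num_default_rss_queues()
	 * (typically 8), so each 10G port ends up with 8 queue sets and each
	 * slower port with 1.
	 */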
	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->iscsirxq),
				  num_online_cpus());
			s->iscsiqsets = roundup(i, adap->params.nports);
		} else
			s->iscsiqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		/* Try and allow at least 1 CIQ per cpu rounding down
		 * to the number of ports, with a minimum of 1 per port.
		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
		 */
		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
				adap->params.nports;
		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);

		if (!is_t4(adap->params.chip))
			s->niscsitq = s->iscsiqsets;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
		struct sge_ofld_rxq *r = &s->iscsirxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	if (!is_t4(adap->params.chip)) {
		for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
			struct sge_ofld_rxq *r = &s->iscsitrxq[i];

			init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
			r->rspq.uld = CXGB4_ULD_ISCSIT;
			r->fl.size = 72;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
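/* Reading the init_rspq() calls above: after the queue pointer, the
 * arguments are the holdoff timer in microseconds, the holdoff packet count,
 * the queue size in entries, and the size of each entry in bytes. So, for
 * example, init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64) sets up the firmware
 * event queue with no holdoff timer, a one-packet threshold, and 1024
 * 64-byte entries.
 */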
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
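/* Illustrative walk-through of reduce_ethqs(): with two ports holding eight
 * queue sets each (ethqsets = 16) and a target of n = 10, each pass of the
 * outer loop strips one queue set per port -- 16 -> 14 -> 12 -> 10 --
 * leaving the ports balanced at five queue sets apiece before the
 * first_qset offsets are recomputed.
 */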
static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	int max_ingq = (MAX_OFLD_QSETS * adap->num_uld);

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;
	return 0;
}
static void free_msix_info(struct adapter *adap)
{
	if (!adap->num_uld)
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0, uld_need = 0;
	int i, j, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;
	int max_ingq = MAX_INGQ;

	max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
	entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* map for msix */
	if (is_pci_uld(adap) && get_msix_info(adap))
		adap->params.crypto = 0;

	for (i = 0; i < max_ingq + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
			s->niscsitq;
		/* need nchan for each possible ULD */
		if (is_t4(adap->params.chip))
			ofld_need = 3 * nchan;
		else
			ofld_need = 4 * nchan;
	}
	if (is_pci_uld(adap)) {
		want += netif_get_num_default_rss_queues() * nchan;
		uld_need = nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need - uld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_pci_uld(adap)) {
		if (allocated < want)
			s->nqs_per_uld = nchan;
		else
			s->nqs_per_uld = netif_get_num_default_rss_queues() *
					nchan;
	}

	if (is_offload(adap)) {
		if (allocated < want) {
			s->rdmaqs = nchan;
			s->rdmaciqs = nchan;

			if (!is_t4(adap->params.chip))
				s->niscsitq = nchan;
		}

		/* leftovers go to OFLD */
		i = allocated - EXTRA_VECS - s->max_ethqsets -
		    s->rdmaqs - s->rdmaciqs - s->niscsitq;
		if (is_pci_uld(adap))
			i -= s->nqs_per_uld * adap->num_uld;
		s->iscsiqsets = (i / nchan) * nchan;  /* round down */
	}

	for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i)
		adap->msix_info[i].vec = entries[i].vector;
	if (is_pci_uld(adap)) {
		for (j = 0 ; i < allocated; ++i, j++)
			adap->msix_info_ulds[j].vec = entries[i].vector;
		adap->msix_bmap_ulds.mapsize = j;
	}
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n",
		 allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
		 s->rdmaciqs, s->nqs_per_uld);

	kfree(entries);
	return 0;
}
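/* Worked example of the vector accounting in enable_msix(), with
 * illustrative numbers: a 2-port T5 card with max_ethqsets = 16 and
 * rdmaqs = rdmaciqs = iscsiqsets = niscsitq = 2, and no PCI ULD, asks for
 *
 *	want = 16 + EXTRA_VECS(2) + 2 + 2 + 2 + 2 = 26
 *	need = 2 (ports) + EXTRA_VECS(2) + 4 * 2 (ofld_need) = 12
 *
 * pci_enable_msix_range() may then grant anything in [12, 26]; a shortfall
 * is taken out of the Ethernet queue sets first, and the offload queue
 * count is rounded down to a multiple of the channel count.
 */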
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}
static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4   /* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;
	return 0;
}
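/* Decoding example for the capability registers read above: a LNKCAP value
 * whose Maximum Link Width field (bits 9:4) holds 0x8 reports a x8-capable
 * device, and a LNKCAP2 Supported Link Speeds vector with the 8.0GT/s bit
 * set yields PCIE_SPEED_8_0GT -- together, "x8 at 8.0GT/s".
 */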
static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_cleanup_sched(adapter);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}
#ifdef CONFIG_PCI_IOV
static void dummy_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
	dev->destructor = free_netdev;
}
static int config_mgmt_dev(struct pci_dev *pdev)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	struct net_device *netdev;
	struct port_info *pi;
	char name[IFNAMSIZ];
	int err;

	snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
	netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
	if (!netdev)
		return -ENOMEM;

	pi = netdev_priv(netdev);
	pi->adapter = adap;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	adap->port[0] = netdev;

	err = register_netdev(adap->port[0]);
	if (err) {
		pr_info("Unable to register VF mgmt netdev %s\n", name);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;
	}
	return err;
}
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F) ||
	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
		return num_vfs;
	}

	/* Disable SRIOV when zero is passed.
	 * One needs to disable SRIOV before modifying it, else
	 * stack throws the below warning:
	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		if (adap->port[0]) {
			unregister_netdev(adap->port[0]);
			adap->port[0] = NULL;
		}
		/* free VF resources */
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		adap->num_vfs = 0;
		return num_vfs;
	}

	if (num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err)
			return err;

		adap->num_vfs = num_vfs;
		err = config_mgmt_dev(pdev);
		if (err)
			return err;
	}

	adap->vfinfo = kcalloc(adap->num_vfs,
			       sizeof(struct vf_info), GFP_KERNEL);
	if (adap->vfinfo)
		fill_vf_station_mac_addr(adap);
	return num_vfs;
}
#endif
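/* cxgb4_iov_configure() is driven by the standard PCI sysfs knob; from
 * userspace the flow looks like this (shell example; the BDF shown is
 * illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs   (enable)
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs   (disable)
 *
 * A non-zero count lands in the num_vfs != current_vfs path above, while
 * zero takes the pci_disable_sriov() path.
 */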
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	struct net_device *netdev;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;
	static int adap_idx = 1;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
#ifndef CONFIG_PCI_IOV
		iounmap(regs);
#endif
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}
	adap_idx++;

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
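			/* The loop above rebuilds the binary MAC address from
			 * the VPD "na" property, which stores it as 12 ASCII
			 * hex digits. For an illustrative na of
			 * "000743000123", the pairs parse as 0x00, 0x07,
			 * 0x43, ... giving hw_addr = 00:07:43:00:01:23.
			 */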
		}
	}

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_adapter_info(adapter);
	return 0;

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
		dev_warn(&pdev->dev,
			 "Enabling SR-IOV VFs using the num_vf module "
			 "parameter is deprecated - please use the pci sysfs "
			 "interface instead.\n");
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto free_pci_region;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->regs = regs;
	adapter->adap_idx = adap_idx;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto free_adapter;
	}
	pci_set_drvdata(pdev, adapter);
	return 0;

 free_adapter:
	kfree(adapter);
 free_pci_region:
	iounmap(regs);
	pci_disable_sriov(pdev);
	pci_release_regions(pdev);
	return err;
#else
	return 0;
#endif

 out_free_dev:
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld)
		uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld)
			uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);