/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nic"
#define DRV_VERSION	"1.0"

struct hw_info {
	u8	bgx_cnt;
	u8	chans_per_lmac;
	u8	chans_per_bgx; /* Rx/Tx chans */
	u8	chans_per_rgx;
	u8	chans_per_lbk;
	u16	rss_ind_tbl_size;
	u16	tl4_cnt;
	u16	tl3_cnt;
	bool	tl1_per_bgx; /* TL1 per BGX or per LMAC */
};

struct nicpf {
	struct pci_dev		*pdev;
	struct hw_info		*hw;
	u8			node;
	unsigned int		flags;
	u8			num_vf_en;      /* No of VF enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;      /* Register start address */
	u8			num_sqs_en;     /* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8			*vf_lmac_map;
	struct delayed_work	dwork;
	struct workqueue_struct	*check_link;
	u8			*link;
	u8			*duplex;
	u32			*speed;
	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];

	/* MSI-X */
	bool			msix_enabled;
	u8			num_vec;
	struct msix_entry	*msix_entries;
	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
	char			irq_name[NIC_PF_MSIX_VECTORS][20];
};

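/* Illustration of the map macros above (not used by the driver): a BGX
 * index and an LMAC index are packed into one byte, so for instance
 * NIC_SET_VF_LMAC_MAP(1, 2) yields 0x12, from which
 * NIC_GET_BGX_FROM_VF_LMAC_MAP(0x12) recovers 1 and
 * NIC_GET_LMAC_FROM_VF_LMAC_MAP(0x12) recovers 2.
 */
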
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nic_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver.  The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant, and only add overhead.
 */

/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);

#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))

	/* Clear it, to avoid spurious interrupts (if any) */
	nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));

	/* Enable mailbox interrupt for all VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
	/* One mailbox intr enable reg per 64 VFs */
	if (vf_cnt > 64) {
		nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
		nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
	}
}

static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}

static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}

/* Send a mailbox message to VF
 * @vf: vf to which this message to be sent
 * @mbx: Message to be sent
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->pdev)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}

/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const u8 *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < nic->num_vf_en) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* ACKs VF's mailbox message
 * @vf: VF to which ACK to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* NACKs VF's mailbox message when PF is not able to
 * complete the action
 * @vf: VF to which NACK to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Flush all in flight receive packets to memory and
 * bring down an active RQ
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx)
		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	else
		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}
	new_frs += ETH_HLEN;
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}

/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac, max_lmac;
	u16 sdevid;
	u64 lmac_cfg;

	/* There is an issue in HW wherein, while sending GSO sized
	 * pkts as part of TSO, if pkt len falls below this size
	 * NIC will zero PAD packet and also updates IP total length.
	 * Hence set this value to less than min pkt size of MAC+IP+TCP
	 * headers, BGX will do the padding to transmit 64 byte pkt.
	 */
	if (size > 52)
		size = 52;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	/* 81xx's RGX has only one LMAC */
	if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
	else
		max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

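/* The pad size programmed above is encoded in 4-byte units in bits 5:2
 * of LMAC_CFG; e.g. the clamped maximum of 52 bytes is written as
 * 52 / 4 = 13, which fits the 4-bit field cleared by ~(0xF << 2).
 */
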
/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);

		/* On CN81XX there are only 8 VFs but max possible no of
		 * interfaces are 9.
		 */
		if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
			nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
			break;
		}
	}
}

static void nic_free_lmacmem(struct nicpf *nic)
{
	kfree(nic->vf_lmac_map);
	kfree(nic->link);
	kfree(nic->duplex);
	kfree(nic->speed);
}

static int nic_get_hw_info(struct nicpf *nic)
{
	u8 max_lmac;
	u16 sdevid;
	struct hw_info *hw = nic->hw;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);

	switch (sdevid) {
	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN88XX;
		hw->chans_per_lmac = 16;
		hw->chans_per_bgx = 128;
		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
		hw->tl4_cnt = 1024;
		hw->tl3_cnt = 256;
		hw->tl1_per_bgx = true;
		break;
	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN81XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_rgx = 8;
		hw->chans_per_lbk = 24;
		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
		hw->tl4_cnt = 128;
		hw->tl3_cnt = 64;
		hw->tl1_per_bgx = false;
		break;
	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN83XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_lbk = 64;
		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
		hw->tl4_cnt = 512;
		hw->tl3_cnt = 256;
		hw->tl1_per_bgx = false;
		break;
	}
	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);

	/* Allocate memory for LMAC tracking elements */
	max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
	nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->vf_lmac_map)
		goto error;
	nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->link)
		goto error;
	nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->duplex)
		goto error;
	nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
	if (!nic->speed)
		goto error;
	return 0;

error:
	nic_free_lmacmem(nic);
	return -ENOMEM;
}

static int nic_init_hw(struct nicpf *nic)
{
	int i, err;
	u64 cqm_cfg;

	/* Get HW capability info */
	err = nic_get_hw_info(nic);
	if (err)
		return err;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* TNS and TNS bypass modes are present only on 88xx */
	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
			      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
			      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
	}

	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);

	return 0;
}

/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->pdev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

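/* Example of the CPI mapping above, illustratively: with CPI_ALG_VLAN a
 * VF gets 8 CPIs, one per 3-bit PCP value, so an Rx packet with PCP 5
 * is steered to cpi_base + 5 and from there to the RSSI/RQ programmed
 * for that index.
 */
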
/* Responds to VF with its RSS indirection table size */
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e hash::RQ mapping
 * - no of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8  qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->pdev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/* 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0 on 88xx
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	struct hw_info *hw = nic->hw;
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
	int svf;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* For 88xx 0-511 TL4 transmits via BGX0 and
	 * 512-1023 TL4s transmit via BGX1.
	 */
	if (hw->tl1_per_bgx) {
		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
		if (!sq->sqs_mode) {
			tl4 += (lmac * MAX_QUEUES_PER_QSET);
		} else {
			for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
					break;
			}
			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
			tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
			tl4 += (svf * MAX_QUEUES_PER_QSET);
		}
	} else {
		tl4 = (vnic * MAX_QUEUES_PER_QSET);
	}
	tl4 += sq_idx;

	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);

	/* On 88xx 0-127 channels are for BGX0 and
	 * 127-255 channels for BGX1.
	 *
	 * On 81xx/83xx TL3_CHAN reg should be configured with channel
	 * within LMAC i.e 0-7 and not the actual channel number like on 88xx
	 */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	if (hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	else
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);

	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);

	/* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
	 * on 81xx/83xx TL2 needs to be configured to transmit to one of the
	 * possible LMACs.
	 *
	 * This register doesn't exist on 88xx.
	 */
	if (!hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
			      lmac + (bgx * MAX_LMAC_PER_BGX));
}

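/* TL index arithmetic above, e.g. on 88xx assuming 128 total VFs so that
 * tl4_cnt = 8 * 128 = 1024, with tl3_cnt = 256: VNIC1-SQ0 uses TL4 8
 * (see the sample table above), which maps to TL3 8 / (1024 / 256) = 2,
 * matching TL3[2] in the table.
 */
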
/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}

/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}

/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (!nic->sqs_used[sqs])
			nic->sqs_used[sqs] = true;
		else
			continue;
		return sqs + nic->num_vf_en;
	}
	return -1;
}

/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}

static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= nic->num_vf_en)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}

/* Reset statistics counters */
static int nic_reset_stat_counters(struct nicpf *nic,
				   int vf, struct reset_stat_cfg *cfg)
{
	int i, stat, qnum;
	u64 reg_addr;

	for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
		if (cfg->rx_stat_mask & BIT(i)) {
			reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
				   (vf << NIC_QS_ID_SHIFT) |
				   (i << 3);
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
		if (cfg->tx_stat_mask & BIT(i)) {
			reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
				   (vf << NIC_QS_ID_SHIFT) |
				   (i << 3);
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	for (i = 0; i <= 15; i++) {
		qnum = i >> 1;
		stat = i & 1 ? 1 : 0;
		reg_addr = (vf << NIC_QS_ID_SHIFT) |
			   (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
		if (cfg->rq_stat_mask & BIT(i)) {
			reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
			nic_reg_write(nic, reg_addr, 0);
		}

		if (cfg->sq_stat_mask & BIT(i)) {
			reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	return 0;
}

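/* Loop encoding above, for example: i = 5 gives qnum = 5 >> 1 = 2 and
 * stat = 1, i.e. bit 5 of the mask clears stat register 1 of queue 2.
 */
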
static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
{
	u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
	u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
			     (IPV4_PROT_DEF << 16) | ET_PROT_DEF;

	/* Configure tunnel parsing parameters */
	nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
		      (1ULL << 63 | UDP_GENEVE_PORT_NUM));
	nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
		      ((7ULL << 61) | prot_def));
	nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
		      ((7ULL << 61) | prot_def));
	nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
		      ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
	nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
		      ((0xfULL << 60) | vxlan_prot_def));
}

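/* The prot_def/vxlan_prot_def values above pack three parser defaults
 * into one register write: Ethertype in bits 15:0, an IPv4 protocol
 * default in bits 31:16 and an IPv6 protocol default starting at bit 32
 * (assuming 16-bit wide fields, as implied by the shifts used).
 */
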
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
	int bgx, lmac;

	nic->vf_enabled[vf] = enable;

	if (vf >= nic->num_vf_en)
		return;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < nic->num_vf_en) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		goto unlock;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if its a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This gets appended by default on 81xx/83xx chips,
		 * for consistency enabling the same on 88xx pass2
		 * where this is introduced.
		 */
		if (pass2_silicon(nic->pdev))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		if (!pass1_silicon(nic->pdev))
			nic_enable_tunnel_parsing(nic, vf);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret) {
		nic_mbx_send_ack(nic, vf);
	} else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
		dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
			mbx.msg.msg, vf);
		nic_mbx_send_nack(nic, vf);
	}
unlock:
	nic->mbx_lock[vf] = false;
}

static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;
	int mbx;
	u64 intr;
	u8  vf, vf_per_mbx_reg = 64;

	if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
		mbx = 0;
	else
		mbx = 1;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * vf_per_mbx_reg));

			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
	return IRQ_HANDLED;
}

static int nic_enable_msix(struct nicpf *nic)
{
	int i, ret;

	nic->num_vec = pci_msix_vec_count(nic->pdev);

	nic->msix_entries = kmalloc_array(nic->num_vec,
					  sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!nic->msix_entries)
		return -ENOMEM;

	for (i = 0; i < nic->num_vec; i++)
		nic->msix_entries[i].entry = i;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		dev_err(&nic->pdev->dev,
			"Request for #%d msix vectors failed, returned %d\n",
			nic->num_vec, ret);
		kfree(nic->msix_entries);
		return ret;
	}

	nic->msix_enabled = 1;
	return 0;
}

static void nic_disable_msix(struct nicpf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		kfree(nic->msix_entries);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}
}

static int nic_register_interrupts(struct nicpf *nic)
{
	int i, ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret)
		return ret;

	/* Register mailbox interrupt handler */
	for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
		sprintf(nic->irq_name[i],
			"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));

		ret = request_irq(nic->msix_entries[i].vector,
				  nic_mbx_intr_handler, 0,
				  nic->irq_name[i], nic);
		if (ret)
			goto fail;

		nic->irq_allocated[i] = true;
	}

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return 0;

fail:
	dev_err(&nic->pdev->dev, "Request irq failed\n");
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
	return ret;
}

static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Secondary Qsets are needed only if CPU count is
	 * more than MAX_QUEUES_PER_QSET.
	 */
	if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
		return 0;

	/* Check if its a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}

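/* Worked example for nic_num_sqs_en(): with 8 primary VFs enabled,
 * 128 total VFs in the SRIOV capability and sqs_per_vf secondary Qsets
 * allowed per VF, up to min(128 - 8, 8 * sqs_per_vf) spare Qsets are
 * exposed as additional VFs.
 */
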
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < nic->num_vf_en)
		nic->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
}

/* Poll for BGX LMAC link status and update corresponding VF
 * if there is a change, valid only if internal L2 switch
 * is not present otherwise VF link is always treated as up
 */
static void nic_poll_for_link(struct work_struct *work)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	u8 vf, bgx, lmac;

	nic = container_of(work, struct nicpf, dwork.work);

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_enabled[vf])
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}

static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
	if (!nic->hw) {
		devm_kfree(dev, nic);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	/* Initialize hardware */
	err = nic_init_hw(nic);
	if (err)
		goto err_release_regions;

	nic_set_lmac_vf_mapping(nic);

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	nic_free_lmacmem(nic);
	devm_kfree(dev, nic->hw);
	devm_kfree(dev, nic);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work Queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);

	nic_free_lmacmem(nic);
	devm_kfree(&pdev->dev, nic->hw);
	devm_kfree(&pdev->dev, nic);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};

static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}

static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}

module_init(nic_init_module);
module_exit(nic_cleanup_module);