Commit | Line | Data |
---|---|---|
4863dea3 SG |
1 | /* |
2 | * Copyright (C) 2015 Cavium, Inc. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of version 2 of the GNU General Public License | |
6 | * as published by the Free Software Foundation. | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/interrupt.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/etherdevice.h> | |
13 | #include <linux/of.h> | |
14 | ||
15 | #include "nic_reg.h" | |
16 | #include "nic.h" | |
17 | #include "q_struct.h" | |
18 | #include "thunder_bgx.h" | |
19 | ||
20 | #define DRV_NAME "thunder-nic" | |
21 | #define DRV_VERSION "1.0" | |
22 | ||
/* Per-chip (88xx/81xx/83xx) hardware capability limits, filled in by
 * nic_get_hw_info() from the PCI subsystem device ID.
 */
struct hw_info {
	u8 bgx_cnt;		/* number of BGX blocks on this chip */
	u8 chans_per_lmac;	/* Rx/Tx channels per LMAC */
	u8 chans_per_bgx;	/* Rx/Tx chans */
	u8 chans_per_rgx;	/* set only for chips with an RGX block (81xx) */
	u8 chans_per_lbk;	/* loopback-interface channels (81xx/83xx) */
	u16 cpi_cnt;		/* total channel parse indices */
	u16 rssi_cnt;		/* total RSS indices */
	u16 rss_ind_tbl_size;	/* RSS indirection table entries per VF */
	u16 tl4_cnt;		/* TL4 schedulers: MAX_QUEUES_PER_QSET * totalvfs */
	u16 tl3_cnt;
	u8 tl2_cnt;
	u8 tl1_cnt;
	bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
};
38 | ||
/* Driver state for the NIC physical function. */
struct nicpf {
	struct pci_dev *pdev;
	struct hw_info *hw;	/* per-chip limits, allocated at probe */
	u8 node;		/* CN node id, reported to VFs in READY msg */
	unsigned int flags;
	u8 num_vf_en;		/* No of VF enabled */
	bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem *reg_base; /* Register start address */
	u8 num_sqs_en;		/* Secondary qsets enabled */
	u64 nicvf[MAX_NUM_VFS_SUPPORTED];	/* VF's nicvf pointer, exchanged via mailbox */
	u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF]; /* secondary qsets of each VF */
	u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];	/* primary VF owning a secondary qset */
	bool sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg pkind;
/* VF index <-> (BGX, LMAC) mapping packed into one byte: high nibble BGX,
 * low nibble LMAC.
 */
#define NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8 *vf_lmac_map;	/* kmalloc'ed in nic_get_hw_info() */
	struct delayed_work dwork;
	struct workqueue_struct *check_link;
	/* Per-LMAC link state caches, allocated alongside vf_lmac_map */
	u8 *link;
	u8 *duplex;
	u32 *speed;
	u16 cpi_base[MAX_NUM_VFS_SUPPORTED];	/* saved by nic_config_cpi() */
	u16 rssi_base[MAX_NUM_VFS_SUPPORTED];	/* saved by nic_config_cpi() */
	bool mbx_lock[MAX_NUM_VFS_SUPPORTED];	/* per-VF "mailbox busy" flag */

	/* MSI-X */
	bool msix_enabled;
	u8 num_vec;
	struct msix_entry *msix_entries;
	bool irq_allocated[NIC_PF_MSIX_VECTORS];
	char irq_name[NIC_PF_MSIX_VECTORS][20];
};
73 | ||
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
/* Export the ID table so udev/modprobe can auto-load on device match */
MODULE_DEVICE_TABLE(pci, nic_id_table);
85 | ||
86 | /* The Cavium ThunderX network controller can *only* be found in SoCs | |
87 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | |
88 | * registers on this platform are implicitly strongly ordered with respect | |
89 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use | |
90 | * with no memory barriers in this driver. The readq()/writeq() functions add | |
91 | * explicit ordering operation which in this case are redundant, and only | |
92 | * add overhead. | |
93 | */ | |
94 | ||
/* Register read/write APIs */

/* Write a 64-bit PF CSR; relaxed accessor is safe here (see comment above). */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}
100 | ||
/* Read a 64-bit PF CSR; relaxed accessor is safe here (see comment above). */
static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}
105 | ||
/* PF -> VF mailbox communication APIs */

/* Clear any pending mailbox interrupts and enable them for all
 * possible VFs (one 64-bit INT/ENA register pair per 64 VFs).
 */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);

/* Bitmask with the low @vfs bits set (all bits when vfs >= 64) */
#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))

	/* Clear it, to avoid spurious interrupts (if any) */
	nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));

	/* Enable mailbox interrupt for all VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
	/* One mailbox intr enable reg per 64 VFs */
	if (vf_cnt > 64) {
		nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
		nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
	}
}
126 | ||
/* Acknowledge (clear) the mailbox interrupt of @vf in interrupt
 * register @mbx_reg (0 for VFs 0-63, 1 for VFs 64+).
 */
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}
131 | ||
/* Return the CSR offset of @vf's mailbox registers. */
static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}
136 | ||
/* Send a mailbox message to VF
 * @vf: vf to which this message to be sent
 * @mbx: Message to be sent
 *
 * The message is two 64-bit words; which word must be written last
 * depends on silicon revision, because the write that lands in the
 * interrupt-triggering slot must complete the message.
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggerred
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->pdev)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}
161 | ||
162 | /* Responds to VF's READY message with VF's | |
163 | * ID, node, MAC address e.t.c | |
164 | * @vf: VF which sent READY message | |
165 | */ | |
166 | static void nic_mbx_send_ready(struct nicpf *nic, int vf) | |
167 | { | |
168 | union nic_mbx mbx = {}; | |
169 | int bgx_idx, lmac; | |
170 | const char *mac; | |
171 | ||
172 | mbx.nic_cfg.msg = NIC_MBOX_MSG_READY; | |
173 | mbx.nic_cfg.vf_id = vf; | |
174 | ||
175 | mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; | |
176 | ||
949b5331 | 177 | if (vf < nic->num_vf_en) { |
92dc8769 SG |
178 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
179 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
4863dea3 | 180 | |
92dc8769 SG |
181 | mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); |
182 | if (mac) | |
183 | ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); | |
184 | } | |
185 | mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false; | |
4863dea3 | 186 | mbx.nic_cfg.node_id = nic->node; |
d77a2384 | 187 | |
949b5331 | 188 | mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en; |
d77a2384 | 189 | |
4863dea3 SG |
190 | nic_send_msg_to_vf(nic, vf, &mbx); |
191 | } | |
192 | ||
/* ACKs VF's mailbox message
 * @vf: VF to which ACK to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
203 | ||
/* NACKs VF's mailbox message that PF is not able to
 * complete the action
 * @vf: VF to which NACK to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}
215 | ||
/* Flush all in flight receive packets to memory and
 * bring down an active RQ
 *
 * Returns 0 on success, 1 if the sync-done bit never asserted.
 * NOTE(review): this is a busy-wait of up to 65535 register reads with
 * no cpu_relax()/delay — presumably bounded by HW latency; confirm the
 * calling context tolerates the spin.
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;	/* 0xFFFF poll iterations */

	/* Kick off the software sync cycle */
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}
237 | ||
238 | /* Get BGX Rx/Tx stats and respond to VF's request */ | |
239 | static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) | |
240 | { | |
241 | int bgx_idx, lmac; | |
242 | union nic_mbx mbx = {}; | |
243 | ||
244 | bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); | |
245 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); | |
246 | ||
247 | mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; | |
248 | mbx.bgx_stats.vf_id = bgx->vf_id; | |
249 | mbx.bgx_stats.rx = bgx->rx; | |
250 | mbx.bgx_stats.idx = bgx->idx; | |
251 | if (bgx->rx) | |
252 | mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx, | |
253 | lmac, bgx->idx); | |
254 | else | |
255 | mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx, | |
256 | lmac, bgx->idx); | |
257 | nic_send_msg_to_vf(nic, bgx->vf_id, &mbx); | |
258 | } | |
259 | ||
/* Update hardware min/max frame size
 *
 * Validates the VF-requested frame size and, if it exceeds the current
 * PKIND maxlen, grows the shared PKIND config. Returns 0 on success,
 * 1 if the request is out of range. Note maxlen is never shrunk, so
 * one VF's large MTU persists for all.
 */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}
	/* Account for the Ethernet header on top of the MTU */
	new_frs += ETH_HLEN;
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}
277 | ||
/* Set minimum transmit packet size
 *
 * Programs the NIC-side min-packet field (bits [5:2], in units of
 * 4 bytes) of every LMAC config register; BGX pads the rest up to
 * the 64-byte minimum frame on the wire.
 */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac, max_lmac;
	u16 sdevid;
	u64 lmac_cfg;

	/* There is a issue in HW where-in while sending GSO sized
	 * pkts as part of TSO, if pkt len falls below this size
	 * NIC will zero PAD packet and also updates IP total length.
	 * Hence set this value to lessthan min pkt size of MAC+IP+TCP
	 * headers, BGX will do the padding to transmit 64 byte pkt.
	 */
	if (size > 52)
		size = 52;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	/* 81xx's RGX has only one LMAC */
	if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
	else
		max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);	/* clear min-size field */
		lmac_cfg |= ((size / 4) << 2);	/* units of 4 bytes */
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
308 | ||
/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 *
 * Also programs per-LMAC Tx credits, splitting the 48KB BGX Tx buffer
 * evenly among that BGX's LMACs. num_vf_en ends up as the number of
 * primary (LMAC-backed) VFs, capped at the SR-IOV total.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;	/* BGX not present/enabled */
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
				NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);

		/* On CN81XX there are only 8 VFs but max possible no of
		 * interfaces are 9.
		 */
		if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
			nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
			break;
		}
	}
}
351 | ||
/* Free the per-LMAC tracking arrays allocated in nic_get_hw_info().
 * kfree(NULL) is a no-op, so this is safe on partial allocation.
 */
static void nic_free_lmacmem(struct nicpf *nic)
{
	kfree(nic->vf_lmac_map);
	kfree(nic->link);
	kfree(nic->duplex);
	kfree(nic->speed);
}
359 | ||
/* Fill nic->hw with the chip's capability limits (keyed off the PCI
 * subsystem device ID) and allocate the per-LMAC tracking arrays.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (with any
 * partial allocations freed).
 *
 * NOTE(review): the switch has no default case — an unrecognized
 * subsystem ID leaves hw zeroed; presumably probe only matches known
 * devices, but confirm.
 */
static int nic_get_hw_info(struct nicpf *nic)
{
	u8 max_lmac;
	u16 sdevid;
	struct hw_info *hw = nic->hw;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);

	switch (sdevid) {
	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN88XX;
		hw->chans_per_lmac = 16;
		hw->chans_per_bgx = 128;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 4096;
		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 2;
		hw->tl1_per_bgx = true;
		break;
	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN81XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_rgx = 8;
		hw->chans_per_lbk = 24;
		hw->cpi_cnt = 512;
		hw->rssi_cnt = 256;
		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 64;
		hw->tl2_cnt = 16;
		hw->tl1_cnt = 10;
		hw->tl1_per_bgx = false;
		break;
	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN83XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_lbk = 64;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 1024;
		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 18;
		hw->tl1_per_bgx = false;
		break;
	}
	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);

	/* Allocate memory for LMAC tracking elements */
	max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
	nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->vf_lmac_map)
		goto error;
	nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->link)
		goto error;
	nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->duplex)
		goto error;
	nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
	if (!nic->speed)
		goto error;
	return 0;

error:
	nic_free_lmacmem(nic);
	return -ENOMEM;
}
431 | ||
/* NIC block numbers of the two BGX interfaces */
#define BGX0_BLOCK 8
#define BGX1_BLOCK 9

/* One-time NIC PF hardware initialization: capability discovery,
 * block enable, backpressure, TNS bypass (88xx), PKIND defaults,
 * Tx min-packet padding, interrupt timer, VLAN stripping and CQM
 * drop level. Returns 0 or a negative errno from nic_get_hw_info().
 */
static int nic_init_hw(struct nicpf *nic)
{
	int i, err;
	u64 cqm_cfg;

	/* Get HW capability info */
	err = nic_get_hw_info(nic);
	if (err)
		return err;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* TNS and TNS bypass modes are present only on 88xx */
	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
			      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
			      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
	}

	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);

	return 0;
}
492 | ||
/* Channel parse index configuration
 *
 * Programs the VF's Rx channel -> CPI -> RSSI mapping according to the
 * CPI algorithm the VF requested (none/VLAN/VLAN16/DSCP), then records
 * the VF's cpi_base/rssi_base for later RSS configuration.
 */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8 qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->pdev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		/* Don't advance rssi past the VF's last RQ */
		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	/* Remember bases for nic_config_rss() */
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
570 | ||
571 | /* Responsds to VF with its RSS indirection table size */ | |
572 | static void nic_send_rss_size(struct nicpf *nic, int vf) | |
573 | { | |
574 | union nic_mbx mbx = {}; | |
575 | u64 *msg; | |
576 | ||
577 | msg = (u64 *)&mbx; | |
578 | ||
579 | mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; | |
a5c3d498 | 580 | mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size; |
4863dea3 SG |
581 | nic_send_msg_to_vf(nic, vf, &mbx); |
582 | } | |
583 | ||
/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e hash::RQ mapping
 * - no of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8 qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		/* Upper bits of an indirection entry select a secondary
		 * qset (1-based); 0 means the VF's own (primary) qset.
		 */
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	/* Program the hash-bit count in the CPI (pass-1) or MPI config */
	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->pdev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
623 | ||
/* 4 level transmit side scheduler configutation
 * for TNS bypass mode
 *
 * Sample configuration for SQ0 on 88xx
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	struct hw_info *hw = nic->hw;
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
	int svf;

	/* For a secondary qset, scheduling follows the primary VF's LMAC */
	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* For 88xx 0-511 TL4 transmits via BGX0 and
	 * 512-1023 TL4s transmit via BGX1.
	 */
	if (hw->tl1_per_bgx) {
		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
		if (!sq->sqs_mode) {
			tl4 += (lmac * MAX_QUEUES_PER_QSET);
		} else {
			/* Find this VF's slot among the primary's SQS list */
			for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
					break;
			}
			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
			tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
			tl4 += (svf * MAX_QUEUES_PER_QSET);
		}
	} else {
		tl4 = (vnic * MAX_QUEUES_PER_QSET);
	}
	tl4 += sq_idx;

	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);

	/* On 88xx 0-127 channels are for BGX0 and
	 * 127-255 channels for BGX1.
	 *
	 * On 81xx/83xx TL3_CHAN reg should be configured with channel
	 * within LMAC i.e 0-7 and not the actual channel number like on 88xx
	 */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	if (hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	else
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);

	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;	/* 4 TL3s per TL2 */
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);

	/* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
	 * on 81xx/83xx TL2 needs to be configured to transmit to one of the
	 * possible LMACs.
	 *
	 * This register doesn't exist on 88xx.
	 */
	if (!hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
			      lmac + (bgx * MAX_LMAC_PER_BGX));
}
720 | ||
/* Send primary nicvf pointer to secondary QS's VF
 * @sqs: the secondary-qset VF to notify
 */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	/* pqs_vf[] maps a secondary qset back to its primary VF */
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}
730 | ||
/* Send SQS's nicvf pointer to primary QS's VF
 * @nicvf: request identifying the primary VF and which SQS it asks about
 */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}
742 | ||
743 | /* Find next available Qset that can be assigned as a | |
744 | * secondary Qset to a VF. | |
745 | */ | |
746 | static int nic_nxt_avail_sqs(struct nicpf *nic) | |
747 | { | |
748 | int sqs; | |
749 | ||
750 | for (sqs = 0; sqs < nic->num_sqs_en; sqs++) { | |
751 | if (!nic->sqs_used[sqs]) | |
752 | nic->sqs_used[sqs] = true; | |
753 | else | |
754 | continue; | |
755 | return sqs + nic->num_vf_en; | |
756 | } | |
757 | return -1; | |
758 | } | |
759 | ||
/* Allocate additional Qsets for requested VF
 *
 * Grants up to sqs->qs_count secondary qsets (possibly fewer, or zero
 * when none are enabled/free) and reports the granted count back to
 * the VF via mailbox.
 */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;	/* pool exhausted; grant what we have */
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}
785 | ||
/* Enable/disable internal loopback on the LMAC backing @lbk->vf_id.
 * Returns 0 on success, -1 if the VF has no LMAC (secondary qset VFs).
 */
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	/* Only primary (LMAC-backed) VFs support loopback */
	if (lbk->vf_id >= nic->num_vf_en)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}
800 | ||
3458c40d JJ |
801 | /* Reset statistics counters */ |
802 | static int nic_reset_stat_counters(struct nicpf *nic, | |
803 | int vf, struct reset_stat_cfg *cfg) | |
804 | { | |
805 | int i, stat, qnum; | |
806 | u64 reg_addr; | |
807 | ||
808 | for (i = 0; i < RX_STATS_ENUM_LAST; i++) { | |
809 | if (cfg->rx_stat_mask & BIT(i)) { | |
810 | reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 | | |
811 | (vf << NIC_QS_ID_SHIFT) | | |
812 | (i << 3); | |
813 | nic_reg_write(nic, reg_addr, 0); | |
814 | } | |
815 | } | |
816 | ||
817 | for (i = 0; i < TX_STATS_ENUM_LAST; i++) { | |
818 | if (cfg->tx_stat_mask & BIT(i)) { | |
819 | reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 | | |
820 | (vf << NIC_QS_ID_SHIFT) | | |
821 | (i << 3); | |
822 | nic_reg_write(nic, reg_addr, 0); | |
823 | } | |
824 | } | |
825 | ||
826 | for (i = 0; i <= 15; i++) { | |
827 | qnum = i >> 1; | |
828 | stat = i & 1 ? 1 : 0; | |
829 | reg_addr = (vf << NIC_QS_ID_SHIFT) | | |
830 | (qnum << NIC_Q_NUM_SHIFT) | (stat << 3); | |
831 | if (cfg->rq_stat_mask & BIT(i)) { | |
832 | reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1; | |
833 | nic_reg_write(nic, reg_addr, 0); | |
834 | } | |
835 | if (cfg->sq_stat_mask & BIT(i)) { | |
836 | reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1; | |
837 | nic_reg_write(nic, reg_addr, 0); | |
838 | } | |
839 | } | |
840 | return 0; | |
841 | } | |
842 | ||
e22e86ea ZS |
843 | static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf) |
844 | { | |
845 | u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT; | |
846 | u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) | | |
847 | (IPV4_PROT_DEF) << 16 | ET_PROT_DEF; | |
848 | ||
849 | /* Configure tunnel parsing parameters */ | |
850 | nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF, | |
851 | (1ULL << 63 | UDP_GENEVE_PORT_NUM)); | |
852 | nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF, | |
853 | ((7ULL << 61) | prot_def)); | |
854 | nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF, | |
855 | ((7ULL << 61) | prot_def)); | |
856 | nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1, | |
857 | ((1ULL << 63) | UDP_VXLAN_PORT_NUM)); | |
858 | nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF, | |
859 | ((0xfULL << 60) | vxlan_prot_def)); | |
860 | } | |
861 | ||
f406ce42 PF |
862 | static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) |
863 | { | |
864 | int bgx, lmac; | |
865 | ||
866 | nic->vf_enabled[vf] = enable; | |
867 | ||
868 | if (vf >= nic->num_vf_en) | |
869 | return; | |
870 | ||
871 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
872 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
873 | ||
874 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); | |
875 | } | |
876 | ||
4863dea3 SG |
/* Interrupt handler to handle mailbox messages from VFs.
 *
 * Reads the VF's mailbox out of the PF register space, dispatches on the
 * message type, and finishes with a generic ACK/NACK unless the handler
 * already sent its own reply (those cases `goto unlock`).
 * mbx_lock[vf] is held for the duration so the link-poll worker does not
 * send a competing message to the same VF.
 */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	/* Copy the whole mailbox (NIC_PF_VF_MAILBOX_SIZE u64 words) into
	 * the local union before interpreting any field.
	 */
	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		/* Sends its own reply, so skip the generic ACK below */
		nic_mbx_send_ready(nic, vf);
		if (vf < nic->num_vf_en) {
			/* Reset cached link state for a primary VF */
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		goto unlock;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if its a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This gets appended by default on 81xx/83xx chips,
		 * for consistency enabling the same on 88xx pass2
		 * where this is introduced.
		 */
		if (pass2_silicon(nic->pdev))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		if (!pass1_silicon(nic->pdev))
			nic_enable_tunnel_parsing(nic, vf);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		/* ret != 0 produces a NACK below */
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		/* Only primary VFs have an LMAC whose MAC can be set */
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		/* Replies with the RSS table size itself */
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	/* Generic completion: ACK on success, NACK on failure (but never
	 * NACK a READY message — its reply was already sent above).
	 */
	if (!ret) {
		nic_mbx_send_ack(nic, vf);
	} else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
		dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
			mbx.msg.msg, vf);
		nic_mbx_send_nack(nic, vf);
	}
unlock:
	nic->mbx_lock[vf] = false;
}
1032 | ||
52358aad | 1033 | static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) |
4863dea3 | 1034 | { |
52358aad SG |
1035 | struct nicpf *nic = (struct nicpf *)nic_irq; |
1036 | int mbx; | |
4863dea3 SG |
1037 | u64 intr; |
1038 | u8 vf, vf_per_mbx_reg = 64; | |
1039 | ||
52358aad SG |
1040 | if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector) |
1041 | mbx = 0; | |
1042 | else | |
1043 | mbx = 1; | |
1044 | ||
4863dea3 SG |
1045 | intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); |
1046 | dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); | |
1047 | for (vf = 0; vf < vf_per_mbx_reg; vf++) { | |
1048 | if (intr & (1ULL << vf)) { | |
1049 | dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", | |
1050 | vf + (mbx * vf_per_mbx_reg)); | |
92dc8769 | 1051 | |
4863dea3 SG |
1052 | nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); |
1053 | nic_clear_mbx_intr(nic, vf, mbx); | |
1054 | } | |
1055 | } | |
4863dea3 SG |
1056 | return IRQ_HANDLED; |
1057 | } | |
1058 | ||
1059 | static int nic_enable_msix(struct nicpf *nic) | |
1060 | { | |
1061 | int i, ret; | |
1062 | ||
52358aad SG |
1063 | nic->num_vec = pci_msix_vec_count(nic->pdev); |
1064 | ||
1065 | nic->msix_entries = kmalloc_array(nic->num_vec, | |
1066 | sizeof(struct msix_entry), | |
1067 | GFP_KERNEL); | |
1068 | if (!nic->msix_entries) | |
1069 | return -ENOMEM; | |
4863dea3 SG |
1070 | |
1071 | for (i = 0; i < nic->num_vec; i++) | |
1072 | nic->msix_entries[i].entry = i; | |
1073 | ||
1074 | ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); | |
1075 | if (ret) { | |
1076 | dev_err(&nic->pdev->dev, | |
52358aad SG |
1077 | "Request for #%d msix vectors failed, returned %d\n", |
1078 | nic->num_vec, ret); | |
1079 | kfree(nic->msix_entries); | |
4863dea3 SG |
1080 | return ret; |
1081 | } | |
1082 | ||
1083 | nic->msix_enabled = 1; | |
1084 | return 0; | |
1085 | } | |
1086 | ||
1087 | static void nic_disable_msix(struct nicpf *nic) | |
1088 | { | |
1089 | if (nic->msix_enabled) { | |
1090 | pci_disable_msix(nic->pdev); | |
52358aad | 1091 | kfree(nic->msix_entries); |
4863dea3 SG |
1092 | nic->msix_enabled = 0; |
1093 | nic->num_vec = 0; | |
1094 | } | |
1095 | } | |
1096 | ||
1097 | static void nic_free_all_interrupts(struct nicpf *nic) | |
1098 | { | |
1099 | int irq; | |
1100 | ||
1101 | for (irq = 0; irq < nic->num_vec; irq++) { | |
1102 | if (nic->irq_allocated[irq]) | |
1103 | free_irq(nic->msix_entries[irq].vector, nic); | |
1104 | nic->irq_allocated[irq] = false; | |
1105 | } | |
1106 | } | |
1107 | ||
1108 | static int nic_register_interrupts(struct nicpf *nic) | |
1109 | { | |
52358aad | 1110 | int i, ret; |
4863dea3 SG |
1111 | |
1112 | /* Enable MSI-X */ | |
1113 | ret = nic_enable_msix(nic); | |
1114 | if (ret) | |
1115 | return ret; | |
1116 | ||
52358aad SG |
1117 | /* Register mailbox interrupt handler */ |
1118 | for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) { | |
1119 | sprintf(nic->irq_name[i], | |
1120 | "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); | |
4863dea3 | 1121 | |
52358aad SG |
1122 | ret = request_irq(nic->msix_entries[i].vector, |
1123 | nic_mbx_intr_handler, 0, | |
1124 | nic->irq_name[i], nic); | |
1125 | if (ret) | |
1126 | goto fail; | |
4863dea3 | 1127 | |
52358aad SG |
1128 | nic->irq_allocated[i] = true; |
1129 | } | |
4863dea3 SG |
1130 | |
1131 | /* Enable mailbox interrupt */ | |
1132 | nic_enable_mbx_intr(nic); | |
1133 | return 0; | |
1134 | ||
1135 | fail: | |
1136 | dev_err(&nic->pdev->dev, "Request irq failed\n"); | |
1137 | nic_free_all_interrupts(nic); | |
52358aad | 1138 | nic_disable_msix(nic); |
4863dea3 SG |
1139 | return ret; |
1140 | } | |
1141 | ||
/* Full interrupt teardown: free all requested IRQs, then disable MSI-X */
static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}
1147 | ||
92dc8769 SG |
1148 | static int nic_num_sqs_en(struct nicpf *nic, int vf_en) |
1149 | { | |
1150 | int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE; | |
1151 | u16 total_vf; | |
1152 | ||
3a397ebe SG |
1153 | /* Secondary Qsets are needed only if CPU count is |
1154 | * morethan MAX_QUEUES_PER_QSET. | |
1155 | */ | |
1156 | if (num_online_cpus() <= MAX_QUEUES_PER_QSET) | |
1157 | return 0; | |
1158 | ||
92dc8769 SG |
1159 | /* Check if its a multi-node environment */ |
1160 | if (nr_node_ids > 1) | |
1161 | sqs_per_vf = MAX_SQS_PER_VF; | |
1162 | ||
1163 | pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV); | |
1164 | pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf); | |
1165 | return min(total_vf - vf_en, vf_en * sqs_per_vf); | |
1166 | } | |
1167 | ||
4863dea3 SG |
1168 | static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) |
1169 | { | |
1170 | int pos = 0; | |
92dc8769 | 1171 | int vf_en; |
4863dea3 SG |
1172 | int err; |
1173 | u16 total_vf_cnt; | |
1174 | ||
1175 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | |
1176 | if (!pos) { | |
1177 | dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); | |
1178 | return -ENODEV; | |
1179 | } | |
1180 | ||
1181 | pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); | |
1182 | if (total_vf_cnt < nic->num_vf_en) | |
1183 | nic->num_vf_en = total_vf_cnt; | |
1184 | ||
1185 | if (!total_vf_cnt) | |
1186 | return 0; | |
1187 | ||
92dc8769 SG |
1188 | vf_en = nic->num_vf_en; |
1189 | nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en); | |
1190 | vf_en += nic->num_sqs_en; | |
1191 | ||
1192 | err = pci_enable_sriov(pdev, vf_en); | |
4863dea3 SG |
1193 | if (err) { |
1194 | dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", | |
92dc8769 | 1195 | vf_en); |
4863dea3 SG |
1196 | nic->num_vf_en = 0; |
1197 | return err; | |
1198 | } | |
1199 | ||
1200 | dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", | |
92dc8769 | 1201 | vf_en); |
4863dea3 SG |
1202 | |
1203 | nic->flags |= NIC_SRIOV_ENABLED; | |
1204 | return 0; | |
1205 | } | |
1206 | ||
1207 | /* Poll for BGX LMAC link status and update corresponding VF | |
1208 | * if there is a change, valid only if internal L2 switch | |
1209 | * is not present otherwise VF link is always treated as up | |
1210 | */ | |
1211 | static void nic_poll_for_link(struct work_struct *work) | |
1212 | { | |
1213 | union nic_mbx mbx = {}; | |
1214 | struct nicpf *nic; | |
1215 | struct bgx_link_status link; | |
1216 | u8 vf, bgx, lmac; | |
1217 | ||
1218 | nic = container_of(work, struct nicpf, dwork.work); | |
1219 | ||
1220 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | |
1221 | ||
f406ce42 | 1222 | for (vf = 0; vf < nic->num_vf_en; vf++) { |
4863dea3 SG |
1223 | /* Poll only if VF is UP */ |
1224 | if (!nic->vf_enabled[vf]) | |
1225 | continue; | |
1226 | ||
1227 | /* Get BGX, LMAC indices for the VF */ | |
1228 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
1229 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | |
1230 | /* Get interface link status */ | |
1231 | bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); | |
1232 | ||
1233 | /* Inform VF only if link status changed */ | |
1234 | if (nic->link[vf] == link.link_up) | |
1235 | continue; | |
1236 | ||
1237 | if (!nic->mbx_lock[vf]) { | |
1238 | nic->link[vf] = link.link_up; | |
1239 | nic->duplex[vf] = link.duplex; | |
1240 | nic->speed[vf] = link.speed; | |
1241 | ||
1242 | /* Send a mbox message to VF with current link status */ | |
1243 | mbx.link_status.link_up = link.link_up; | |
1244 | mbx.link_status.duplex = link.duplex; | |
1245 | mbx.link_status.speed = link.speed; | |
1246 | nic_send_msg_to_vf(nic, vf, &mbx); | |
1247 | } | |
1248 | } | |
1249 | queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); | |
1250 | } | |
1251 | ||
/* PCI probe for the Thunder NIC physical function.
 *
 * Brings the device up in strict order: PCI enable, regions, DMA masks,
 * BAR mapping, HW init, VF<->LMAC mapping, interrupts, SR-IOV, and
 * finally the link-poll workqueue. The goto ladder unwinds exactly the
 * steps that completed.
 */
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	/* The mailbox union must fit the fixed hardware mailbox window */
	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
	if (!nic->hw) {
		devm_kfree(dev, nic);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Device DMA is limited to 48-bit addressing */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* NUMA node id, used by the BGX layer for per-node lookups */
	nic->node = nic_get_node_id(pdev);

	/* Initialize hardware */
	err = nic_init_hw(nic);
	if (err)
		goto err_release_regions;

	nic_set_lmac_vf_mapping(nic);

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	nic_free_lmacmem(nic);
	devm_kfree(dev, nic->hw);
	devm_kfree(dev, nic);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1354 | ||
/* PCI remove: tear down in reverse order of nic_probe() — SR-IOV first,
 * then the link-poll worker, interrupts, regions, and memory.
 */
static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work Queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);

	nic_free_lmacmem(nic);
	devm_kfree(&pdev->dev, nic->hw);
	devm_kfree(&pdev->dev, nic);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1378 | ||
/* PCI driver glue for the Thunder NIC physical function */
static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};
1385 | ||
/* Module entry: log the driver banner, then register the PCI driver */
static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}
1392 | ||
/* Module exit: unregister the PCI driver (triggers nic_remove per device) */
static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}
1397 | ||
/* Wire the module entry/exit points into the kernel module machinery */
module_init(nic_init_module);
module_exit(nic_cleanup_module);