Commit | Line | Data |
---|---|---|
4863dea3 SG |
1 | /* |
2 | * Copyright (C) 2015 Cavium, Inc. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of version 2 of the GNU General Public License | |
6 | * as published by the Free Software Foundation. | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/interrupt.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/netdevice.h> | |
aa2e259b | 13 | #include <linux/if_vlan.h> |
4863dea3 SG |
14 | #include <linux/etherdevice.h> |
15 | #include <linux/ethtool.h> | |
16 | #include <linux/log2.h> | |
17 | #include <linux/prefetch.h> | |
18 | #include <linux/irq.h> | |
19 | ||
20 | #include "nic_reg.h" | |
21 | #include "nic.h" | |
22 | #include "nicvf_queues.h" | |
23 | #include "thunder_bgx.h" | |
24 | ||
25 | #define DRV_NAME "thunder-nicvf" | |
26 | #define DRV_VERSION "1.0" | |
27 | ||
28 | /* Supported devices */ | |
29 | static const struct pci_device_id nicvf_id_table[] = { | |
30 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | |
31 | PCI_DEVICE_ID_THUNDER_NIC_VF, | |
f7ff0ae8 SG |
32 | PCI_VENDOR_ID_CAVIUM, |
33 | PCI_SUBSYS_DEVID_88XX_NIC_VF) }, | |
4863dea3 SG |
34 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, |
35 | PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, | |
f7ff0ae8 SG |
36 | PCI_VENDOR_ID_CAVIUM, |
37 | PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) }, | |
38 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | |
39 | PCI_DEVICE_ID_THUNDER_NIC_VF, | |
40 | PCI_VENDOR_ID_CAVIUM, | |
41 | PCI_SUBSYS_DEVID_81XX_NIC_VF) }, | |
42 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | |
43 | PCI_DEVICE_ID_THUNDER_NIC_VF, | |
44 | PCI_VENDOR_ID_CAVIUM, | |
45 | PCI_SUBSYS_DEVID_83XX_NIC_VF) }, | |
4863dea3 SG |
46 | { 0, } /* end of table */ |
47 | }; | |
48 | ||
49 | MODULE_AUTHOR("Sunil Goutham"); | |
50 | MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver"); | |
51 | MODULE_LICENSE("GPL v2"); | |
52 | MODULE_VERSION(DRV_VERSION); | |
53 | MODULE_DEVICE_TABLE(pci, nicvf_id_table); | |
54 | ||
55 | static int debug = 0x00; | |
56 | module_param(debug, int, 0644); | |
57 | MODULE_PARM_DESC(debug, "Debug message level bitmap"); | |
58 | ||
59 | static int cpi_alg = CPI_ALG_NONE; | |
60 | module_param(cpi_alg, int, S_IRUGO); | |
61 | MODULE_PARM_DESC(cpi_alg, | |
62 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); | |
63 | ||
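/* Map a queue index that is local to this Qset to the queue index the
 * network stack sees; queues of secondary Qsets are placed after the
 * primary Qset's queues (e.g. with MAX_CMP_QUEUES_PER_QS == 8, queue 2
 * of the first secondary Qset becomes netdev queue 10).
 */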
92dc8769 SG |
64 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) |
65 | { | |
66 | if (nic->sqs_mode) | |
67 | return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS); | |
68 | else | |
69 | return qidx; | |
70 | } | |
71 | ||
4863dea3 SG |
72 | static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, |
73 | struct sk_buff *skb) | |
74 | { | |
75 | if (skb->len <= 64) | |
76 | nic->drv_stats.rx_frames_64++; | |
77 | else if (skb->len <= 127) | |
78 | nic->drv_stats.rx_frames_127++; | |
79 | else if (skb->len <= 255) | |
80 | nic->drv_stats.rx_frames_255++; | |
81 | else if (skb->len <= 511) | |
82 | nic->drv_stats.rx_frames_511++; | |
83 | else if (skb->len <= 1023) | |
84 | nic->drv_stats.rx_frames_1023++; | |
85 | else if (skb->len <= 1518) | |
86 | nic->drv_stats.rx_frames_1518++; | |
87 | else | |
88 | nic->drv_stats.rx_frames_jumbo++; | |
89 | } | |
90 | ||
91 | /* The Cavium ThunderX network controller can *only* be found in SoCs | |
92 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | |
93 | * registers on this platform are implicitly strongly ordered with respect | |
94 | * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use | |
95 | * with no memory barriers in this driver. The readq()/writeq() functions add | |
96 | * explicit ordering operations which in this case are redundant, and only | 
97 | * add overhead. | |
98 | */ | |
99 | ||
100 | /* Register read/write APIs */ | |
101 | void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val) | |
102 | { | |
103 | writeq_relaxed(val, nic->reg_base + offset); | |
104 | } | |
105 | ||
106 | u64 nicvf_reg_read(struct nicvf *nic, u64 offset) | |
107 | { | |
108 | return readq_relaxed(nic->reg_base + offset); | |
109 | } | |
110 | ||
111 | void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, | |
112 | u64 qidx, u64 val) | |
113 | { | |
114 | void __iomem *addr = nic->reg_base + offset; | |
115 | ||
116 | writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT)); | |
117 | } | |
118 | ||
119 | u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) | |
120 | { | |
121 | void __iomem *addr = nic->reg_base + offset; | |
122 | ||
123 | return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT)); | |
124 | } | |
125 | ||
126 | /* VF -> PF mailbox communication */ | |
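/* A mailbox message is two 64-bit words; it is written into the VF-to-PF
 * mailbox registers as two back-to-back register writes.
 */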
2cd2a196 AM |
127 | static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx) |
128 | { | |
129 | u64 *msg = (u64 *)mbx; | |
130 | ||
131 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); | |
132 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); | |
133 | } | |
134 | ||
4863dea3 SG |
135 | int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) |
136 | { | |
137 | int timeout = NIC_MBOX_MSG_TIMEOUT; | |
138 | int sleep = 10; | |
4863dea3 SG |
139 | |
140 | nic->pf_acked = false; | |
141 | nic->pf_nacked = false; | |
142 | ||
2cd2a196 | 143 | nicvf_write_to_mbx(nic, mbx); |
4863dea3 SG |
144 | |
145 | /* Wait for previous message to be acked, timeout 2sec */ | |
146 | while (!nic->pf_acked) { | |
ecae29cb RB |
147 | if (nic->pf_nacked) { |
148 | netdev_err(nic->netdev, | |
149 | "PF NACK to mbox msg 0x%02x from VF%d\n", | |
150 | (mbx->msg.msg & 0xFF), nic->vf_id); | |
4863dea3 | 151 | return -EINVAL; |
ecae29cb | 152 | } |
4863dea3 SG |
153 | msleep(sleep); |
154 | if (nic->pf_acked) | |
155 | break; | |
156 | timeout -= sleep; | |
157 | if (!timeout) { | |
158 | netdev_err(nic->netdev, | |
ecae29cb | 159 | "PF didn't ACK to mbox msg 0x%02x from VF%d\n", |
4863dea3 SG |
160 | (mbx->msg.msg & 0xFF), nic->vf_id); |
161 | return -EBUSY; | |
162 | } | |
163 | } | |
164 | return 0; | |
165 | } | |
166 | ||
167 | /* Checks if VF is able to communicate with PF | 
168 | * and also gets the VNIC number this VF is associated with. | 
169 | */ | |
170 | static int nicvf_check_pf_ready(struct nicvf *nic) | |
171 | { | |
2cd2a196 AM |
172 | union nic_mbx mbx = {}; |
173 | ||
174 | mbx.msg.msg = NIC_MBOX_MSG_READY; | |
6051cba7 SG |
175 | if (nicvf_send_msg_to_pf(nic, &mbx)) { |
176 | netdev_err(nic->netdev, | |
177 | "PF didn't respond to READY msg\n"); | |
178 | return 0; | |
4863dea3 | 179 | } |
6051cba7 | 180 | |
4863dea3 SG |
181 | return 1; |
182 | } | |
183 | ||
fd7ec062 AM |
184 | static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) |
185 | { | |
186 | if (bgx->rx) | |
187 | nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats; | |
188 | else | |
189 | nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats; | |
190 | } | |
191 | ||
4863dea3 SG |
192 | static void nicvf_handle_mbx_intr(struct nicvf *nic) |
193 | { | |
194 | union nic_mbx mbx = {}; | |
195 | u64 *mbx_data; | |
196 | u64 mbx_addr; | |
197 | int i; | |
198 | ||
199 | mbx_addr = NIC_VF_PF_MAILBOX_0_1; | |
200 | mbx_data = (u64 *)&mbx; | |
201 | ||
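/* Copy the message out of the PF-to-VF mailbox registers into a local
 * union before decoding it.
 */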
202 | for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { | |
203 | *mbx_data = nicvf_reg_read(nic, mbx_addr); | |
204 | mbx_data++; | |
205 | mbx_addr += sizeof(u64); | |
206 | } | |
207 | ||
208 | netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg); | |
209 | switch (mbx.msg.msg) { | |
210 | case NIC_MBOX_MSG_READY: | |
6051cba7 | 211 | nic->pf_acked = true; |
4863dea3 SG |
212 | nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; |
213 | nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; | |
214 | nic->node = mbx.nic_cfg.node_id; | |
bd049a90 PF |
215 | if (!nic->set_mac_pending) |
216 | ether_addr_copy(nic->netdev->dev_addr, | |
217 | mbx.nic_cfg.mac_addr); | |
92dc8769 | 218 | nic->sqs_mode = mbx.nic_cfg.sqs_mode; |
d77a2384 | 219 | nic->loopback_supported = mbx.nic_cfg.loopback_supported; |
4863dea3 SG |
220 | nic->link_up = false; |
221 | nic->duplex = 0; | |
222 | nic->speed = 0; | |
223 | break; | |
224 | case NIC_MBOX_MSG_ACK: | |
225 | nic->pf_acked = true; | |
226 | break; | |
227 | case NIC_MBOX_MSG_NACK: | |
228 | nic->pf_nacked = true; | |
229 | break; | |
230 | case NIC_MBOX_MSG_RSS_SIZE: | |
231 | nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size; | |
232 | nic->pf_acked = true; | |
233 | break; | |
234 | case NIC_MBOX_MSG_BGX_STATS: | |
235 | nicvf_read_bgx_stats(nic, &mbx.bgx_stats); | |
236 | nic->pf_acked = true; | |
4863dea3 SG |
237 | break; |
238 | case NIC_MBOX_MSG_BGX_LINK_CHANGE: | |
239 | nic->pf_acked = true; | |
240 | nic->link_up = mbx.link_status.link_up; | |
241 | nic->duplex = mbx.link_status.duplex; | |
242 | nic->speed = mbx.link_status.speed; | |
243 | if (nic->link_up) { | |
244 | netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n", | |
245 | nic->netdev->name, nic->speed, | |
246 | nic->duplex == DUPLEX_FULL ? | |
247 | "Full duplex" : "Half duplex"); | |
248 | netif_carrier_on(nic->netdev); | |
b49087dd | 249 | netif_tx_start_all_queues(nic->netdev); |
4863dea3 SG |
250 | } else { |
251 | netdev_info(nic->netdev, "%s: Link is Down\n", | |
252 | nic->netdev->name); | |
253 | netif_carrier_off(nic->netdev); | |
254 | netif_tx_stop_all_queues(nic->netdev); | |
255 | } | |
256 | break; | |
92dc8769 SG |
257 | case NIC_MBOX_MSG_ALLOC_SQS: |
258 | nic->sqs_count = mbx.sqs_alloc.qs_count; | |
259 | nic->pf_acked = true; | |
260 | break; | |
261 | case NIC_MBOX_MSG_SNICVF_PTR: | |
262 | /* Primary VF: make note of secondary VF's pointer | |
263 | * to be used during packet transmission. | 
264 | */ | |
265 | nic->snicvf[mbx.nicvf.sqs_id] = | |
266 | (struct nicvf *)mbx.nicvf.nicvf; | |
267 | nic->pf_acked = true; | |
268 | break; | |
269 | case NIC_MBOX_MSG_PNICVF_PTR: | |
270 | /* Secondary VF/Qset: make note of primary VF's pointer | |
271 | * to be used during packet reception, to hand over the packet | 
272 | * to primary VF's netdev. | |
273 | */ | |
274 | nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf; | |
275 | nic->pf_acked = true; | |
276 | break; | |
4863dea3 SG |
277 | default: |
278 | netdev_err(nic->netdev, | |
279 | "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); | |
280 | break; | |
281 | } | |
282 | nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0); | |
283 | } | |
284 | ||
285 | static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev) | |
286 | { | |
287 | union nic_mbx mbx = {}; | |
4863dea3 SG |
288 | |
289 | mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; | |
290 | mbx.mac.vf_id = nic->vf_id; | |
e610cb32 | 291 | ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr); |
4863dea3 SG |
292 | |
293 | return nicvf_send_msg_to_pf(nic, &mbx); | |
294 | } | |
295 | ||
fd7ec062 | 296 | static void nicvf_config_cpi(struct nicvf *nic) |
4863dea3 SG |
297 | { |
298 | union nic_mbx mbx = {}; | |
299 | ||
300 | mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; | |
301 | mbx.cpi_cfg.vf_id = nic->vf_id; | |
302 | mbx.cpi_cfg.cpi_alg = nic->cpi_alg; | |
303 | mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; | |
304 | ||
305 | nicvf_send_msg_to_pf(nic, &mbx); | |
306 | } | |
307 | ||
fd7ec062 | 308 | static void nicvf_get_rss_size(struct nicvf *nic) |
4863dea3 SG |
309 | { |
310 | union nic_mbx mbx = {}; | |
311 | ||
312 | mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; | |
313 | mbx.rss_size.vf_id = nic->vf_id; | |
314 | nicvf_send_msg_to_pf(nic, &mbx); | |
315 | } | |
316 | ||
317 | void nicvf_config_rss(struct nicvf *nic) | |
318 | { | |
319 | union nic_mbx mbx = {}; | |
320 | struct nicvf_rss_info *rss = &nic->rss_info; | |
321 | int ind_tbl_len = rss->rss_size; | |
322 | int i, nextq = 0; | |
323 | ||
324 | mbx.rss_cfg.vf_id = nic->vf_id; | |
325 | mbx.rss_cfg.hash_bits = rss->hash_bits; | |
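/* The indirection table may be larger than what fits in a single mailbox
 * message, so it is sent to the PF in chunks of up to
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries.
 */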
326 | while (ind_tbl_len) { | |
327 | mbx.rss_cfg.tbl_offset = nextq; | |
328 | mbx.rss_cfg.tbl_len = min(ind_tbl_len, | |
329 | RSS_IND_TBL_LEN_PER_MBX_MSG); | |
330 | mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ? | |
331 | NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG; | |
332 | ||
333 | for (i = 0; i < mbx.rss_cfg.tbl_len; i++) | |
334 | mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++]; | |
335 | ||
336 | nicvf_send_msg_to_pf(nic, &mbx); | |
337 | ||
338 | ind_tbl_len -= mbx.rss_cfg.tbl_len; | |
339 | } | |
340 | } | |
341 | ||
342 | void nicvf_set_rss_key(struct nicvf *nic) | |
343 | { | |
344 | struct nicvf_rss_info *rss = &nic->rss_info; | |
345 | u64 key_addr = NIC_VNIC_RSS_KEY_0_4; | |
346 | int idx; | |
347 | ||
348 | for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { | |
349 | nicvf_reg_write(nic, key_addr, rss->key[idx]); | |
350 | key_addr += sizeof(u64); | |
351 | } | |
352 | } | |
353 | ||
354 | static int nicvf_rss_init(struct nicvf *nic) | |
355 | { | |
356 | struct nicvf_rss_info *rss = &nic->rss_info; | |
357 | int idx; | |
358 | ||
359 | nicvf_get_rss_size(nic); | |
360 | ||
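/* RSS is only used with the default CPI algorithm; if the user selected
 * a different CPI algorithm via the module parameter, leave RSS disabled.
 */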
38bb5d4f | 361 | if (cpi_alg != CPI_ALG_NONE) { |
4863dea3 SG |
362 | rss->enable = false; |
363 | rss->hash_bits = 0; | |
364 | return 0; | |
365 | } | |
366 | ||
367 | rss->enable = true; | |
368 | ||
0052c92f | 369 | netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64)); |
4863dea3 SG |
370 | nicvf_set_rss_key(nic); |
371 | ||
372 | rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; | |
373 | nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg); | |
374 | ||
375 | rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size)); | |
376 | ||
377 | for (idx = 0; idx < rss->rss_size; idx++) | |
378 | rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, | |
92dc8769 | 379 | nic->rx_queues); |
4863dea3 SG |
380 | nicvf_config_rss(nic); |
381 | return 1; | |
382 | } | |
383 | ||
92dc8769 SG |
384 | /* Request PF to allocate additional Qsets */ |
385 | static void nicvf_request_sqs(struct nicvf *nic) | |
386 | { | |
387 | union nic_mbx mbx = {}; | |
388 | int sqs; | |
389 | int sqs_count = nic->sqs_count; | |
390 | int rx_queues = 0, tx_queues = 0; | |
391 | ||
392 | /* Only primary VF should request */ | |
393 | if (nic->sqs_mode || !nic->sqs_count) | |
394 | return; | |
395 | ||
396 | mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS; | |
397 | mbx.sqs_alloc.vf_id = nic->vf_id; | |
398 | mbx.sqs_alloc.qs_count = nic->sqs_count; | |
399 | if (nicvf_send_msg_to_pf(nic, &mbx)) { | |
400 | /* No response from PF */ | |
401 | nic->sqs_count = 0; | |
402 | return; | |
403 | } | |
404 | ||
405 | /* Return if no Secondary Qsets available */ | |
406 | if (!nic->sqs_count) | |
407 | return; | |
408 | ||
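/* Work out how many Rx/Tx queues exceed the primary Qset's capacity;
 * the excess is distributed across the secondary Qsets below.
 */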
409 | if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS) | |
410 | rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS; | |
411 | if (nic->tx_queues > MAX_SND_QUEUES_PER_QS) | |
412 | tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS; | |
413 | ||
414 | /* Set no of Rx/Tx queues in each of the SQsets */ | |
415 | for (sqs = 0; sqs < nic->sqs_count; sqs++) { | |
416 | mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR; | |
417 | mbx.nicvf.vf_id = nic->vf_id; | |
418 | mbx.nicvf.sqs_id = sqs; | |
419 | nicvf_send_msg_to_pf(nic, &mbx); | |
420 | ||
421 | nic->snicvf[sqs]->sqs_id = sqs; | |
422 | if (rx_queues > MAX_RCV_QUEUES_PER_QS) { | |
423 | nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS; | |
424 | rx_queues -= MAX_RCV_QUEUES_PER_QS; | |
425 | } else { | |
426 | nic->snicvf[sqs]->qs->rq_cnt = rx_queues; | |
427 | rx_queues = 0; | |
428 | } | |
429 | ||
430 | if (tx_queues > MAX_SND_QUEUES_PER_QS) { | |
431 | nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS; | |
432 | tx_queues -= MAX_SND_QUEUES_PER_QS; | |
433 | } else { | |
434 | nic->snicvf[sqs]->qs->sq_cnt = tx_queues; | |
435 | tx_queues = 0; | |
436 | } | |
437 | ||
438 | nic->snicvf[sqs]->qs->cq_cnt = | |
439 | max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt); | |
440 | ||
441 | /* Initialize secondary Qset's queues and its interrupts */ | |
442 | nicvf_open(nic->snicvf[sqs]->netdev); | |
443 | } | |
444 | ||
445 | /* Update stack with actual Rx/Tx queue count allocated */ | |
446 | if (sqs_count != nic->sqs_count) | |
447 | nicvf_set_real_num_queues(nic->netdev, | |
448 | nic->tx_queues, nic->rx_queues); | |
449 | } | |
450 | ||
451 | /* Send this Qset's nicvf pointer to PF. | |
452 | * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs | 
453 | * so that packets received by these Qsets can use primary VF's netdev. | 
454 | */ | |
455 | static void nicvf_send_vf_struct(struct nicvf *nic) | |
456 | { | |
457 | union nic_mbx mbx = {}; | |
458 | ||
459 | mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR; | |
460 | mbx.nicvf.sqs_mode = nic->sqs_mode; | |
461 | mbx.nicvf.nicvf = (u64)nic; | |
462 | nicvf_send_msg_to_pf(nic, &mbx); | |
463 | } | |
464 | ||
465 | static void nicvf_get_primary_vf_struct(struct nicvf *nic) | |
466 | { | |
467 | union nic_mbx mbx = {}; | |
468 | ||
469 | mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR; | |
470 | nicvf_send_msg_to_pf(nic, &mbx); | |
471 | } | |
472 | ||
4863dea3 SG |
473 | int nicvf_set_real_num_queues(struct net_device *netdev, |
474 | int tx_queues, int rx_queues) | |
475 | { | |
476 | int err = 0; | |
477 | ||
478 | err = netif_set_real_num_tx_queues(netdev, tx_queues); | |
479 | if (err) { | |
480 | netdev_err(netdev, | |
481 | "Failed to set no of Tx queues: %d\n", tx_queues); | |
482 | return err; | |
483 | } | |
484 | ||
485 | err = netif_set_real_num_rx_queues(netdev, rx_queues); | |
486 | if (err) | |
487 | netdev_err(netdev, | |
488 | "Failed to set no of Rx queues: %d\n", rx_queues); | |
489 | return err; | |
490 | } | |
491 | ||
492 | static int nicvf_init_resources(struct nicvf *nic) | |
493 | { | |
494 | int err; | |
2cd2a196 AM |
495 | union nic_mbx mbx = {}; |
496 | ||
497 | mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; | |
4863dea3 SG |
498 | |
499 | /* Enable Qset */ | |
500 | nicvf_qset_config(nic, true); | |
501 | ||
502 | /* Initialize queues and HW for data transfer */ | |
503 | err = nicvf_config_data_transfer(nic, true); | |
504 | if (err) { | |
505 | netdev_err(nic->netdev, | |
506 | "Failed to alloc/config VF's QSet resources\n"); | |
507 | return err; | |
508 | } | |
509 | ||
510 | /* Send VF config done msg to PF */ | |
2cd2a196 | 511 | nicvf_write_to_mbx(nic, &mbx); |
4863dea3 SG |
512 | |
513 | return 0; | |
514 | } | |
515 | ||
516 | static void nicvf_snd_pkt_handler(struct net_device *netdev, | |
517 | struct cmp_queue *cq, | |
c43548d2 SG |
518 | struct cqe_send_t *cqe_tx, |
519 | int cqe_type, int budget) | |
4863dea3 SG |
520 | { |
521 | struct sk_buff *skb = NULL; | |
522 | struct nicvf *nic = netdev_priv(netdev); | |
523 | struct snd_queue *sq; | |
524 | struct sq_hdr_subdesc *hdr; | |
7ceb8a13 | 525 | struct sq_hdr_subdesc *tso_sqe; |
4863dea3 SG |
526 | |
527 | sq = &nic->qs->sq[cqe_tx->sq_idx]; | |
528 | ||
529 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); | |
530 | if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) | |
531 | return; | |
532 | ||
533 | netdev_dbg(nic->netdev, | |
534 | "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", | |
535 | __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, | |
536 | cqe_tx->sqe_ptr, hdr->subdesc_cnt); | |
537 | ||
4863dea3 SG |
538 | nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); |
539 | skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; | |
4863dea3 | 540 | if (skb) { |
7ceb8a13 SG |
541 | /* Check for dummy descriptor used for HW TSO offload on 88xx */ |
542 | if (hdr->dont_send) { | |
543 | /* Get actual TSO descriptors and free them */ | |
544 | tso_sqe = | |
545 | (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); | |
546 | nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); | |
547 | } | |
40fb5f8a | 548 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); |
4863dea3 | 549 | prefetch(skb); |
c43548d2 | 550 | napi_consume_skb(skb, budget); |
143ceb0b | 551 | sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; |
40fb5f8a | 552 | } else { |
7ceb8a13 SG |
553 | /* In case of SW TSO on 88xx, only the last segment will have
554 | * an SKB attached, so just free SQEs here.
40fb5f8a SG |
555 | */ |
556 | if (!nic->hw_tso) | |
557 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | |
4863dea3 SG |
558 | } |
559 | } | |
560 | ||
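/* Propagate the RSS hash computed by hardware into the skb so the stack
 * can reuse it instead of hashing the packet again.
 */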
38bb5d4f SG |
561 | static inline void nicvf_set_rxhash(struct net_device *netdev, |
562 | struct cqe_rx_t *cqe_rx, | |
563 | struct sk_buff *skb) | |
564 | { | |
565 | u8 hash_type; | |
566 | u32 hash; | |
567 | ||
568 | if (!(netdev->features & NETIF_F_RXHASH)) | |
569 | return; | |
570 | ||
571 | switch (cqe_rx->rss_alg) { | |
572 | case RSS_ALG_TCP_IP: | |
573 | case RSS_ALG_UDP_IP: | |
574 | hash_type = PKT_HASH_TYPE_L4; | |
575 | hash = cqe_rx->rss_tag; | |
576 | break; | |
577 | case RSS_ALG_IP: | |
578 | hash_type = PKT_HASH_TYPE_L3; | |
579 | hash = cqe_rx->rss_tag; | |
580 | break; | |
581 | default: | |
582 | hash_type = PKT_HASH_TYPE_NONE; | |
583 | hash = 0; | |
584 | } | |
585 | ||
586 | skb_set_hash(skb, hash, hash_type); | |
587 | } | |
588 | ||
4863dea3 SG |
589 | static void nicvf_rcv_pkt_handler(struct net_device *netdev, |
590 | struct napi_struct *napi, | |
ad2ecebd | 591 | struct cqe_rx_t *cqe_rx) |
4863dea3 SG |
592 | { |
593 | struct sk_buff *skb; | |
594 | struct nicvf *nic = netdev_priv(netdev); | |
595 | int err = 0; | |
92dc8769 SG |
596 | int rq_idx; |
597 | ||
598 | rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); | |
599 | ||
600 | if (nic->sqs_mode) { | |
601 | /* Use primary VF's 'nicvf' struct */ | |
602 | nic = nic->pnicvf; | |
603 | netdev = nic->netdev; | |
604 | } | |
4863dea3 SG |
605 | |
606 | /* Check for errors */ | |
ad2ecebd | 607 | err = nicvf_check_cqe_rx_errs(nic, cqe_rx); |
4863dea3 SG |
608 | if (err && !cqe_rx->rb_cnt) |
609 | return; | |
610 | ||
611 | skb = nicvf_get_rcv_skb(nic, cqe_rx); | |
612 | if (!skb) { | |
613 | netdev_dbg(nic->netdev, "Packet not received\n"); | |
614 | return; | |
615 | } | |
616 | ||
617 | if (netif_msg_pktdata(nic)) { | |
618 | netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name, | |
619 | skb, skb->len); | |
620 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, | |
621 | skb->data, skb->len, true); | |
622 | } | |
623 | ||
a2dc5ded SG |
624 | /* If error packet, drop it here */ |
625 | if (err) { | |
626 | dev_kfree_skb_any(skb); | |
627 | return; | |
628 | } | |
629 | ||
4863dea3 SG |
630 | nicvf_set_rx_frame_cnt(nic, skb); |
631 | ||
38bb5d4f SG |
632 | nicvf_set_rxhash(netdev, cqe_rx, skb); |
633 | ||
92dc8769 | 634 | skb_record_rx_queue(skb, rq_idx); |
4863dea3 SG |
635 | if (netdev->hw_features & NETIF_F_RXCSUM) { |
636 | /* HW by default verifies TCP/UDP/SCTP checksums */ | |
637 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
638 | } else { | |
639 | skb_checksum_none_assert(skb); | |
640 | } | |
641 | ||
642 | skb->protocol = eth_type_trans(skb, netdev); | |
643 | ||
aa2e259b SG |
644 | /* Check for stripped VLAN */ |
645 | if (cqe_rx->vlan_found && cqe_rx->vlan_stripped) | |
646 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | |
647 | ntohs((__force __be16)cqe_rx->vlan_tci)); | |
648 | ||
4863dea3 SG |
649 | if (napi && (netdev->features & NETIF_F_GRO)) |
650 | napi_gro_receive(napi, skb); | |
651 | else | |
652 | netif_receive_skb(skb); | |
653 | } | |
654 | ||
655 | static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, | |
656 | struct napi_struct *napi, int budget) | |
657 | { | |
74840b83 | 658 | int processed_cqe, work_done = 0, tx_done = 0; |
4863dea3 SG |
659 | int cqe_count, cqe_head; |
660 | struct nicvf *nic = netdev_priv(netdev); | |
661 | struct queue_set *qs = nic->qs; | |
662 | struct cmp_queue *cq = &qs->cq[cq_idx]; | |
663 | struct cqe_rx_t *cq_desc; | |
74840b83 | 664 | struct netdev_queue *txq; |
4863dea3 SG |
665 | |
666 | spin_lock_bh(&cq->lock); | |
667 | loop: | |
668 | processed_cqe = 0; | |
669 | /* Get no of valid CQ entries to process */ | |
670 | cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); | |
671 | cqe_count &= CQ_CQE_COUNT; | |
672 | if (!cqe_count) | |
673 | goto done; | |
674 | ||
675 | /* Get head of the valid CQ entries */ | |
676 | cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; | |
677 | cqe_head &= 0xFFFF; | |
678 | ||
74840b83 SG |
679 | netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n", |
680 | __func__, cq_idx, cqe_count, cqe_head); | |
4863dea3 SG |
681 | while (processed_cqe < cqe_count) { |
682 | /* Get the CQ descriptor */ | |
683 | cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); | |
684 | cqe_head++; | |
685 | cqe_head &= (cq->dmem.q_len - 1); | |
686 | /* Initiate prefetch for next descriptor */ | |
687 | prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); | |
688 | ||
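/* Rx processing is bounded by the NAPI budget; Tx completion (SEND)
 * entries are processed regardless of the budget.
 */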
689 | if ((work_done >= budget) && napi && | |
690 | (cq_desc->cqe_type != CQE_TYPE_SEND)) { | |
691 | break; | |
692 | } | |
693 | ||
74840b83 SG |
694 | netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n", |
695 | cq_idx, cq_desc->cqe_type); | |
4863dea3 SG |
696 | switch (cq_desc->cqe_type) { |
697 | case CQE_TYPE_RX: | |
ad2ecebd | 698 | nicvf_rcv_pkt_handler(netdev, napi, cq_desc); |
4863dea3 SG |
699 | work_done++; |
700 | break; | |
701 | case CQE_TYPE_SEND: | |
702 | nicvf_snd_pkt_handler(netdev, cq, | |
c43548d2 SG |
703 | (void *)cq_desc, CQE_TYPE_SEND, |
704 | budget); | |
74840b83 | 705 | tx_done++; |
4863dea3 SG |
706 | break; |
707 | case CQE_TYPE_INVALID: | |
708 | case CQE_TYPE_RX_SPLIT: | |
709 | case CQE_TYPE_RX_TCP: | |
710 | case CQE_TYPE_SEND_PTP: | |
711 | /* Ignore for now */ | |
712 | break; | |
713 | } | |
714 | processed_cqe++; | |
715 | } | |
74840b83 SG |
716 | netdev_dbg(nic->netdev, |
717 | "%s CQ%d processed_cqe %d work_done %d budget %d\n", | |
718 | __func__, cq_idx, processed_cqe, work_done, budget); | |
4863dea3 SG |
719 | |
720 | /* Ring doorbell to inform H/W to reuse processed CQEs */ | |
721 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, | |
722 | cq_idx, processed_cqe); | |
723 | ||
724 | if ((work_done < budget) && napi) | |
725 | goto loop; | |
726 | ||
727 | done: | |
74840b83 SG |
728 | /* Wake up TXQ if it was stopped earlier due to SQ full */ | 
729 | if (tx_done) { | |
92dc8769 SG |
730 | netdev = nic->pnicvf->netdev; |
731 | txq = netdev_get_tx_queue(netdev, | |
732 | nicvf_netdev_qidx(nic, cq_idx)); | |
733 | nic = nic->pnicvf; | |
734 | if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { | |
b49087dd | 735 | netif_tx_start_queue(txq); |
74840b83 SG |
736 | nic->drv_stats.txq_wake++; |
737 | if (netif_msg_tx_err(nic)) | |
738 | netdev_warn(netdev, | |
739 | "%s: Transmit queue wakeup SQ%d\n", | |
740 | netdev->name, cq_idx); | |
741 | } | |
742 | } | |
743 | ||
4863dea3 SG |
744 | spin_unlock_bh(&cq->lock); |
745 | return work_done; | |
746 | } | |
747 | ||
748 | static int nicvf_poll(struct napi_struct *napi, int budget) | |
749 | { | |
750 | u64 cq_head; | |
751 | int work_done = 0; | |
752 | struct net_device *netdev = napi->dev; | |
753 | struct nicvf *nic = netdev_priv(netdev); | |
754 | struct nicvf_cq_poll *cq; | |
4863dea3 SG |
755 | |
756 | cq = container_of(napi, struct nicvf_cq_poll, napi); | |
757 | work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); | |
758 | ||
4863dea3 SG |
759 | if (work_done < budget) { |
760 | /* Slow packet rate, exit polling */ | |
761 | napi_complete(napi); | |
762 | /* Re-enable interrupts */ | |
763 | cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, | |
764 | cq->cq_idx); | |
765 | nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx); | |
766 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, | |
767 | cq->cq_idx, cq_head); | |
768 | nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx); | |
769 | } | |
770 | return work_done; | |
771 | } | |
772 | ||
773 | /* Qset error interrupt handler | |
774 | * | |
775 | * As of now only CQ errors are handled | |
776 | */ | |
fd7ec062 | 777 | static void nicvf_handle_qs_err(unsigned long data) |
4863dea3 SG |
778 | { |
779 | struct nicvf *nic = (struct nicvf *)data; | |
780 | struct queue_set *qs = nic->qs; | |
781 | int qidx; | |
782 | u64 status; | |
783 | ||
784 | netif_tx_disable(nic->netdev); | |
785 | ||
786 | /* Check if it is CQ err */ | |
787 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | |
788 | status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, | |
789 | qidx); | |
790 | if (!(status & CQ_ERR_MASK)) | |
791 | continue; | |
792 | /* Process already queued CQEs and reconfig CQ */ | |
793 | nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); | |
794 | nicvf_sq_disable(nic, qidx); | |
795 | nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0); | |
796 | nicvf_cmp_queue_config(nic, qs, qidx, true); | |
797 | nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx); | |
798 | nicvf_sq_enable(nic, &qs->sq[qidx], qidx); | |
799 | ||
800 | nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); | |
801 | } | |
802 | ||
803 | netif_tx_start_all_queues(nic->netdev); | |
804 | /* Re-enable Qset error interrupt */ | |
805 | nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); | |
806 | } | |
807 | ||
39ad6eea SG |
808 | static void nicvf_dump_intr_status(struct nicvf *nic) |
809 | { | |
810 | if (netif_msg_intr(nic)) | |
811 | netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n", | |
812 | nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT)); | |
813 | } | |
814 | ||
4863dea3 SG |
815 | static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) |
816 | { | |
817 | struct nicvf *nic = (struct nicvf *)nicvf_irq; | |
818 | u64 intr; | |
819 | ||
39ad6eea SG |
820 | nicvf_dump_intr_status(nic); |
821 | ||
4863dea3 SG |
822 | intr = nicvf_reg_read(nic, NIC_VF_INT); |
823 | /* Check for spurious interrupt */ | |
824 | if (!(intr & NICVF_INTR_MBOX_MASK)) | |
825 | return IRQ_HANDLED; | |
826 | ||
827 | nicvf_handle_mbx_intr(nic); | |
828 | ||
829 | return IRQ_HANDLED; | |
830 | } | |
831 | ||
39ad6eea SG |
832 | static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq) |
833 | { | |
834 | struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq; | |
835 | struct nicvf *nic = cq_poll->nicvf; | |
836 | int qidx = cq_poll->cq_idx; | |
837 | ||
838 | nicvf_dump_intr_status(nic); | |
839 | ||
840 | /* Disable interrupts */ | |
841 | nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); | |
842 | ||
843 | /* Schedule NAPI */ | |
ef0a4d86 | 844 | napi_schedule_irqoff(&cq_poll->napi); |
39ad6eea SG |
845 | |
846 | /* Clear interrupt */ | |
847 | nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); | |
848 | ||
849 | return IRQ_HANDLED; | |
850 | } | |
851 | ||
852 | static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq) | |
4863dea3 | 853 | { |
4863dea3 | 854 | struct nicvf *nic = (struct nicvf *)nicvf_irq; |
39ad6eea | 855 | u8 qidx; |
4863dea3 | 856 | |
4863dea3 | 857 | |
39ad6eea SG |
858 | nicvf_dump_intr_status(nic); |
859 | ||
860 | /* Disable RBDR interrupt and schedule softirq */ | |
861 | for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) { | |
862 | if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) | |
4863dea3 | 863 | continue; |
39ad6eea SG |
864 | nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); |
865 | tasklet_hi_schedule(&nic->rbdr_task); | |
866 | /* Clear interrupt */ | |
867 | nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); | |
868 | } | |
4863dea3 | 869 | |
39ad6eea SG |
870 | return IRQ_HANDLED; |
871 | } | |
4863dea3 | 872 | |
39ad6eea SG |
873 | static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq) |
874 | { | |
875 | struct nicvf *nic = (struct nicvf *)nicvf_irq; | |
4863dea3 | 876 | |
39ad6eea SG |
877 | nicvf_dump_intr_status(nic); |
878 | ||
879 | /* Disable Qset err interrupt and schedule softirq */ | |
880 | nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); | |
881 | tasklet_hi_schedule(&nic->qs_err_task); | |
882 | nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); | |
4863dea3 | 883 | |
4863dea3 SG |
884 | return IRQ_HANDLED; |
885 | } | |
886 | ||
887 | static int nicvf_enable_msix(struct nicvf *nic) | |
888 | { | |
889 | int ret, vec; | |
890 | ||
891 | nic->num_vec = NIC_VF_MSIX_VECTORS; | |
892 | ||
893 | for (vec = 0; vec < nic->num_vec; vec++) | |
894 | nic->msix_entries[vec].entry = vec; | |
895 | ||
896 | ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); | |
897 | if (ret) { | |
898 | netdev_err(nic->netdev, | |
899 | "Req for #%d msix vectors failed\n", nic->num_vec); | |
900 | return 0; | |
901 | } | |
902 | nic->msix_enabled = 1; | |
903 | return 1; | |
904 | } | |
905 | ||
906 | static void nicvf_disable_msix(struct nicvf *nic) | |
907 | { | |
908 | if (nic->msix_enabled) { | |
909 | pci_disable_msix(nic->pdev); | |
910 | nic->msix_enabled = 0; | |
911 | nic->num_vec = 0; | |
912 | } | |
913 | } | |
914 | ||
fb4b7d98 SG |
915 | static void nicvf_set_irq_affinity(struct nicvf *nic) |
916 | { | |
917 | int vec, cpu; | |
918 | int irqnum; | |
919 | ||
920 | for (vec = 0; vec < nic->num_vec; vec++) { | |
921 | if (!nic->irq_allocated[vec]) | |
922 | continue; | |
923 | ||
924 | if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL)) | |
925 | return; | |
926 | /* CQ interrupts */ | |
927 | if (vec < NICVF_INTR_ID_SQ) | |
928 | /* Leave CPU0 for RBDR and other interrupts */ | |
929 | cpu = nicvf_netdev_qidx(nic, vec) + 1; | |
930 | else | |
931 | cpu = 0; | |
932 | ||
933 | cpumask_set_cpu(cpumask_local_spread(cpu, nic->node), | |
934 | nic->affinity_mask[vec]); | |
935 | irqnum = nic->msix_entries[vec].vector; | |
936 | irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]); | |
937 | } | |
938 | } | |
939 | ||
4863dea3 SG |
940 | static int nicvf_register_interrupts(struct nicvf *nic) |
941 | { | |
39ad6eea | 942 | int irq, ret = 0; |
4863dea3 SG |
943 | int vector; |
944 | ||
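/* Build per-vector IRQ names (these show up in /proc/interrupts) before
 * requesting the interrupts below.
 */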
945 | for_each_cq_irq(irq) | |
e4126213 SG |
946 | sprintf(nic->irq_name[irq], "%s-rxtx-%d", |
947 | nic->pnicvf->netdev->name, | |
948 | nicvf_netdev_qidx(nic, irq)); | |
4863dea3 SG |
949 | |
950 | for_each_sq_irq(irq) | |
e4126213 SG |
951 | sprintf(nic->irq_name[irq], "%s-sq-%d", |
952 | nic->pnicvf->netdev->name, | |
953 | nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ)); | |
4863dea3 SG |
954 | |
955 | for_each_rbdr_irq(irq) | |
e4126213 SG |
956 | sprintf(nic->irq_name[irq], "%s-rbdr-%d", |
957 | nic->pnicvf->netdev->name, | |
958 | nic->sqs_mode ? (nic->sqs_id + 1) : 0); | |
4863dea3 | 959 | |
39ad6eea SG |
960 | /* Register CQ interrupts */ |
961 | for (irq = 0; irq < nic->qs->cq_cnt; irq++) { | |
4863dea3 SG |
962 | vector = nic->msix_entries[irq].vector; |
963 | ret = request_irq(vector, nicvf_intr_handler, | |
39ad6eea | 964 | 0, nic->irq_name[irq], nic->napi[irq]); |
4863dea3 | 965 | if (ret) |
39ad6eea | 966 | goto err; |
4863dea3 SG |
967 | nic->irq_allocated[irq] = true; |
968 | } | |
969 | ||
39ad6eea SG |
970 | /* Register RBDR interrupt */ |
971 | for (irq = NICVF_INTR_ID_RBDR; | |
972 | irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) { | |
4863dea3 | 973 | vector = nic->msix_entries[irq].vector; |
39ad6eea | 974 | ret = request_irq(vector, nicvf_rbdr_intr_handler, |
4863dea3 SG |
975 | 0, nic->irq_name[irq], nic); |
976 | if (ret) | |
39ad6eea | 977 | goto err; |
4863dea3 SG |
978 | nic->irq_allocated[irq] = true; |
979 | } | |
980 | ||
39ad6eea | 981 | /* Register QS error interrupt */ |
e4126213 SG |
982 | sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d", |
983 | nic->pnicvf->netdev->name, | |
984 | nic->sqs_mode ? (nic->sqs_id + 1) : 0); | |
39ad6eea SG |
985 | irq = NICVF_INTR_ID_QS_ERR; |
986 | ret = request_irq(nic->msix_entries[irq].vector, | |
987 | nicvf_qs_err_intr_handler, | |
988 | 0, nic->irq_name[irq], nic); | |
fb4b7d98 SG |
989 | if (ret) |
990 | goto err; | |
991 | ||
992 | nic->irq_allocated[irq] = true; | |
993 | ||
994 | /* Set IRQ affinities */ | |
995 | nicvf_set_irq_affinity(nic); | |
4863dea3 | 996 | |
39ad6eea SG |
997 | err: |
998 | if (ret) | |
999 | netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq); | |
4863dea3 | 1000 | |
39ad6eea | 1001 | return ret; |
4863dea3 SG |
1002 | } |
1003 | ||
1004 | static void nicvf_unregister_interrupts(struct nicvf *nic) | |
1005 | { | |
1006 | int irq; | |
1007 | ||
1008 | /* Free registered interrupts */ | |
1009 | for (irq = 0; irq < nic->num_vec; irq++) { | |
39ad6eea SG |
1010 | if (!nic->irq_allocated[irq]) |
1011 | continue; | |
1012 | ||
fb4b7d98 SG |
1013 | irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL); |
1014 | free_cpumask_var(nic->affinity_mask[irq]); | |
1015 | ||
39ad6eea SG |
1016 | if (irq < NICVF_INTR_ID_SQ) |
1017 | free_irq(nic->msix_entries[irq].vector, nic->napi[irq]); | |
1018 | else | |
4863dea3 | 1019 | free_irq(nic->msix_entries[irq].vector, nic); |
39ad6eea | 1020 | |
4863dea3 SG |
1021 | nic->irq_allocated[irq] = false; |
1022 | } | |
1023 | ||
1024 | /* Disable MSI-X */ | |
1025 | nicvf_disable_msix(nic); | |
1026 | } | |
1027 | ||
1028 | /* Initialize MSIX vectors and register MISC interrupt. | |
1029 | * Send READY message to PF to check if it is alive. | 
1030 | */ | |
1031 | static int nicvf_register_misc_interrupt(struct nicvf *nic) | |
1032 | { | |
1033 | int ret = 0; | |
1034 | int irq = NICVF_INTR_ID_MISC; | |
1035 | ||
1036 | /* Return if mailbox interrupt is already registered */ | |
1037 | if (nic->msix_enabled) | |
1038 | return 0; | |
1039 | ||
1040 | /* Enable MSI-X */ | |
1041 | if (!nicvf_enable_msix(nic)) | |
1042 | return 1; | |
1043 | ||
1044 | sprintf(nic->irq_name[irq], "%s Mbox", "NICVF"); | |
1045 | /* Register Misc interrupt */ | |
1046 | ret = request_irq(nic->msix_entries[irq].vector, | |
1047 | nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic); | |
1048 | ||
1049 | if (ret) | |
1050 | return ret; | |
1051 | nic->irq_allocated[irq] = true; | |
1052 | ||
1053 | /* Enable mailbox interrupt */ | |
1054 | nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0); | |
1055 | ||
1056 | /* Check if VF is able to communicate with PF */ | |
1057 | if (!nicvf_check_pf_ready(nic)) { | |
1058 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | |
1059 | nicvf_unregister_interrupts(nic); | |
1060 | return 1; | |
1061 | } | |
1062 | ||
1063 | return 0; | |
1064 | } | |
1065 | ||
1066 | static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) | |
1067 | { | |
1068 | struct nicvf *nic = netdev_priv(netdev); | |
1069 | int qid = skb_get_queue_mapping(skb); | |
1070 | struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid); | |
1071 | ||
1072 | /* Check for minimum packet length */ | |
1073 | if (skb->len <= ETH_HLEN) { | |
1074 | dev_kfree_skb(skb); | |
1075 | return NETDEV_TX_OK; | |
1076 | } | |
1077 | ||
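/* Try to append the skb to the send queue; if the SQ is full, stop the
 * queue and return NETDEV_TX_BUSY so the stack retries the packet later.
 */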
b49087dd | 1078 | if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { |
4863dea3 | 1079 | netif_tx_stop_queue(txq); |
74840b83 | 1080 | nic->drv_stats.txq_stop++; |
4863dea3 SG |
1081 | if (netif_msg_tx_err(nic)) |
1082 | netdev_warn(netdev, | |
1083 | "%s: Transmit ring full, stopping SQ%d\n", | |
1084 | netdev->name, qid); | |
4863dea3 SG |
1085 | return NETDEV_TX_BUSY; |
1086 | } | |
1087 | ||
1088 | return NETDEV_TX_OK; | |
1089 | } | |
1090 | ||
39ad6eea SG |
1091 | static inline void nicvf_free_cq_poll(struct nicvf *nic) |
1092 | { | |
1093 | struct nicvf_cq_poll *cq_poll; | |
1094 | int qidx; | |
1095 | ||
1096 | for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) { | |
1097 | cq_poll = nic->napi[qidx]; | |
1098 | if (!cq_poll) | |
1099 | continue; | |
1100 | nic->napi[qidx] = NULL; | |
1101 | kfree(cq_poll); | |
1102 | } | |
1103 | } | |
1104 | ||
4863dea3 SG |
1105 | int nicvf_stop(struct net_device *netdev) |
1106 | { | |
1107 | int irq, qidx; | |
1108 | struct nicvf *nic = netdev_priv(netdev); | |
1109 | struct queue_set *qs = nic->qs; | |
1110 | struct nicvf_cq_poll *cq_poll = NULL; | |
1111 | union nic_mbx mbx = {}; | |
1112 | ||
1113 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; | |
1114 | nicvf_send_msg_to_pf(nic, &mbx); | |
1115 | ||
1116 | netif_carrier_off(netdev); | |
92dc8769 | 1117 | netif_tx_stop_all_queues(nic->netdev); |
0b72a9a1 | 1118 | nic->link_up = false; |
92dc8769 SG |
1119 | |
1120 | /* Teardown secondary qsets first */ | |
1121 | if (!nic->sqs_mode) { | |
1122 | for (qidx = 0; qidx < nic->sqs_count; qidx++) { | |
1123 | if (!nic->snicvf[qidx]) | |
1124 | continue; | |
1125 | nicvf_stop(nic->snicvf[qidx]->netdev); | |
1126 | nic->snicvf[qidx] = NULL; | |
1127 | } | |
1128 | } | |
4863dea3 SG |
1129 | |
1130 | /* Disable RBDR & QS error interrupts */ | |
1131 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { | |
1132 | nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); | |
1133 | nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); | |
1134 | } | |
1135 | nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); | |
1136 | nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); | |
1137 | ||
1138 | /* Wait for pending IRQ handlers to finish */ | |
1139 | for (irq = 0; irq < nic->num_vec; irq++) | |
1140 | synchronize_irq(nic->msix_entries[irq].vector); | |
1141 | ||
1142 | tasklet_kill(&nic->rbdr_task); | |
1143 | tasklet_kill(&nic->qs_err_task); | |
1144 | if (nic->rb_work_scheduled) | |
1145 | cancel_delayed_work_sync(&nic->rbdr_work); | |
1146 | ||
1147 | for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) { | |
1148 | cq_poll = nic->napi[qidx]; | |
1149 | if (!cq_poll) | |
1150 | continue; | |
4863dea3 SG |
1151 | napi_synchronize(&cq_poll->napi); |
1152 | /* CQ intr is re-enabled in the napi_complete() path, | 
1153 | * so disable it now | 
1154 | */ | |
1155 | nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); | |
1156 | nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); | |
1157 | napi_disable(&cq_poll->napi); | |
1158 | netif_napi_del(&cq_poll->napi); | |
4863dea3 SG |
1159 | } |
1160 | ||
b49087dd SG |
1161 | netif_tx_disable(netdev); |
1162 | ||
4863dea3 SG |
1163 | /* Free resources */ |
1164 | nicvf_config_data_transfer(nic, false); | |
1165 | ||
1166 | /* Disable HW Qset */ | |
1167 | nicvf_qset_config(nic, false); | |
1168 | ||
1169 | /* disable mailbox interrupt */ | |
1170 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | |
1171 | ||
1172 | nicvf_unregister_interrupts(nic); | |
1173 | ||
39ad6eea SG |
1174 | nicvf_free_cq_poll(nic); |
1175 | ||
92dc8769 SG |
1176 | /* Clear multiqset info */ |
1177 | nic->pnicvf = nic; | |
92dc8769 | 1178 | |
4863dea3 SG |
1179 | return 0; |
1180 | } | |
1181 | ||
1182 | int nicvf_open(struct net_device *netdev) | |
1183 | { | |
1184 | int err, qidx; | |
1185 | struct nicvf *nic = netdev_priv(netdev); | |
1186 | struct queue_set *qs = nic->qs; | |
1187 | struct nicvf_cq_poll *cq_poll = NULL; | |
1188 | ||
1189 | nic->mtu = netdev->mtu; | |
1190 | ||
1191 | netif_carrier_off(netdev); | |
1192 | ||
1193 | err = nicvf_register_misc_interrupt(nic); | |
1194 | if (err) | |
1195 | return err; | |
1196 | ||
1197 | /* Register NAPI handler for processing CQEs */ | |
1198 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | |
1199 | cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL); | |
1200 | if (!cq_poll) { | |
1201 | err = -ENOMEM; | |
1202 | goto napi_del; | |
1203 | } | |
1204 | cq_poll->cq_idx = qidx; | |
39ad6eea | 1205 | cq_poll->nicvf = nic; |
4863dea3 SG |
1206 | netif_napi_add(netdev, &cq_poll->napi, nicvf_poll, |
1207 | NAPI_POLL_WEIGHT); | |
1208 | napi_enable(&cq_poll->napi); | |
1209 | nic->napi[qidx] = cq_poll; | |
1210 | } | |
1211 | ||
1212 | /* Check if we got MAC address from PF or else generate a random MAC */ | 
a3a8ce4c | 1213 | if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) { |
4863dea3 SG |
1214 | eth_hw_addr_random(netdev); |
1215 | nicvf_hw_set_mac_addr(nic, netdev); | |
1216 | } | |
1217 | ||
bd049a90 PF |
1218 | if (nic->set_mac_pending) { |
1219 | nic->set_mac_pending = false; | |
1220 | nicvf_hw_set_mac_addr(nic, netdev); | |
1221 | } | |
1222 | ||
4863dea3 SG |
1223 | /* Init tasklet for handling Qset err interrupt */ |
1224 | tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err, | |
1225 | (unsigned long)nic); | |
1226 | ||
1227 | /* Init RBDR tasklet which will refill RBDR */ | |
1228 | tasklet_init(&nic->rbdr_task, nicvf_rbdr_task, | |
1229 | (unsigned long)nic); | |
1230 | INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work); | |
1231 | ||
1232 | /* Configure CPI algorithm */ | 
1233 | nic->cpi_alg = cpi_alg; | |
92dc8769 SG |
1234 | if (!nic->sqs_mode) |
1235 | nicvf_config_cpi(nic); | |
1236 | ||
1237 | nicvf_request_sqs(nic); | |
1238 | if (nic->sqs_mode) | |
1239 | nicvf_get_primary_vf_struct(nic); | |
4863dea3 SG |
1240 | |
1241 | /* Configure receive side scaling */ | |
92dc8769 SG |
1242 | if (!nic->sqs_mode) |
1243 | nicvf_rss_init(nic); | |
4863dea3 SG |
1244 | |
1245 | err = nicvf_register_interrupts(nic); | |
1246 | if (err) | |
1247 | goto cleanup; | |
1248 | ||
1249 | /* Initialize the queues */ | |
1250 | err = nicvf_init_resources(nic); | |
1251 | if (err) | |
1252 | goto cleanup; | |
1253 | ||
1254 | /* Make sure queue initialization is written */ | |
1255 | wmb(); | |
1256 | ||
1257 | nicvf_reg_write(nic, NIC_VF_INT, -1); | |
1258 | /* Enable Qset err interrupt */ | |
1259 | nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); | |
1260 | ||
1261 | /* Enable completion queue interrupt */ | |
1262 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
1263 | nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); | |
1264 | ||
1265 | /* Enable RBDR threshold interrupt */ | |
1266 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
1267 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); | |
1268 | ||
74840b83 SG |
1269 | nic->drv_stats.txq_stop = 0; |
1270 | nic->drv_stats.txq_wake = 0; | |
1271 | ||
4863dea3 SG |
1272 | return 0; |
1273 | cleanup: | |
1274 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | |
1275 | nicvf_unregister_interrupts(nic); | |
39ad6eea SG |
1276 | tasklet_kill(&nic->qs_err_task); |
1277 | tasklet_kill(&nic->rbdr_task); | |
4863dea3 SG |
1278 | napi_del: |
1279 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | |
1280 | cq_poll = nic->napi[qidx]; | |
1281 | if (!cq_poll) | |
1282 | continue; | |
1283 | napi_disable(&cq_poll->napi); | |
1284 | netif_napi_del(&cq_poll->napi); | |
4863dea3 | 1285 | } |
39ad6eea | 1286 | nicvf_free_cq_poll(nic); |
4863dea3 SG |
1287 | return err; |
1288 | } | |
1289 | ||
1290 | static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) | |
1291 | { | |
1292 | union nic_mbx mbx = {}; | |
1293 | ||
1294 | mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; | |
1295 | mbx.frs.max_frs = mtu; | |
1296 | mbx.frs.vf_id = nic->vf_id; | |
1297 | ||
1298 | return nicvf_send_msg_to_pf(nic, &mbx); | |
1299 | } | |
1300 | ||
1301 | static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) | |
1302 | { | |
1303 | struct nicvf *nic = netdev_priv(netdev); | |
1304 | ||
1305 | if (new_mtu > NIC_HW_MAX_FRS) | |
1306 | return -EINVAL; | |
1307 | ||
1308 | if (new_mtu < NIC_HW_MIN_FRS) | |
1309 | return -EINVAL; | |
1310 | ||
1311 | if (nicvf_update_hw_max_frs(nic, new_mtu)) | |
1312 | return -EINVAL; | |
1313 | netdev->mtu = new_mtu; | |
1314 | nic->mtu = new_mtu; | |
1315 | ||
1316 | return 0; | |
1317 | } | |
1318 | ||
1319 | static int nicvf_set_mac_address(struct net_device *netdev, void *p) | |
1320 | { | |
1321 | struct sockaddr *addr = p; | |
1322 | struct nicvf *nic = netdev_priv(netdev); | |
1323 | ||
1324 | if (!is_valid_ether_addr(addr->sa_data)) | |
1325 | return -EADDRNOTAVAIL; | |
1326 | ||
1327 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
1328 | ||
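/* If MSI-X (and hence the PF mailbox) is not up yet, defer programming
 * the MAC into hardware until nicvf_open().
 */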
bd049a90 | 1329 | if (nic->msix_enabled) { |
4863dea3 SG |
1330 | if (nicvf_hw_set_mac_addr(nic, netdev)) |
1331 | return -EBUSY; | |
bd049a90 PF |
1332 | } else { |
1333 | nic->set_mac_pending = true; | |
1334 | } | |
4863dea3 SG |
1335 | |
1336 | return 0; | |
1337 | } | |
1338 | ||
4863dea3 SG |
1339 | void nicvf_update_lmac_stats(struct nicvf *nic) |
1340 | { | |
1341 | int stat = 0; | |
1342 | union nic_mbx mbx = {}; | |
4863dea3 SG |
1343 | |
1344 | if (!netif_running(nic->netdev)) | |
1345 | return; | |
1346 | ||
1347 | mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; | |
1348 | mbx.bgx_stats.vf_id = nic->vf_id; | |
1349 | /* Rx stats */ | |
1350 | mbx.bgx_stats.rx = 1; | |
1351 | while (stat < BGX_RX_STATS_COUNT) { | |
4863dea3 | 1352 | mbx.bgx_stats.idx = stat; |
6051cba7 SG |
1353 | if (nicvf_send_msg_to_pf(nic, &mbx)) |
1354 | return; | |
4863dea3 SG |
1355 | stat++; |
1356 | } | |
1357 | ||
1358 | stat = 0; | |
1359 | ||
1360 | /* Tx stats */ | |
1361 | mbx.bgx_stats.rx = 0; | |
1362 | while (stat < BGX_TX_STATS_COUNT) { | |
4863dea3 | 1363 | mbx.bgx_stats.idx = stat; |
6051cba7 SG |
1364 | if (nicvf_send_msg_to_pf(nic, &mbx)) |
1365 | return; | |
4863dea3 SG |
1366 | stat++; |
1367 | } | |
1368 | } | |
1369 | ||
1370 | void nicvf_update_stats(struct nicvf *nic) | |
1371 | { | |
1372 | int qidx; | |
a2dc5ded | 1373 | struct nicvf_hw_stats *stats = &nic->hw_stats; |
4863dea3 SG |
1374 | struct nicvf_drv_stats *drv_stats = &nic->drv_stats; |
1375 | struct queue_set *qs = nic->qs; | |
1376 | ||
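/* Helpers to read the per-VNIC hardware statistics registers; 'reg'
 * selects the counter within the Rx/Tx statistics block.
 */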
1377 | #define GET_RX_STATS(reg) \ | |
1378 | nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3)) | |
1379 | #define GET_TX_STATS(reg) \ | |
1380 | nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) | |
1381 | ||
a2dc5ded SG |
1382 | stats->rx_bytes = GET_RX_STATS(RX_OCTS); |
1383 | stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST); | |
1384 | stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST); | |
1385 | stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST); | |
4863dea3 SG |
1386 | stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); |
1387 | stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); | |
1388 | stats->rx_drop_red = GET_RX_STATS(RX_RED); | |
a2dc5ded | 1389 | stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS); |
4863dea3 | 1390 | stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN); |
a2dc5ded | 1391 | stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS); |
4863dea3 SG |
1392 | stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); |
1393 | stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); | |
1394 | stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); | |
1395 | stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); | |
1396 | ||
1397 | stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); | |
1398 | stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); | |
1399 | stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); | |
1400 | stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); | |
1401 | stats->tx_drops = GET_TX_STATS(TX_DROP); | |
1402 | ||
4863dea3 SG |
1403 | drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + |
1404 | stats->tx_bcast_frames_ok + | |
1405 | stats->tx_mcast_frames_ok; | |
ad2ecebd SG |
1406 | drv_stats->rx_frames_ok = stats->rx_ucast_frames + |
1407 | stats->rx_bcast_frames + | |
1408 | stats->rx_mcast_frames; | |
4863dea3 SG |
1409 | drv_stats->rx_drops = stats->rx_drop_red + |
1410 | stats->rx_drop_overrun; | |
1411 | drv_stats->tx_drops = stats->tx_drops; | |
1412 | ||
1413 | /* Update RQ and SQ stats */ | |
1414 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | |
1415 | nicvf_update_rq_stats(nic, qidx); | |
1416 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
1417 | nicvf_update_sq_stats(nic, qidx); | |
1418 | } | |
1419 | ||
fd7ec062 | 1420 | static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, |
4863dea3 SG |
1421 | struct rtnl_link_stats64 *stats) |
1422 | { | |
1423 | struct nicvf *nic = netdev_priv(netdev); | |
a2dc5ded | 1424 | struct nicvf_hw_stats *hw_stats = &nic->hw_stats; |
4863dea3 SG |
1425 | struct nicvf_drv_stats *drv_stats = &nic->drv_stats; |
1426 | ||
1427 | nicvf_update_stats(nic); | |
1428 | ||
a2dc5ded | 1429 | stats->rx_bytes = hw_stats->rx_bytes; |
4863dea3 SG |
1430 | stats->rx_packets = drv_stats->rx_frames_ok; |
1431 | stats->rx_dropped = drv_stats->rx_drops; | |
a2dc5ded | 1432 | stats->multicast = hw_stats->rx_mcast_frames; |
4863dea3 SG |
1433 | |
1434 | stats->tx_bytes = hw_stats->tx_bytes_ok; | |
1435 | stats->tx_packets = drv_stats->tx_frames_ok; | |
1436 | stats->tx_dropped = drv_stats->tx_drops; | |
1437 | ||
1438 | return stats; | |
1439 | } | |
1440 | ||
1441 | static void nicvf_tx_timeout(struct net_device *dev) | |
1442 | { | |
1443 | struct nicvf *nic = netdev_priv(dev); | |
1444 | ||
1445 | if (netif_msg_tx_err(nic)) | |
1446 | netdev_warn(dev, "%s: Transmit timed out, resetting\n", | |
1447 | dev->name); | |
1448 | ||
a05d4845 | 1449 | nic->drv_stats.tx_timeout++; |
4863dea3 SG |
1450 | schedule_work(&nic->reset_task); |
1451 | } | |
1452 | ||
1453 | static void nicvf_reset_task(struct work_struct *work) | |
1454 | { | |
1455 | struct nicvf *nic; | |
1456 | ||
1457 | nic = container_of(work, struct nicvf, reset_task); | |
1458 | ||
1459 | if (!netif_running(nic->netdev)) | |
1460 | return; | |
1461 | ||
1462 | nicvf_stop(nic->netdev); | |
1463 | nicvf_open(nic->netdev); | |
860e9538 | 1464 | netif_trans_update(nic->netdev); |
4863dea3 SG |
1465 | } |
1466 | ||
d77a2384 SG |
1467 | static int nicvf_config_loopback(struct nicvf *nic, |
1468 | netdev_features_t features) | |
1469 | { | |
1470 | union nic_mbx mbx = {}; | |
1471 | ||
1472 | mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK; | |
1473 | mbx.lbk.vf_id = nic->vf_id; | |
1474 | mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0; | |
1475 | ||
1476 | return nicvf_send_msg_to_pf(nic, &mbx); | |
1477 | } | |
1478 | ||
1479 | static netdev_features_t nicvf_fix_features(struct net_device *netdev, | |
1480 | netdev_features_t features) | |
1481 | { | |
1482 | struct nicvf *nic = netdev_priv(netdev); | |
1483 | ||
1484 | if ((features & NETIF_F_LOOPBACK) && | |
1485 | netif_running(netdev) && !nic->loopback_supported) | |
1486 | features &= ~NETIF_F_LOOPBACK; | |
1487 | ||
1488 | return features; | |
1489 | } | |
1490 | ||
aa2e259b SG |
1491 | static int nicvf_set_features(struct net_device *netdev, |
1492 | netdev_features_t features) | |
1493 | { | |
1494 | struct nicvf *nic = netdev_priv(netdev); | |
1495 | netdev_features_t changed = features ^ netdev->features; | |
1496 | ||
1497 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) | |
1498 | nicvf_config_vlan_stripping(nic, features); | |
1499 | ||
d77a2384 SG |
1500 | if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) |
1501 | return nicvf_config_loopback(nic, features); | |
1502 | ||
aa2e259b SG |
1503 | return 0; |
1504 | } | |
1505 | ||
4863dea3 SG |
1506 | static const struct net_device_ops nicvf_netdev_ops = { |
1507 | .ndo_open = nicvf_open, | |
1508 | .ndo_stop = nicvf_stop, | |
1509 | .ndo_start_xmit = nicvf_xmit, | |
1510 | .ndo_change_mtu = nicvf_change_mtu, | |
1511 | .ndo_set_mac_address = nicvf_set_mac_address, | |
1512 | .ndo_get_stats64 = nicvf_get_stats64, | |
1513 | .ndo_tx_timeout = nicvf_tx_timeout, | |
d77a2384 | 1514 | .ndo_fix_features = nicvf_fix_features, |
aa2e259b | 1515 | .ndo_set_features = nicvf_set_features, |
4863dea3 SG |
1516 | }; |
1517 | ||
1518 | static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
1519 | { | |
1520 | struct device *dev = &pdev->dev; | |
1521 | struct net_device *netdev; | |
1522 | struct nicvf *nic; | |
92dc8769 | 1523 | int err, qcount; |
7ceb8a13 | 1524 | u16 sdevid; |
4863dea3 SG |
1525 | |
1526 | err = pci_enable_device(pdev); | |
1527 | if (err) { | |
1528 | dev_err(dev, "Failed to enable PCI device\n"); | |
1529 | return err; | |
1530 | } | |
1531 | ||
1532 | err = pci_request_regions(pdev, DRV_NAME); | |
1533 | if (err) { | |
1534 | dev_err(dev, "PCI request regions failed 0x%x\n", err); | |
1535 | goto err_disable_device; | |
1536 | } | |
1537 | ||
1538 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); | |
1539 | if (err) { | |
1540 | dev_err(dev, "Unable to get usable DMA configuration\n"); | |
1541 | goto err_release_regions; | |
1542 | } | |
1543 | ||
1544 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); | |
1545 | if (err) { | |
1546 | dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n"); | |
1547 | goto err_release_regions; | |
1548 | } | |
1549 | ||
3a397ebe | 1550 | qcount = netif_get_num_default_rss_queues(); |
92dc8769 SG |
1551 | |
1552 | /* Restrict multiqset support only for host bound VFs */ | |
1553 | if (pdev->is_virtfn) { | |
1554 | /* Set max number of queues per VF */ | |
3a397ebe SG |
1555 | qcount = min_t(int, num_online_cpus(), |
1556 | (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS); | |
92dc8769 SG |
1557 | } |
1558 | ||
1559 | netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount); | |
4863dea3 SG |
1560 | if (!netdev) { |
1561 | err = -ENOMEM; | |
1562 | goto err_release_regions; | |
1563 | } | |
1564 | ||
1565 | pci_set_drvdata(pdev, netdev); | |
1566 | ||
1567 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
1568 | ||
1569 | nic = netdev_priv(netdev); | |
1570 | nic->netdev = netdev; | |
1571 | nic->pdev = pdev; | |
92dc8769 SG |
1572 | nic->pnicvf = nic; |
1573 | nic->max_queues = qcount; | |
4863dea3 SG |
1574 | |
1575 | /* MAP VF's configuration registers */ | |
1576 | nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); | |
1577 | if (!nic->reg_base) { | |
1578 | dev_err(dev, "Cannot map config register space, aborting\n"); | |
1579 | err = -ENOMEM; | |
1580 | goto err_free_netdev; | |
1581 | } | |
1582 | ||
1583 | err = nicvf_set_qset_resources(nic); | |
1584 | if (err) | |
1585 | goto err_free_netdev; | |
1586 | ||
4863dea3 SG |
1587 | /* Check if PF is alive and get MAC address for this VF */ |
1588 | err = nicvf_register_misc_interrupt(nic); | |
1589 | if (err) | |
1590 | goto err_free_netdev; | |
1591 | ||
92dc8769 SG |
1592 | nicvf_send_vf_struct(nic); |
1593 | ||
8d210d54 SG |
1594 | if (!pass1_silicon(nic->pdev)) |
1595 | nic->hw_tso = true; | |
1596 | ||
7ceb8a13 SG |
1597 | pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); |
1598 | if (sdevid == 0xA134) | |
1599 | nic->t88 = true; | |
1600 | ||
92dc8769 SG |
1601 | /* Check if this VF is in QS only mode */ |
1602 | if (nic->sqs_mode) | |
1603 | return 0; | |
1604 | ||
1605 | err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues); | |
1606 | if (err) | |
1607 | goto err_unregister_interrupts; | |
1608 | ||
aa2e259b SG |
1609 | netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | |
1610 | NETIF_F_TSO | NETIF_F_GRO | | |
92dc8769 SG |
1611 | NETIF_F_HW_VLAN_CTAG_RX); |
1612 | ||
1613 | netdev->hw_features |= NETIF_F_RXHASH; | |
aa2e259b SG |
1614 | |
1615 | netdev->features |= netdev->hw_features; | |
d77a2384 | 1616 | netdev->hw_features |= NETIF_F_LOOPBACK; |
38bb5d4f | 1617 | |
aa2e259b | 1618 | netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; |
4863dea3 SG |
1619 | |
1620 | netdev->netdev_ops = &nicvf_netdev_ops; | |
3d7a8aaa | 1621 | netdev->watchdog_timeo = NICVF_TX_TIMEOUT; |
4863dea3 SG |
1622 | |
1623 | INIT_WORK(&nic->reset_task, nicvf_reset_task); | |
1624 | ||
1625 | err = register_netdev(netdev); | |
1626 | if (err) { | |
1627 | dev_err(dev, "Failed to register netdevice\n"); | |
1628 | goto err_unregister_interrupts; | |
1629 | } | |
1630 | ||
1631 | nic->msg_enable = debug; | |
1632 | ||
1633 | nicvf_set_ethtool_ops(netdev); | |
1634 | ||
1635 | return 0; | |
1636 | ||
1637 | err_unregister_interrupts: | |
1638 | nicvf_unregister_interrupts(nic); | |
1639 | err_free_netdev: | |
1640 | pci_set_drvdata(pdev, NULL); | |
1641 | free_netdev(netdev); | |
1642 | err_release_regions: | |
1643 | pci_release_regions(pdev); | |
1644 | err_disable_device: | |
1645 | pci_disable_device(pdev); | |
1646 | return err; | |
1647 | } | |
1648 | ||
1649 | static void nicvf_remove(struct pci_dev *pdev) | |
1650 | { | |
1651 | struct net_device *netdev = pci_get_drvdata(pdev); | |
7750130d PF |
1652 | struct nicvf *nic; |
1653 | struct net_device *pnetdev; | |
1654 | ||
1655 | if (!netdev) | |
1656 | return; | |
1657 | ||
1658 | nic = netdev_priv(netdev); | |
1659 | pnetdev = nic->pnicvf->netdev; | |
4863dea3 | 1660 | |
92dc8769 SG |
1661 | /* Check if this Qset is assigned to a different VF. | 
1662 | * If yes, clean primary and all secondary Qsets. | |
1663 | */ | |
1664 | if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) | |
1665 | unregister_netdev(pnetdev); | |
4863dea3 SG |
1666 | nicvf_unregister_interrupts(nic); |
1667 | pci_set_drvdata(pdev, NULL); | |
1668 | free_netdev(netdev); | |
1669 | pci_release_regions(pdev); | |
1670 | pci_disable_device(pdev); | |
1671 | } | |
1672 | ||
4adf4351 SG |
1673 | static void nicvf_shutdown(struct pci_dev *pdev) |
1674 | { | |
1675 | nicvf_remove(pdev); | |
1676 | } | |
1677 | ||
4863dea3 SG |
1678 | static struct pci_driver nicvf_driver = { |
1679 | .name = DRV_NAME, | |
1680 | .id_table = nicvf_id_table, | |
1681 | .probe = nicvf_probe, | |
1682 | .remove = nicvf_remove, | |
4adf4351 | 1683 | .shutdown = nicvf_shutdown, |
4863dea3 SG |
1684 | }; |
1685 | ||
1686 | static int __init nicvf_init_module(void) | |
1687 | { | |
1688 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); | |
1689 | ||
1690 | return pci_register_driver(&nicvf_driver); | |
1691 | } | |
1692 | ||
1693 | static void __exit nicvf_cleanup_module(void) | |
1694 | { | |
1695 | pci_unregister_driver(&nicvf_driver); | |
1696 | } | |
1697 | ||
1698 | module_init(nicvf_init_module); | |
1699 | module_exit(nicvf_cleanup_module); |