/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/* ETHTOOL Support for VNIC_VF Device */

#include <linux/pci.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}
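
/* For illustration: NICVF_HW_STAT(rx_bytes_ok) expands to
 *	{ .name = "rx_bytes_ok",
 *	  .index = offsetof(struct nicvf_hw_stats, rx_bytes_ok) / sizeof(u64) },
 * i.e. each entry records a stat's name and its u64 slot within the
 * corresponding stats structure, so the structure can later be read back
 * as a flat array of u64 counters.
 */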

static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes_ok),
	NICVF_HW_STAT(rx_ucast_frames_ok),
	NICVF_HW_STAT(rx_bcast_frames_ok),
	NICVF_HW_STAT(rx_mcast_frames_ok),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(tx_bytes_ok),
	NICVF_HW_STAT(tx_ucast_frames_ok),
	NICVF_HW_STAT(tx_bcast_frames_ok),
	NICVF_HW_STAT(tx_mcast_frames_ok),
};

static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_frames_ok),
	NICVF_DRV_STAT(rx_frames_64),
	NICVF_DRV_STAT(rx_frames_127),
	NICVF_DRV_STAT(rx_frames_255),
	NICVF_DRV_STAT(rx_frames_511),
	NICVF_DRV_STAT(rx_frames_1023),
	NICVF_DRV_STAT(rx_frames_1518),
	NICVF_DRV_STAT(rx_frames_jumbo),
	NICVF_DRV_STAT(rx_drops),
	NICVF_DRV_STAT(tx_frames_ok),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_drops),
	NICVF_DRV_STAT(txq_stop),
	NICVF_DRV_STAT(txq_wake),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);

static int nicvf_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->supported = 0;
	cmd->transceiver = XCVR_EXTERNAL;
	if (nic->speed <= 1000) {
		cmd->port = PORT_MII;
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->port = PORT_FIBRE;
		cmd->autoneg = AUTONEG_DISABLE;
	}
	cmd->duplex = nic->duplex;
	ethtool_cmd_speed_set(cmd, nic->speed);

	return 0;
}

static void nicvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct nicvf *nic = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}

static u32 nicvf_get_msglevel(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->msg_enable;
}

static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
	struct nicvf *nic = netdev_priv(netdev);

	nic->msg_enable = lvl;
}

static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stats, qidx;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(data, "rxq%d: %s", qidx,
				nicvf_queue_stats[stats].name);
			data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(data, "txq%d: %s", qidx,
				nicvf_queue_stats[stats].name);
			data += ETH_GSTRING_LEN;
		}
	}

	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}
}

static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	return nicvf_n_hw_stats + nicvf_n_drv_stats +
		(nicvf_n_queue_stats *
		 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}

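/* Note: the values written below must follow the exact order of the strings
 * emitted by nicvf_get_strings() and the count returned by
 * nicvf_get_sset_count(): HW stats, driver stats, per-RQ and per-SQ queue
 * stats, then BGX Rx/Tx stats.
 */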
static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stat, qidx;

	nicvf_update_stats(nic);

	/* Update LMAC stats */
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->stats)
				[nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
		*(data++) = ((u64 *)&nic->drv_stats)
				[nicvf_drv_stats[stat].index];

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.rx_stats[stat];
	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.tx_stats[stat];
}

static int nicvf_get_regs_len(struct net_device *dev)
{
	return sizeof(u64) * NIC_VF_REG_COUNT;
}

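/* Dump the VF register set in a fixed order: VNIC config, PF<->VF mailbox,
 * interrupt, RSS config/key and Tx/Rx stat registers, followed by the
 * per-queue CQ, RQ, SQ and RBDR registers. nicvf_get_regs_len() above sizes
 * the buffer as NIC_VF_REG_COUNT u64 words.
 */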
static void nicvf_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *reg)
{
	struct nicvf *nic = netdev_priv(dev);
	u64 *p = (u64 *)reg;
	u64 reg_offset;
	int mbox, key, stat, q;
	int i = 0;

	regs->version = 0;
	memset(p, 0, NIC_VF_REG_COUNT);

	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
	/* Mailbox registers */
	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));

	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));

	/* Tx/Rx statistics */
	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_TX_STAT_0_4 | (stat << 3));

	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_RX_STAT_0_13 | (stat << 3));

	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);

	/* All completion queues' registers */
	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
	}

	/* All receive queues' registers */
	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS0, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS1, q);
		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
}

static int nicvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
	return 0;
}

static void nicvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;

	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
	ring->rx_pending = qs->rbdr_len;
	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
	ring->tx_pending = qs->sq_len;
}

static int nicvf_get_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through: L4 flows also hash on the IP addresses */
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nicvf_get_rxnfc(struct net_device *dev,
			   struct ethtool_rxnfc *info, u32 *rules)
{
	struct nicvf *nic = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nic->qs->rq_cnt;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return nicvf_get_rss_hash_opts(nic, info);
	default:
		break;
	}
	return ret;
}

static int nicvf_set_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, hash cannot be set\n");
		return -EIO;
	}

	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
		    info->flow_type, info->data);

	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
		return -EINVAL;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_UDP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = RSS_HASH_IP;
		break;
	default:
		return -EINVAL;
	}

	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
	return 0;
}

static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct nicvf *nic = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return nicvf_set_rss_hash_opts(nic, info);
	default:
		break;
	}
	return -EOPNOTSUPP;
}

static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
	return RSS_HASH_KEY_SIZE * sizeof(u64);
}

static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	return nic->rss_info.rss_size;
}

static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
			  u8 *hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss->ind_tbl[idx];
	}

	if (hkey)
		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
			  const u8 *hkey, u8 hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
		rss->enable = false;
		rss->hash_bits = 0;
		return -EIO;
	}

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rss->enable = true;
	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
	}

	if (hkey) {
		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
		nicvf_set_rss_key(nic);
	}

	nicvf_config_rss(nic);
	return 0;
}

/* Get the number of queues the device supports and the current queue count */
static void nicvf_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = MAX_RCV_QUEUES_PER_QS;
	channel->max_tx = MAX_SND_QUEUES_PER_QS;

	channel->rx_count = nic->qs->rq_cnt;
	channel->tx_count = nic->qs->sq_cnt;
}

/* Set the number of Tx/Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
		return -EINVAL;
	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
		return -EINVAL;

	if (if_up)
		nicvf_stop(dev);

	nic->qs->rq_cnt = channel->rx_count;
	nic->qs->sq_cnt = channel->tx_count;
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
	if (err)
		return err;

	if (if_up)
		nicvf_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->qs->sq_cnt, nic->qs->rq_cnt);

	return err;
}

static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_settings = nicvf_get_settings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = nicvf_get_drvinfo,
	.get_msglevel = nicvf_get_msglevel,
	.set_msglevel = nicvf_set_msglevel,
	.get_strings = nicvf_get_strings,
	.get_sset_count = nicvf_get_sset_count,
	.get_ethtool_stats = nicvf_get_ethtool_stats,
	.get_regs_len = nicvf_get_regs_len,
	.get_regs = nicvf_get_regs,
	.get_coalesce = nicvf_get_coalesce,
	.get_ringparam = nicvf_get_ringparam,
	.get_rxnfc = nicvf_get_rxnfc,
	.set_rxnfc = nicvf_set_rxnfc,
	.get_rxfh_key_size = nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
	.get_rxfh = nicvf_get_rxfh,
	.set_rxfh = nicvf_set_rxfh,
	.get_channels = nicvf_get_channels,
	.set_channels = nicvf_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
};

void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}