Commit | Line | Data |
---|---|---|
8b230ed8 RM |
1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | |
6 | * published by the Free Software Foundation | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | /* | |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | |
15 | * All rights reserved | |
16 | * www.brocade.com | |
17 | */ | |
18 | ||
19 | #include "cna.h" | |
20 | ||
21 | #include <linux/netdevice.h> | |
22 | #include <linux/skbuff.h> | |
23 | #include <linux/ethtool.h> | |
24 | #include <linux/rtnetlink.h> | |
25 | ||
26 | #include "bna.h" | |
27 | ||
28 | #include "bnad.h" | |
29 | ||
30 | #define BNAD_NUM_TXF_COUNTERS 12 | |
31 | #define BNAD_NUM_RXF_COUNTERS 10 | |
32 | #define BNAD_NUM_CQ_COUNTERS 3 | |
33 | #define BNAD_NUM_RXQ_COUNTERS 6 | |
34 | #define BNAD_NUM_TXQ_COUNTERS 5 | |
35 | ||
36 | #define BNAD_ETHTOOL_STATS_NUM \ | |
250e061e | 37 | (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \ |
8b230ed8 RM |
38 | sizeof(struct bnad_drv_stats) / sizeof(u64) + \ |
39 | offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64)) | |
40 | ||
/*
 * Counter names reported to "ethtool -S".
 *
 * The order is load-bearing: BNAD_ETHTOOL_STATS_NUM is computed as the
 * u64 field counts of struct rtnl_link_stats64, struct bnad_drv_stats
 * and the leading (pre-rxf_stats) portion of struct bfi_ll_stats, so
 * the names below must track those structures field-for-field, in the
 * same order.  Add/remove names only together with the structures.
 */
static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
	/* struct rtnl_link_stats64 fields */
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",

	"rx_length_errors",
	"rx_over_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"rx_fifo_errors",
	"rx_missed_errors",

	"tx_aborted_errors",
	"tx_carrier_errors",
	"tx_fifo_errors",
	"tx_heartbeat_errors",
	"tx_window_errors",

	"rx_compressed",
	"tx_compressed",

	/* struct bnad_drv_stats fields (driver-maintained counters) */
	"netif_queue_stop",
	"netif_queue_wakeup",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"csum_help_err",
	"hw_stats_updates",
	"netif_rx_schedule",
	"netif_rx_complete",
	"netif_rx_dropped",

	"link_toggle",
	"cee_up",

	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",

	/* hardware (bfi_ll_stats) counters: MAC receive */
	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	/* MAC transmit */
	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	/*
	 * NOTE(review): "muliple" is a typo, but this name is exported to
	 * userspace via ethtool; renaming it would change the visible ABI.
	 */
	"mac_tx_muliple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	/* per-priority (0-7) pause counters, transmit side */
	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	/* per-priority (0-7) pause counters, receive side */
	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	/* RxAdm (RAD) counters */
	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	/* frame classifier (FC) receive counters */
	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	/* frame classifier (FC) transmit counters */
	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};
226 | ||
227 | static int | |
228 | bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | |
229 | { | |
230 | cmd->supported = SUPPORTED_10000baseT_Full; | |
231 | cmd->advertising = ADVERTISED_10000baseT_Full; | |
232 | cmd->autoneg = AUTONEG_DISABLE; | |
233 | cmd->supported |= SUPPORTED_FIBRE; | |
234 | cmd->advertising |= ADVERTISED_FIBRE; | |
235 | cmd->port = PORT_FIBRE; | |
236 | cmd->phy_address = 0; | |
237 | ||
238 | if (netif_carrier_ok(netdev)) { | |
239 | cmd->speed = SPEED_10000; | |
240 | cmd->duplex = DUPLEX_FULL; | |
241 | } else { | |
242 | cmd->speed = -1; | |
243 | cmd->duplex = -1; | |
244 | } | |
245 | cmd->transceiver = XCVR_EXTERNAL; | |
246 | cmd->maxtxpkt = 0; | |
247 | cmd->maxrxpkt = 0; | |
248 | ||
249 | return 0; | |
250 | } | |
251 | ||
252 | static int | |
253 | bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | |
254 | { | |
255 | /* 10G full duplex setting supported only */ | |
256 | if (cmd->autoneg == AUTONEG_ENABLE) | |
257 | return -EOPNOTSUPP; else { | |
258 | if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL)) | |
259 | return 0; | |
260 | } | |
261 | ||
262 | return -EOPNOTSUPP; | |
263 | } | |
264 | ||
265 | static void | |
266 | bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |
267 | { | |
268 | struct bnad *bnad = netdev_priv(netdev); | |
269 | struct bfa_ioc_attr *ioc_attr; | |
270 | unsigned long flags; | |
271 | ||
272 | strcpy(drvinfo->driver, BNAD_NAME); | |
273 | strcpy(drvinfo->version, BNAD_VERSION); | |
274 | ||
275 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); | |
276 | if (ioc_attr) { | |
277 | memset(ioc_attr, 0, sizeof(*ioc_attr)); | |
278 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
8a891429 | 279 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); |
8b230ed8 RM |
280 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
281 | ||
282 | strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, | |
283 | sizeof(drvinfo->fw_version) - 1); | |
284 | kfree(ioc_attr); | |
285 | } | |
286 | ||
287 | strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN); | |
288 | } | |
289 | ||
/*
 * Walk the adapter's BAR0 register space in a fixed order.
 *
 * @bnad: adapter state (bar0 mapping, bna_lock)
 * @regs: output array of u32 values, or NULL to only count registers
 *
 * Returns the number of u32 registers visited.  Calling with @regs ==
 * NULL (see bnad_get_regs_len()) sizes the buffer; calling with a
 * buffer fills it.  Both calls must visit exactly the same sequence of
 * registers, which is why a single function serves both purposes.
 * The whole walk runs under bna_lock for a consistent snapshot.
 */
static int
get_regs(struct bnad *bnad, u32 * regs)
{
	int num = 0, i;
	u32 reg_addr;
	unsigned long flags;

/* Record one register: read it into regs[] or just bump the count. */
#define BNAD_GET_REG(addr)					\
do {								\
	if (regs)						\
		regs[num++] = readl(bnad->bar0 + (addr));	\
	else							\
		num++;						\
} while (0)

	spin_lock_irqsave(&bnad->bna_lock, flags);

	/* DMA Block Internal Registers */
	BNAD_GET_REG(DMA_CTRL_REG0);
	BNAD_GET_REG(DMA_CTRL_REG1);
	BNAD_GET_REG(DMA_ERR_INT_STATUS);
	BNAD_GET_REG(DMA_ERR_INT_ENABLE);
	BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);

	/* APP Block Register Address Offset from BAR0 */
	BNAD_GET_REG(HOSTFN0_INT_STATUS);
	BNAD_GET_REG(HOSTFN0_INT_MASK);
	BNAD_GET_REG(HOST_PAGE_NUM_FN0);
	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
	BNAD_GET_REG(FN0_PCIE_ERR_REG);
	BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
	BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);

	BNAD_GET_REG(HOSTFN1_INT_STATUS);
	BNAD_GET_REG(HOSTFN1_INT_MASK);
	BNAD_GET_REG(HOST_PAGE_NUM_FN1);
	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
	BNAD_GET_REG(FN1_PCIE_ERR_REG);
	BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
	BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);

	BNAD_GET_REG(PCIE_MISC_REG);

	BNAD_GET_REG(HOST_SEM0_REG);
	BNAD_GET_REG(HOST_SEM1_REG);
	BNAD_GET_REG(HOST_SEM2_REG);
	BNAD_GET_REG(HOST_SEM3_REG);
	BNAD_GET_REG(HOST_SEM0_INFO_REG);
	BNAD_GET_REG(HOST_SEM1_INFO_REG);
	BNAD_GET_REG(HOST_SEM2_INFO_REG);
	BNAD_GET_REG(HOST_SEM3_INFO_REG);

	BNAD_GET_REG(TEMPSENSE_CNTL_REG);
	BNAD_GET_REG(TEMPSENSE_STAT_REG);

	BNAD_GET_REG(APP_LOCAL_ERR_STAT);
	BNAD_GET_REG(APP_LOCAL_ERR_MSK);

	BNAD_GET_REG(PCIE_LNK_ERR_STAT);
	BNAD_GET_REG(PCIE_LNK_ERR_MSK);

	BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
	BNAD_GET_REG(RESV_ETH_TYPE);

	BNAD_GET_REG(HOSTFN2_INT_STATUS);
	BNAD_GET_REG(HOSTFN2_INT_MASK);
	BNAD_GET_REG(HOST_PAGE_NUM_FN2);
	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
	BNAD_GET_REG(FN2_PCIE_ERR_REG);
	BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
	BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);

	BNAD_GET_REG(HOSTFN3_INT_STATUS);
	BNAD_GET_REG(HOSTFN3_INT_MASK);
	BNAD_GET_REG(HOST_PAGE_NUM_FN3);
	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
	BNAD_GET_REG(FN3_PCIE_ERR_REG);
	BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
	BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);

	/* Host Command Status Registers: 16 banks, 3 regs per 0x10 stride */
	reg_addr = HOST_CMDSTS0_CLR_REG;
	for (i = 0; i < 16; i++) {
		BNAD_GET_REG(reg_addr);
		BNAD_GET_REG(reg_addr + 4);
		BNAD_GET_REG(reg_addr + 8);
		reg_addr += 0x10;
	}

	/* Function ID register */
	BNAD_GET_REG(FNC_ID_REG);

	/* Function personality register */
	BNAD_GET_REG(FNC_PERS_REG);

	/* Operation mode register */
	BNAD_GET_REG(OP_MODE);

	/* LPU0 Registers */
	BNAD_GET_REG(LPU0_MBOX_CTL_REG);
	BNAD_GET_REG(LPU0_MBOX_CMD_REG);
	BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
	BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
	BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
	BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
	BNAD_GET_REG(LPU0_ERR_STATUS_REG);
	BNAD_GET_REG(LPU0_ERR_SET_REG);

	/* LPU1 Registers */
	BNAD_GET_REG(LPU1_MBOX_CTL_REG);
	BNAD_GET_REG(LPU1_MBOX_CMD_REG);
	BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
	BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
	BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
	BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
	BNAD_GET_REG(LPU1_ERR_STATUS_REG);
	BNAD_GET_REG(LPU1_ERR_SET_REG);

	/* PSS Registers */
	BNAD_GET_REG(PSS_CTL_REG);
	BNAD_GET_REG(PSS_ERR_STATUS_REG);
	BNAD_GET_REG(ERR_STATUS_SET);
	BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);

	/* Catapult CPQ Registers */
	BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
	BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);

	BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
	BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);

	BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
	BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);

	BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
	BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);

	BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
	BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);

	BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
	BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);

	BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
	BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);

	BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
	BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
	BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);

	/* Host Function Force Parity Error Registers */
	BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
	BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
	BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
	BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);

	/* LL Port[0|1] Halt Mask Registers */
	BNAD_GET_REG(LL_HALT_MSK_P0);
	BNAD_GET_REG(LL_HALT_MSK_P1);

	/* LL Port[0|1] Error Mask Registers */
	BNAD_GET_REG(LL_ERR_MSK_P0);
	BNAD_GET_REG(LL_ERR_MSK_P1);

	/* EMC FLI Registers */
	BNAD_GET_REG(FLI_CMD_REG);
	BNAD_GET_REG(FLI_ADDR_REG);
	BNAD_GET_REG(FLI_CTL_REG);
	BNAD_GET_REG(FLI_WRDATA_REG);
	BNAD_GET_REG(FLI_RDDATA_REG);
	BNAD_GET_REG(FLI_DEV_STATUS_REG);
	BNAD_GET_REG(FLI_SIG_WD_REG);

	BNAD_GET_REG(FLI_DEV_VENDOR_REG);
	BNAD_GET_REG(FLI_ERR_STATUS_REG);

	/* RxAdm 0 Registers */
	BNAD_GET_REG(RAD0_CTL_REG);
	BNAD_GET_REG(RAD0_PE_PARM_REG);
	BNAD_GET_REG(RAD0_BCN_REG);
	BNAD_GET_REG(RAD0_DEFAULT_REG);
	BNAD_GET_REG(RAD0_PROMISC_REG);
	BNAD_GET_REG(RAD0_BCNQ_REG);
	BNAD_GET_REG(RAD0_DEFAULTQ_REG);

	BNAD_GET_REG(RAD0_ERR_STS);
	BNAD_GET_REG(RAD0_SET_ERR_STS);
	BNAD_GET_REG(RAD0_ERR_INT_EN);
	BNAD_GET_REG(RAD0_FIRST_ERR);
	BNAD_GET_REG(RAD0_FORCE_ERR);

	BNAD_GET_REG(RAD0_MAC_MAN_1H);
	BNAD_GET_REG(RAD0_MAC_MAN_1L);
	BNAD_GET_REG(RAD0_MAC_MAN_2H);
	BNAD_GET_REG(RAD0_MAC_MAN_2L);
	BNAD_GET_REG(RAD0_MAC_MAN_3H);
	BNAD_GET_REG(RAD0_MAC_MAN_3L);
	BNAD_GET_REG(RAD0_MAC_MAN_4H);
	BNAD_GET_REG(RAD0_MAC_MAN_4L);

	BNAD_GET_REG(RAD0_LAST4_IP);

	/* RxAdm 1 Registers */
	BNAD_GET_REG(RAD1_CTL_REG);
	BNAD_GET_REG(RAD1_PE_PARM_REG);
	BNAD_GET_REG(RAD1_BCN_REG);
	BNAD_GET_REG(RAD1_DEFAULT_REG);
	BNAD_GET_REG(RAD1_PROMISC_REG);
	BNAD_GET_REG(RAD1_BCNQ_REG);
	BNAD_GET_REG(RAD1_DEFAULTQ_REG);

	BNAD_GET_REG(RAD1_ERR_STS);
	BNAD_GET_REG(RAD1_SET_ERR_STS);
	BNAD_GET_REG(RAD1_ERR_INT_EN);

	/* TxA0 Registers */
	BNAD_GET_REG(TXA0_CTRL_REG);
	/* TxA0 TSO Sequence # Registers (RO) */
	for (i = 0; i < 8; i++) {
		BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
		BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
	}

	/* TxA1 Registers */
	BNAD_GET_REG(TXA1_CTRL_REG);
	/* TxA1 TSO Sequence # Registers (RO) */
	for (i = 0; i < 8; i++) {
		BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
		BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
	}

	/* RxA Registers */
	BNAD_GET_REG(RXA0_CTL_REG);
	BNAD_GET_REG(RXA1_CTL_REG);

	/* PLB0 Registers */
	BNAD_GET_REG(PLB0_ECM_TIMER_REG);
	BNAD_GET_REG(PLB0_RL_CTL);
	for (i = 0; i < 8; i++)
		BNAD_GET_REG(PLB0_RL_MAX_BC(i));
	BNAD_GET_REG(PLB0_RL_TU_PRIO);
	for (i = 0; i < 8; i++)
		BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
	BNAD_GET_REG(PLB0_RL_MIN_REG);
	BNAD_GET_REG(PLB0_RL_MAX_REG);
	BNAD_GET_REG(PLB0_EMS_ADD_REG);

	/* PLB1 Registers */
	BNAD_GET_REG(PLB1_ECM_TIMER_REG);
	BNAD_GET_REG(PLB1_RL_CTL);
	for (i = 0; i < 8; i++)
		BNAD_GET_REG(PLB1_RL_MAX_BC(i));
	BNAD_GET_REG(PLB1_RL_TU_PRIO);
	for (i = 0; i < 8; i++)
		BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
	BNAD_GET_REG(PLB1_RL_MIN_REG);
	BNAD_GET_REG(PLB1_RL_MAX_REG);
	BNAD_GET_REG(PLB1_EMS_ADD_REG);

	/* HQM Control Register */
	BNAD_GET_REG(HQM0_CTL_REG);
	BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
	BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
	BNAD_GET_REG(HQM1_CTL_REG);
	BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
	BNAD_GET_REG(HQM1_TXQ_STOP_SEM);

	/* LUT Registers */
	BNAD_GET_REG(LUT0_ERR_STS);
	BNAD_GET_REG(LUT0_SET_ERR_STS);
	BNAD_GET_REG(LUT1_ERR_STS);
	BNAD_GET_REG(LUT1_SET_ERR_STS);

	/* TRC Registers */
	BNAD_GET_REG(TRC_CTL_REG);
	BNAD_GET_REG(TRC_MODS_REG);
	BNAD_GET_REG(TRC_TRGC_REG);
	BNAD_GET_REG(TRC_CNT1_REG);
	BNAD_GET_REG(TRC_CNT2_REG);
	BNAD_GET_REG(TRC_NXTS_REG);
	BNAD_GET_REG(TRC_DIRR_REG);
	for (i = 0; i < 10; i++)
		BNAD_GET_REG(TRC_TRGM_REG(i));
	for (i = 0; i < 10; i++)
		BNAD_GET_REG(TRC_NXTM_REG(i));
	for (i = 0; i < 10; i++)
		BNAD_GET_REG(TRC_STRM_REG(i));

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
#undef BNAD_GET_REG
	return num;
}
597 | static int | |
598 | bnad_get_regs_len(struct net_device *netdev) | |
599 | { | |
600 | int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32); | |
601 | return ret; | |
602 | } | |
603 | ||
/*
 * ethtool get_regs handler: zero the caller's buffer, then fill it
 * with the register snapshot produced by get_regs().
 */
static void
bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
{
	struct bnad *bnad = netdev_priv(netdev);

	memset(buf, 0, bnad_get_regs_len(netdev));
	get_regs(bnad, buf);
}
610 | ||
611 | static void | |
612 | bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) | |
613 | { | |
614 | wolinfo->supported = 0; | |
615 | wolinfo->wolopts = 0; | |
616 | } | |
617 | ||
618 | static int | |
619 | bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | |
620 | { | |
621 | struct bnad *bnad = netdev_priv(netdev); | |
622 | unsigned long flags; | |
623 | ||
624 | /* Lock rqd. to access bnad->bna_lock */ | |
625 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
626 | coalesce->use_adaptive_rx_coalesce = | |
627 | (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; | |
628 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
629 | ||
630 | coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * | |
631 | BFI_COALESCING_TIMER_UNIT; | |
632 | coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * | |
633 | BFI_COALESCING_TIMER_UNIT; | |
634 | coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT; | |
635 | ||
636 | return 0; | |
637 | } | |
638 | ||
/*
 * ethtool set_coalesce handler.
 * Validates the requested Tx/Rx intervals, toggles adaptive Rx
 * coalescing (DIM) and programs the new timeouts into hardware.
 * Runs under conf_mutex; hardware/flag updates are under bna_lock.
 */
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int dim_timer_del = 0;

	/* Rx interval must be non-zero and within the hardware maximum */
	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	/* Same bounds for the Tx interval */
	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * Do not need to store rx_coalesce_usecs here
	 * Every time DIM is disabled, we can get it from the
	 * stack.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		/* Enable DIM only if it was previously off */
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			dim_timer_del = bnad_dim_timer_running(bnad);
			if (dim_timer_del) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
					&bnad->run_flags);
				/*
				 * Drop bna_lock around del_timer_sync():
				 * the timer callback may itself take the
				 * lock, so holding it here would deadlock.
				 */
				spin_unlock_irqrestore(&bnad->bna_lock, flags);
				del_timer_sync(&bnad->dim_timer);
				spin_lock_irqsave(&bnad->bna_lock, flags);
			}
			/* Restore the static Rx timeout now that DIM is off */
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	/* Reprogram Tx timeout only if it actually changed */
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	/* Store the new Rx timeout; program hardware only when DIM is off */
	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);

	}

	/* Add Tx Inter-pkt DMA count? */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}
706 | ||
707 | static void | |
708 | bnad_get_ringparam(struct net_device *netdev, | |
709 | struct ethtool_ringparam *ringparam) | |
710 | { | |
711 | struct bnad *bnad = netdev_priv(netdev); | |
712 | ||
713 | ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq; | |
714 | ringparam->rx_mini_max_pending = 0; | |
715 | ringparam->rx_jumbo_max_pending = 0; | |
716 | ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH; | |
717 | ||
718 | ringparam->rx_pending = bnad->rxq_depth; | |
719 | ringparam->rx_mini_max_pending = 0; | |
720 | ringparam->rx_jumbo_max_pending = 0; | |
721 | ringparam->tx_pending = bnad->txq_depth; | |
722 | } | |
723 | ||
724 | static int | |
725 | bnad_set_ringparam(struct net_device *netdev, | |
726 | struct ethtool_ringparam *ringparam) | |
727 | { | |
728 | int i, current_err, err = 0; | |
729 | struct bnad *bnad = netdev_priv(netdev); | |
730 | ||
731 | mutex_lock(&bnad->conf_mutex); | |
732 | if (ringparam->rx_pending == bnad->rxq_depth && | |
733 | ringparam->tx_pending == bnad->txq_depth) { | |
734 | mutex_unlock(&bnad->conf_mutex); | |
735 | return 0; | |
736 | } | |
737 | ||
738 | if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || | |
739 | ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq || | |
740 | !BNA_POWER_OF_2(ringparam->rx_pending)) { | |
741 | mutex_unlock(&bnad->conf_mutex); | |
742 | return -EINVAL; | |
743 | } | |
744 | if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || | |
745 | ringparam->tx_pending > BNAD_MAX_Q_DEPTH || | |
746 | !BNA_POWER_OF_2(ringparam->tx_pending)) { | |
747 | mutex_unlock(&bnad->conf_mutex); | |
748 | return -EINVAL; | |
749 | } | |
750 | ||
751 | if (ringparam->rx_pending != bnad->rxq_depth) { | |
752 | bnad->rxq_depth = ringparam->rx_pending; | |
753 | for (i = 0; i < bnad->num_rx; i++) { | |
754 | if (!bnad->rx_info[i].rx) | |
755 | continue; | |
756 | bnad_cleanup_rx(bnad, i); | |
757 | current_err = bnad_setup_rx(bnad, i); | |
758 | if (current_err && !err) | |
759 | err = current_err; | |
760 | } | |
761 | } | |
762 | if (ringparam->tx_pending != bnad->txq_depth) { | |
763 | bnad->txq_depth = ringparam->tx_pending; | |
764 | for (i = 0; i < bnad->num_tx; i++) { | |
765 | if (!bnad->tx_info[i].tx) | |
766 | continue; | |
767 | bnad_cleanup_tx(bnad, i); | |
768 | current_err = bnad_setup_tx(bnad, i); | |
769 | if (current_err && !err) | |
770 | err = current_err; | |
771 | } | |
772 | } | |
773 | ||
774 | mutex_unlock(&bnad->conf_mutex); | |
775 | return err; | |
776 | } | |
777 | ||
778 | static void | |
779 | bnad_get_pauseparam(struct net_device *netdev, | |
780 | struct ethtool_pauseparam *pauseparam) | |
781 | { | |
782 | struct bnad *bnad = netdev_priv(netdev); | |
783 | ||
784 | pauseparam->autoneg = 0; | |
785 | pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause; | |
786 | pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause; | |
787 | } | |
788 | ||
789 | static int | |
790 | bnad_set_pauseparam(struct net_device *netdev, | |
791 | struct ethtool_pauseparam *pauseparam) | |
792 | { | |
793 | struct bnad *bnad = netdev_priv(netdev); | |
794 | struct bna_pause_config pause_config; | |
795 | unsigned long flags; | |
796 | ||
797 | if (pauseparam->autoneg == AUTONEG_ENABLE) | |
798 | return -EINVAL; | |
799 | ||
800 | mutex_lock(&bnad->conf_mutex); | |
801 | if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause || | |
802 | pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) { | |
803 | pause_config.rx_pause = pauseparam->rx_pause; | |
804 | pause_config.tx_pause = pauseparam->tx_pause; | |
805 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
806 | bna_port_pause_config(&bnad->bna.port, &pause_config, NULL); | |
807 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
808 | } | |
809 | mutex_unlock(&bnad->conf_mutex); | |
810 | return 0; | |
811 | } | |
812 | ||
813 | static u32 | |
814 | bnad_get_rx_csum(struct net_device *netdev) | |
815 | { | |
816 | u32 rx_csum; | |
817 | struct bnad *bnad = netdev_priv(netdev); | |
818 | ||
819 | rx_csum = bnad->rx_csum; | |
820 | return rx_csum; | |
821 | } | |
822 | ||
823 | static int | |
824 | bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum) | |
825 | { | |
826 | struct bnad *bnad = netdev_priv(netdev); | |
827 | ||
828 | mutex_lock(&bnad->conf_mutex); | |
829 | bnad->rx_csum = rx_csum; | |
830 | mutex_unlock(&bnad->conf_mutex); | |
831 | return 0; | |
832 | } | |
833 | ||
834 | static int | |
835 | bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum) | |
836 | { | |
837 | struct bnad *bnad = netdev_priv(netdev); | |
838 | ||
839 | mutex_lock(&bnad->conf_mutex); | |
840 | if (tx_csum) { | |
841 | netdev->features |= NETIF_F_IP_CSUM; | |
842 | netdev->features |= NETIF_F_IPV6_CSUM; | |
843 | } else { | |
844 | netdev->features &= ~NETIF_F_IP_CSUM; | |
845 | netdev->features &= ~NETIF_F_IPV6_CSUM; | |
846 | } | |
847 | mutex_unlock(&bnad->conf_mutex); | |
848 | return 0; | |
849 | } | |
850 | ||
851 | static int | |
852 | bnad_set_tso(struct net_device *netdev, u32 tso) | |
853 | { | |
854 | struct bnad *bnad = netdev_priv(netdev); | |
855 | ||
856 | mutex_lock(&bnad->conf_mutex); | |
857 | if (tso) { | |
858 | netdev->features |= NETIF_F_TSO; | |
859 | netdev->features |= NETIF_F_TSO6; | |
860 | } else { | |
861 | netdev->features &= ~NETIF_F_TSO; | |
862 | netdev->features &= ~NETIF_F_TSO6; | |
863 | } | |
864 | mutex_unlock(&bnad->conf_mutex); | |
865 | return 0; | |
866 | } | |
867 | ||
868 | static void | |
869 | bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) | |
870 | { | |
871 | struct bnad *bnad = netdev_priv(netdev); | |
872 | int i, j, q_num; | |
873 | u64 bmap; | |
874 | ||
875 | mutex_lock(&bnad->conf_mutex); | |
876 | ||
877 | switch (stringset) { | |
878 | case ETH_SS_STATS: | |
879 | for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { | |
880 | BUG_ON(!(strlen(bnad_net_stats_strings[i]) < | |
881 | ETH_GSTRING_LEN)); | |
882 | memcpy(string, bnad_net_stats_strings[i], | |
883 | ETH_GSTRING_LEN); | |
884 | string += ETH_GSTRING_LEN; | |
885 | } | |
886 | bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] | | |
887 | ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32); | |
888 | for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) { | |
889 | if (bmap & 1) { | |
890 | sprintf(string, "txf%d_ucast_octets", i); | |
891 | string += ETH_GSTRING_LEN; | |
892 | sprintf(string, "txf%d_ucast", i); | |
893 | string += ETH_GSTRING_LEN; | |
894 | sprintf(string, "txf%d_ucast_vlan", i); | |
895 | string += ETH_GSTRING_LEN; | |
896 | sprintf(string, "txf%d_mcast_octets", i); | |
897 | string += ETH_GSTRING_LEN; | |
898 | sprintf(string, "txf%d_mcast", i); | |
899 | string += ETH_GSTRING_LEN; | |
900 | sprintf(string, "txf%d_mcast_vlan", i); | |
901 | string += ETH_GSTRING_LEN; | |
902 | sprintf(string, "txf%d_bcast_octets", i); | |
903 | string += ETH_GSTRING_LEN; | |
904 | sprintf(string, "txf%d_bcast", i); | |
905 | string += ETH_GSTRING_LEN; | |
906 | sprintf(string, "txf%d_bcast_vlan", i); | |
907 | string += ETH_GSTRING_LEN; | |
908 | sprintf(string, "txf%d_errors", i); | |
909 | string += ETH_GSTRING_LEN; | |
910 | sprintf(string, "txf%d_filter_vlan", i); | |
911 | string += ETH_GSTRING_LEN; | |
912 | sprintf(string, "txf%d_filter_mac_sa", i); | |
913 | string += ETH_GSTRING_LEN; | |
914 | } | |
915 | bmap >>= 1; | |
916 | } | |
917 | ||
918 | bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] | | |
919 | ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32); | |
920 | for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) { | |
921 | if (bmap & 1) { | |
922 | sprintf(string, "rxf%d_ucast_octets", i); | |
923 | string += ETH_GSTRING_LEN; | |
924 | sprintf(string, "rxf%d_ucast", i); | |
925 | string += ETH_GSTRING_LEN; | |
926 | sprintf(string, "rxf%d_ucast_vlan", i); | |
927 | string += ETH_GSTRING_LEN; | |
928 | sprintf(string, "rxf%d_mcast_octets", i); | |
929 | string += ETH_GSTRING_LEN; | |
930 | sprintf(string, "rxf%d_mcast", i); | |
931 | string += ETH_GSTRING_LEN; | |
932 | sprintf(string, "rxf%d_mcast_vlan", i); | |
933 | string += ETH_GSTRING_LEN; | |
934 | sprintf(string, "rxf%d_bcast_octets", i); | |
935 | string += ETH_GSTRING_LEN; | |
936 | sprintf(string, "rxf%d_bcast", i); | |
937 | string += ETH_GSTRING_LEN; | |
938 | sprintf(string, "rxf%d_bcast_vlan", i); | |
939 | string += ETH_GSTRING_LEN; | |
940 | sprintf(string, "rxf%d_frame_drops", i); | |
941 | string += ETH_GSTRING_LEN; | |
942 | } | |
943 | bmap >>= 1; | |
944 | } | |
945 | ||
946 | q_num = 0; | |
947 | for (i = 0; i < bnad->num_rx; i++) { | |
948 | if (!bnad->rx_info[i].rx) | |
949 | continue; | |
950 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
951 | sprintf(string, "cq%d_producer_index", q_num); | |
952 | string += ETH_GSTRING_LEN; | |
953 | sprintf(string, "cq%d_consumer_index", q_num); | |
954 | string += ETH_GSTRING_LEN; | |
955 | sprintf(string, "cq%d_hw_producer_index", | |
956 | q_num); | |
957 | string += ETH_GSTRING_LEN; | |
958 | q_num++; | |
959 | } | |
960 | } | |
961 | ||
962 | q_num = 0; | |
963 | for (i = 0; i < bnad->num_rx; i++) { | |
964 | if (!bnad->rx_info[i].rx) | |
965 | continue; | |
966 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
967 | sprintf(string, "rxq%d_packets", q_num); | |
968 | string += ETH_GSTRING_LEN; | |
969 | sprintf(string, "rxq%d_bytes", q_num); | |
970 | string += ETH_GSTRING_LEN; | |
971 | sprintf(string, "rxq%d_packets_with_error", | |
972 | q_num); | |
973 | string += ETH_GSTRING_LEN; | |
974 | sprintf(string, "rxq%d_allocbuf_failed", q_num); | |
975 | string += ETH_GSTRING_LEN; | |
976 | sprintf(string, "rxq%d_producer_index", q_num); | |
977 | string += ETH_GSTRING_LEN; | |
978 | sprintf(string, "rxq%d_consumer_index", q_num); | |
979 | string += ETH_GSTRING_LEN; | |
980 | q_num++; | |
981 | if (bnad->rx_info[i].rx_ctrl[j].ccb && | |
982 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
983 | rcb[1] && | |
984 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
985 | rcb[1]->rxq) { | |
986 | sprintf(string, "rxq%d_packets", q_num); | |
987 | string += ETH_GSTRING_LEN; | |
988 | sprintf(string, "rxq%d_bytes", q_num); | |
989 | string += ETH_GSTRING_LEN; | |
990 | sprintf(string, | |
991 | "rxq%d_packets_with_error", q_num); | |
992 | string += ETH_GSTRING_LEN; | |
993 | sprintf(string, "rxq%d_allocbuf_failed", | |
994 | q_num); | |
995 | string += ETH_GSTRING_LEN; | |
996 | sprintf(string, "rxq%d_producer_index", | |
997 | q_num); | |
998 | string += ETH_GSTRING_LEN; | |
999 | sprintf(string, "rxq%d_consumer_index", | |
1000 | q_num); | |
1001 | string += ETH_GSTRING_LEN; | |
1002 | q_num++; | |
1003 | } | |
1004 | } | |
1005 | } | |
1006 | ||
1007 | q_num = 0; | |
1008 | for (i = 0; i < bnad->num_tx; i++) { | |
1009 | if (!bnad->tx_info[i].tx) | |
1010 | continue; | |
1011 | for (j = 0; j < bnad->num_txq_per_tx; j++) { | |
1012 | sprintf(string, "txq%d_packets", q_num); | |
1013 | string += ETH_GSTRING_LEN; | |
1014 | sprintf(string, "txq%d_bytes", q_num); | |
1015 | string += ETH_GSTRING_LEN; | |
1016 | sprintf(string, "txq%d_producer_index", q_num); | |
1017 | string += ETH_GSTRING_LEN; | |
1018 | sprintf(string, "txq%d_consumer_index", q_num); | |
1019 | string += ETH_GSTRING_LEN; | |
1020 | sprintf(string, "txq%d_hw_consumer_index", | |
1021 | q_num); | |
1022 | string += ETH_GSTRING_LEN; | |
1023 | q_num++; | |
1024 | } | |
1025 | } | |
1026 | ||
1027 | break; | |
1028 | ||
1029 | default: | |
1030 | break; | |
1031 | } | |
1032 | ||
1033 | mutex_unlock(&bnad->conf_mutex); | |
1034 | } | |
1035 | ||
1036 | static int | |
1037 | bnad_get_stats_count_locked(struct net_device *netdev) | |
1038 | { | |
1039 | struct bnad *bnad = netdev_priv(netdev); | |
1040 | int i, j, count, rxf_active_num = 0, txf_active_num = 0; | |
1041 | u64 bmap; | |
1042 | ||
1043 | bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] | | |
1044 | ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32); | |
1045 | for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) { | |
1046 | if (bmap & 1) | |
1047 | txf_active_num++; | |
1048 | bmap >>= 1; | |
1049 | } | |
1050 | bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] | | |
1051 | ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32); | |
1052 | for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) { | |
1053 | if (bmap & 1) | |
1054 | rxf_active_num++; | |
1055 | bmap >>= 1; | |
1056 | } | |
1057 | count = BNAD_ETHTOOL_STATS_NUM + | |
1058 | txf_active_num * BNAD_NUM_TXF_COUNTERS + | |
1059 | rxf_active_num * BNAD_NUM_RXF_COUNTERS; | |
1060 | ||
1061 | for (i = 0; i < bnad->num_rx; i++) { | |
1062 | if (!bnad->rx_info[i].rx) | |
1063 | continue; | |
1064 | count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; | |
1065 | count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; | |
1066 | for (j = 0; j < bnad->num_rxp_per_rx; j++) | |
1067 | if (bnad->rx_info[i].rx_ctrl[j].ccb && | |
1068 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
1069 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) | |
1070 | count += BNAD_NUM_RXQ_COUNTERS; | |
1071 | } | |
1072 | ||
1073 | for (i = 0; i < bnad->num_tx; i++) { | |
1074 | if (!bnad->tx_info[i].tx) | |
1075 | continue; | |
1076 | count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; | |
1077 | } | |
1078 | return count; | |
1079 | } | |
1080 | ||
1081 | static int | |
1082 | bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) | |
1083 | { | |
1084 | int i, j; | |
1085 | struct bna_rcb *rcb = NULL; | |
1086 | struct bna_tcb *tcb = NULL; | |
1087 | ||
1088 | for (i = 0; i < bnad->num_rx; i++) { | |
1089 | if (!bnad->rx_info[i].rx) | |
1090 | continue; | |
1091 | for (j = 0; j < bnad->num_rxp_per_rx; j++) | |
1092 | if (bnad->rx_info[i].rx_ctrl[j].ccb && | |
1093 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && | |
1094 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { | |
1095 | buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. | |
1096 | ccb->producer_index; | |
1097 | buf[bi++] = 0; /* ccb->consumer_index */ | |
1098 | buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. | |
1099 | ccb->hw_producer_index); | |
1100 | } | |
1101 | } | |
1102 | for (i = 0; i < bnad->num_rx; i++) { | |
1103 | if (!bnad->rx_info[i].rx) | |
1104 | continue; | |
1105 | for (j = 0; j < bnad->num_rxp_per_rx; j++) | |
1106 | if (bnad->rx_info[i].rx_ctrl[j].ccb) { | |
1107 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && | |
1108 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
1109 | rcb[0]->rxq) { | |
1110 | rcb = bnad->rx_info[i].rx_ctrl[j]. | |
1111 | ccb->rcb[0]; | |
1112 | buf[bi++] = rcb->rxq->rx_packets; | |
1113 | buf[bi++] = rcb->rxq->rx_bytes; | |
1114 | buf[bi++] = rcb->rxq-> | |
1115 | rx_packets_with_error; | |
1116 | buf[bi++] = rcb->rxq-> | |
1117 | rxbuf_alloc_failed; | |
1118 | buf[bi++] = rcb->producer_index; | |
1119 | buf[bi++] = rcb->consumer_index; | |
1120 | } | |
1121 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
1122 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
1123 | rcb[1]->rxq) { | |
1124 | rcb = bnad->rx_info[i].rx_ctrl[j]. | |
1125 | ccb->rcb[1]; | |
1126 | buf[bi++] = rcb->rxq->rx_packets; | |
1127 | buf[bi++] = rcb->rxq->rx_bytes; | |
1128 | buf[bi++] = rcb->rxq-> | |
1129 | rx_packets_with_error; | |
1130 | buf[bi++] = rcb->rxq-> | |
1131 | rxbuf_alloc_failed; | |
1132 | buf[bi++] = rcb->producer_index; | |
1133 | buf[bi++] = rcb->consumer_index; | |
1134 | } | |
1135 | } | |
1136 | } | |
1137 | ||
1138 | for (i = 0; i < bnad->num_tx; i++) { | |
1139 | if (!bnad->tx_info[i].tx) | |
1140 | continue; | |
1141 | for (j = 0; j < bnad->num_txq_per_tx; j++) | |
1142 | if (bnad->tx_info[i].tcb[j] && | |
1143 | bnad->tx_info[i].tcb[j]->txq) { | |
1144 | tcb = bnad->tx_info[i].tcb[j]; | |
1145 | buf[bi++] = tcb->txq->tx_packets; | |
1146 | buf[bi++] = tcb->txq->tx_bytes; | |
1147 | buf[bi++] = tcb->producer_index; | |
1148 | buf[bi++] = tcb->consumer_index; | |
1149 | buf[bi++] = *(tcb->hw_consumer_index); | |
1150 | } | |
1151 | } | |
1152 | ||
1153 | return bi; | |
1154 | } | |
1155 | ||
/*
 * ethtool .get_ethtool_stats handler.  Fills @buf with all counters in
 * the same order that bnad_get_strings() emitted their names:
 * rtnl_link_stats64 fields, driver stats, hardware stats up to the
 * rxf block, active per-TxF and per-RxF stats, then per-queue stats.
 */
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u64 bmap;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * Bail out if the device configuration changed between the
	 * caller sizing its buffer and now: the layout would no longer
	 * match and we would over/under-fill @buf.
	 */
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Used bna_lock to sync reads from bna_stats, which is written
	 * under the same lock
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bi = 0;
	memset(buf, 0, stats->n_stats * sizeof(u64));

	/* The buffer starts with an rtnl_link_stats64-shaped region */
	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
	for (i = 0;
		i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
		i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats->txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats->rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
1234 | ||
1235 | static int | |
1236 | bnad_get_sset_count(struct net_device *netdev, int sset) | |
1237 | { | |
1238 | switch (sset) { | |
1239 | case ETH_SS_STATS: | |
1240 | return bnad_get_stats_count_locked(netdev); | |
1241 | default: | |
1242 | return -EOPNOTSUPP; | |
1243 | } | |
1244 | } | |
1245 | ||
1246 | static struct ethtool_ops bnad_ethtool_ops = { | |
1247 | .get_settings = bnad_get_settings, | |
1248 | .set_settings = bnad_set_settings, | |
1249 | .get_drvinfo = bnad_get_drvinfo, | |
1250 | .get_regs_len = bnad_get_regs_len, | |
1251 | .get_regs = bnad_get_regs, | |
1252 | .get_wol = bnad_get_wol, | |
1253 | .get_link = ethtool_op_get_link, | |
1254 | .get_coalesce = bnad_get_coalesce, | |
1255 | .set_coalesce = bnad_set_coalesce, | |
1256 | .get_ringparam = bnad_get_ringparam, | |
1257 | .set_ringparam = bnad_set_ringparam, | |
1258 | .get_pauseparam = bnad_get_pauseparam, | |
1259 | .set_pauseparam = bnad_set_pauseparam, | |
1260 | .get_rx_csum = bnad_get_rx_csum, | |
1261 | .set_rx_csum = bnad_set_rx_csum, | |
1262 | .get_tx_csum = ethtool_op_get_tx_csum, | |
1263 | .set_tx_csum = bnad_set_tx_csum, | |
1264 | .get_sg = ethtool_op_get_sg, | |
1265 | .set_sg = ethtool_op_set_sg, | |
1266 | .get_tso = ethtool_op_get_tso, | |
1267 | .set_tso = bnad_set_tso, | |
8b230ed8 RM |
1268 | .get_strings = bnad_get_strings, |
1269 | .get_ethtool_stats = bnad_get_ethtool_stats, | |
1270 | .get_sset_count = bnad_get_sset_count | |
1271 | }; | |
1272 | ||
/*
 * Attach this driver's ethtool operations table to @netdev.
 */
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}