/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access. When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter. Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 ucdef;
	u16 bcast;
	u16 mcdef;
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress. AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool vlan_filter;
	struct list_head vlan_list;
};
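
/* The BUSY and AUTO_OLD flags live in the low two bits of 'spec'
 * (EFX_EF10_FILTER_FLAGS masks them off); this works because filter
 * spec allocations are at least 4-byte aligned, so those pointer bits
 * are otherwise always zero.
 */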

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
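
/* efx_ef10_get_warm_boot_count() above relies on the MC publishing its
 * state in ER_DZ_BIU_MC_SFT_STATUS: the upper word reads the magic
 * 0xb007 ("boot") once the MC is up, with the warm boot count in the
 * lower word; anything else is treated as the MC not being ready.
 */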

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar;
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}
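
/* The MCDI calling pattern above recurs throughout this file: declare
 * request/response buffers with MCDI_DECLARE_BUF(), issue the command
 * with efx_mcdi_rpc(), treat a short response (outlen < expected) as
 * -EIO, and only then extract fields with MCDI_DWORD()/MCDI_WORD()/
 * MCDI_PTR().
 */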

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
	else
		nic_data->datapath_caps2 = 0;

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}
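
/* Worked example of the fallback arithmetic above: GET_CLOCK reports
 * the system clock in MHz (which the 1536000 numerator implies), so a
 * quantum of 1536 clock cycles lasts 1536000 / freq_mhz nanoseconds;
 * an 800 MHz sysclk, for instance, would give a 1920 ns quantum.
 */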

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}
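
/* Note the lock ordering in efx_ef10_add_vlan(): vlan_lock, then
 * mac_lock, then filter_sem.  efx_ef10_del_vlan_internal() below takes
 * filter_sem under vlan_lock in the same order, so VLAN list and
 * filter table updates cannot deadlock against each other.
 */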

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with the VLAN filtering feature. We need to keep it to
	 * receive untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	struct net_device *net_dev = efx->net_dev;
	int i, rc;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;
	net_dev->dev_port = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_probe(efx, NULL);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic.  It is added automatically if the 8021q module is
	 * loaded, but we can't rely on that since the module may not
	 * be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}
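
/* The fail labels above unwind in strict reverse order of acquisition,
 * so a failure at any step in efx_ef10_probe() releases exactly what
 * has been set up so far; efx_ef10_remove() mirrors the same sequence
 * for the fully-probed case.
 */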

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	_MCDI_DECLARE_BUF(inbuf,
			  max(MC_CMD_LINK_PIOBUF_IN_LEN,
			      MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	memset(inbuf, 0, sizeof(inbuf));

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}
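
/* Worked example of the queue-to-buffer arithmetic above, assuming for
 * illustration a 2048-byte hardware PIO buffer (ER_DZ_TX_PIOBUF_SIZE)
 * and 256-byte copy buffers (efx_piobuf_size): the highest-numbered TX
 * queue maps to offset 0 of buffer 0, the next one down to offset 256
 * of buffer 0, and so on, rolling over to buffer 1 after eight queues.
 */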

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}
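
/* When the VADAPTOR_QUERY capability is absent, the function above
 * skips the MCDI call entirely and the flag outputs are read from the
 * untouched outbuf (MCDI_DECLARE_BUF() zero-initialises it), so
 * callers see zero flags rather than an error.
 */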

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first half of the next VI.  Then the WC mapping begins with
	 * the second half of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;

		efx_ef10_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}
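
/* Resulting BAR layout as set up above, when the host page size is 4K:
 * the UC mapping covers the channel VIs up to the ER_DZ_TX_PIOBUF
 * offset within the last one (so doorbells stay uncached), and the WC
 * mapping begins at that boundary so the PIO write apertures that
 * follow are all write-combined.
 */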

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }

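/* For reference, EF10_DMA_STAT(port_tx_bytes, TX_BYTES) expands to
 *
 *	[EF10_STAT_port_tx_bytes] =
 *		{ "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 *
 * i.e. a named 64-bit counter located 8 * MC_CMD_MAC_TX_BYTES bytes
 * into the DMAed statistics buffer, while the EF10_OTHER_STAT() and
 * GENERIC_SW_STAT() entries have no DMA location and are computed in
 * software.
 */
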
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
			       (1ULL <<					\
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
 * For a 10G/40G switchable port we do not expose these because they might
 * not include all the packets they should.
 * On 8000 series NICs these statistics are always provided.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(efx->mcdi->fn_flags &
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		return 0;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
		/* 8000 series have everything even at 40G */
		if (nic_data->datapath_caps2 &
		    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
			raw_mask |= HUNT_10G_ONLY_STAT_MASK;
	} else {
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
	}

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 raw_mask[2];

	raw_mask[0] = efx_ef10_raw_stat_mask(efx);

	/* Only show vadaptor stats when EVB capability is present */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
	} else {
		raw_mask[1] = 0;
	}

#if BITS_PER_LONG == 64
	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
	mask[0] = raw_mask[0];
	mask[1] = raw_mask[1];
#else
	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
#endif
}
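
/* raw_mask[] above is a 128-bit bitmap held in two u64s (the
 * BUILD_BUG_ONs pin EF10_STAT_COUNT between 65 and 96), while the
 * caller's mask is an unsigned-long bitmap, so on 32-bit kernels the
 * two u64 words must be re-split into three 32-bit words.
 */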

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}

static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					   struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;

	efx_ef10_get_stat_mask(efx, mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (!core_stats)
		return stats_count;

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
		/* Use vadaptor stats. */
		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
					 stats[EF10_STAT_rx_multicast] +
					 stats[EF10_STAT_rx_broadcast];
		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
					 stats[EF10_STAT_tx_multicast] +
					 stats[EF10_STAT_tx_broadcast];
		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
				       stats[EF10_STAT_rx_multicast_bytes] +
				       stats[EF10_STAT_rx_broadcast_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
				       stats[EF10_STAT_tx_multicast_bytes] +
				       stats[EF10_STAT_tx_broadcast_bytes];
		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = core_stats->rx_crc_errors;
		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
	} else {
		/* Use port stats. */
		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_port_rx_gtjumbo] +
			stats[EF10_STAT_port_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
		core_stats->rx_frame_errors =
			stats[EF10_STAT_port_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}
1707
1708static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
8127d661
BH
1709{
1710 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4bae913b 1711 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
8127d661
BH
1712 __le64 generation_start, generation_end;
1713 u64 *stats = nic_data->stats;
1714 __le64 *dma_stats;
1715
4bae913b
EC
1716 efx_ef10_get_stat_mask(efx, mask);
1717
8127d661 1718 dma_stats = efx->stats_buffer.addr;
8127d661
BH
1719
1720 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
1721 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1722 return 0;
1723 rmb();
4bae913b 1724 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
8127d661 1725 stats, efx->stats_buffer.addr, false);
d546a893 1726 rmb();
8127d661
BH
1727 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1728 if (generation_end != generation_start)
1729 return -EAGAIN;
1730
1731 /* Update derived statistics */
e80ca013
DP
1732 efx_nic_fix_nodesc_drop_stat(efx,
1733 &stats[EF10_STAT_port_rx_nodesc_drops]);
1734 stats[EF10_STAT_port_rx_good_bytes] =
1735 stats[EF10_STAT_port_rx_bytes] -
1736 stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1737 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1738 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
e4d112e4 1739 efx_update_sw_stats(efx, stats);
8127d661
BH
1740 return 0;
1741}
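
/* Illustrative note (not part of the original source): the two generation
 * words bracket the DMA'd statistics.  Reading GENERATION_END before the
 * copy and GENERATION_START after it, with rmb() barriers in between,
 * detects a concurrent firmware update: if the two values differ the
 * snapshot is torn, and the caller retries on -EAGAIN.
 */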

static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
			break;
		udelay(100);
	}

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
	struct efx_buffer stats_buf;
	__le64 *dma_stats;
	int rc;

	spin_unlock_bh(&efx->stats_lock);

	if (in_interrupt()) {
		/* If in atomic context, cannot update stats.  Just update the
		 * software stats and return so the caller can continue.
		 */
		spin_lock_bh(&efx->stats_lock);
		efx_update_sw_stats(efx, stats);
		return 0;
	}

	efx_ef10_get_stat_mask(efx, mask);

	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
	if (rc) {
		spin_lock_bh(&efx->stats_lock);
		return rc;
	}

	dma_stats = stats_buf.addr;
	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;

	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
			      MAC_STATS_IN_DMA, 1);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
				NULL, 0, NULL);
	spin_lock_bh(&efx->stats_lock);
	if (rc) {
		/* Expect ENOENT if DMA queues have not been set up */
		if (rc != -ENOENT || atomic_read(&efx->active_queues))
			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
					       sizeof(inbuf), NULL, 0, rc);
		goto out;
	}

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
		WARN_ON_ONCE(1);
		goto out;
	}
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, stats_buf.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start) {
		rc = -EAGAIN;
		goto out;
	}

	efx_update_sw_stats(efx, stats);
out:
	efx_nic_free_buffer(efx, &stats_buf);
	return rc;
}

static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	if (efx_ef10_try_update_nic_stats_vf(efx))
		return 0;

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, usecs;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation_us) {
		mode = 3;
		usecs = channel->irq_moderation_us;
	} else {
		mode = 0;
		usecs = 0;
	}

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
		unsigned int ns = usecs * 1000;

		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
			       channel->channel);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);

		efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
				   inbuf, sizeof(inbuf), 0, NULL, 0);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);

		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, ticks);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);

		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, ticks);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}
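
/* Worked example (illustrative only): with channel->irq_moderation_us = 30,
 * the MCDI path programs TMR_LOAD_REQ_NS = 30 * 1000 = 30000 ns, while the
 * two register paths program efx_usecs_to_ticks(efx, 30) timer ticks; the
 * tick duration is NIC-dependent.
 */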

static void efx_ef10_get_wol_vf(struct efx_nic *efx,
				struct ethtool_wolinfo *wol) {}

static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
{
	return -EOPNOTSUPP;
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}
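
/* Worked example (illustrative only): for an MCDI buffer at DMA address
 * 0x0000000123456000, the 'low' doorbell register receives the high dword
 * 0x00000001 and the 'high' register receives the low dword 0x23456000,
 * matching the dword swap performed by firmware.
 */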

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/* All our allocations have been reset */
	efx_ef10_reset_mc_allocations(efx);

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;
	efx_ef10_mcdi_reboot_detected(efx);

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static int efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
				    NULL) == 0)
		return -ENOTSUPP;

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}

static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t inlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;
	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
		tx_queue->tso_version = 1;
	}

	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
		    tx_queue->queue);
}

static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}

static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}
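
/* Worked example (illustrative only): the descriptor rings are power-of-two
 * sized, so masking with ptr_mask implements wrap-around.  With a 512-entry
 * TXQ, ptr_mask = 0x1ff and a monotonically increasing write_count of 513
 * maps to ring slot 513 & 0x1ff = 1.
 */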

static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
				      bool exclusive, unsigned *context_size)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;
	u32 alloc_type = exclusive ?
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
	unsigned rss_spread = exclusive ?
				efx->rss_spread :
				min(rounddown_pow_of_two(efx->rss_spread),
				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);

	if (!exclusive && rss_spread == 1) {
		*context = EFX_EF10_RSS_CONTEXT_INVALID;
		if (context_size)
			*context_size = 1;
		return 0;
	}

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
		return -EOPNOTSUPP;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       nic_data->vport_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	if (context_size)
		*context_size = rss_spread;

	return 0;
}
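
/* Worked example (illustrative only): for a shared context the spread is
 * rounded down to a power of two and capped at
 * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE, so efx->rss_spread = 6 requests a
 * 4-queue shared context, while an exclusive context would request all 6.
 */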

static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	WARN_ON(rc != 0);
}

static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
				       const u32 *rx_indir_table)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
			efx->rx_hash_key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}

static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
					      unsigned *context_size)
{
	u32 new_rx_rss_context;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
					    false, context_size);

	if (rc != 0)
		return rc;

	nic_data->rx_rss_context = new_rx_rss_context;
	nic_data->rx_rss_context_exclusive = false;
	efx_set_default_rx_indir_table(efx);
	return 0;
}

static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
						 const u32 *rx_indir_table)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;
	u32 new_rx_rss_context;

	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
	    !nic_data->rx_rss_context_exclusive) {
		rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
						true, NULL);
		if (rc == -EOPNOTSUPP)
			return rc;
		else if (rc != 0)
			goto fail1;
	} else {
		new_rx_rss_context = nic_data->rx_rss_context;
	}

	rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
					 rx_indir_table);
	if (rc != 0)
		goto fail2;

	if (nic_data->rx_rss_context != new_rx_rss_context)
		efx_ef10_rx_free_indir_table(efx);
	nic_data->rx_rss_context = new_rx_rss_context;
	nic_data->rx_rss_context_exclusive = true;
	if (rx_indir_table != efx->rx_indir_table)
		memcpy(efx->rx_indir_table, rx_indir_table,
		       sizeof(efx->rx_indir_table));
	return 0;

fail2:
	if (new_rx_rss_context != nic_data->rx_rss_context)
		efx_ef10_free_rss_context(efx, new_rx_rss_context);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
					  const u32 *rx_indir_table)
{
	int rc;

	if (efx->rss_spread == 1)
		return 0;

	rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);

	if (rc == -ENOBUFS && !user) {
		unsigned context_size;
		bool mismatch = false;
		size_t i;

		for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
		     i++)
			mismatch = rx_indir_table[i] !=
				ethtool_rxfh_indir_default(i, efx->rss_spread);

		rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
		if (rc == 0) {
			if (context_size != efx->rss_spread)
				netif_warn(efx, probe, efx->net_dev,
					   "Could not allocate an exclusive RSS"
					   " context; allocated a shared one of"
					   " different size."
					   " Wanted %u, got %u.\n",
					   efx->rss_spread, context_size);
			else if (mismatch)
				netif_warn(efx, probe, efx->net_dev,
					   "Could not allocate an exclusive RSS"
					   " context; allocated a shared one but"
					   " could not apply custom"
					   " indirection.\n");
			else
				netif_info(efx, probe, efx->net_dev,
					   "Could not allocate an exclusive RSS"
					   " context; allocated a shared one.\n");
		}
	}
	return rc;
}

static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
					  const u32 *rx_indir_table
					  __attribute__ ((unused)))
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (user)
		return -EOPNOTSUPP;
	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		return 0;
	return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}

static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						EFX_BUF_SIZE));
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t inlen;
	dma_addr_t dma_addr;
	int rc;
	int i;
	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1,
			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
			  NULL, 0, NULL);
	if (rc)
		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
			    efx_rx_queue_index(rx_queue));
}

static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
			       outbuf, outlen, rc);
}

static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
	write_count = rx_queue->added_count & ~7;
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}
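
/* Worked example (illustrative only): descriptors are notified to the NIC in
 * batches of 8, so with added_count = 45 the write pointer advances to
 * 45 & ~7 = 40 and the remaining 5 descriptors wait for the next batch.
 */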

static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}

static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}

static int efx_ef10_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static void efx_ef10_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
			       outbuf, outlen, rc);
}

static int efx_ef10_ev_init(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						   EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	size_t inlen, outlen;
	unsigned int enabled, implemented;
	dma_addr_t dma_addr;
	int rc;
	int i;

	nic_data = efx->nic_data;

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	if (nic_data->datapath_caps2 &
	    1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
		/* Use the new generic approach to specifying event queue
		 * configuration, requesting lower latency or higher throughput.
		 * The options that actually get used appear in the output.
		 */
		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_V2_IN_FLAG_TYPE,
				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
	} else {
		bool cut_thru = !(nic_data->datapath_caps &
			1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);

		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
	}

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);

	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
		netif_dbg(efx, drv, efx->net_dev,
			  "Channel %d using event queue flags %08x\n",
			  channel->channel,
			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

	/* IRQ return is ignored */
	if (channel->channel || rc)
		return rc;

	/* Successfully created event queue on channel 0 */
	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
	if (rc == -ENOSYS) {
		/* GET_WORKAROUNDS was implemented before this workaround,
		 * thus it must be unavailable in this firmware.
		 */
		nic_data->workaround_26807 = false;
		rc = 0;
	} else if (rc) {
		goto fail;
	} else {
		nic_data->workaround_26807 =
			!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);

		if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
		    !nic_data->workaround_26807) {
			unsigned int flags;

			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG26807,
						     true, &flags);

			if (!rc) {
				if (flags &
				    1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
					netif_info(efx, drv, efx->net_dev,
						   "other functions on NIC have been reset\n");

					/* With MCFW v4.6.x and earlier, the
					 * boot count will have incremented,
					 * so re-read the warm_boot_count
					 * value now to ensure this function
					 * doesn't think it has changed next
					 * time it checks.
					 */
					rc = efx_ef10_get_warm_boot_count(efx);
					if (rc >= 0) {
						nic_data->warm_boot_count = rc;
						rc = 0;
					}
				}
				nic_data->workaround_26807 = true;
			} else if (rc == -EPERM) {
				rc = 0;
			}
		}
	}

	if (!rc)
		return 0;

fail:
	efx_ef10_ev_fini(channel);
	return rc;
}

static void efx_ef10_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

/* partially received RX was aborted. clean up. */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}

static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
			    EFX_QWORD_FMT "\n",
			    EFX_QWORD_VAL(*event));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));

	if (n_descs != rx_queue->scatter_n + 1) {
		struct efx_ef10_nic_data *nic_data = efx->nic_data;

		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
				netdev_WARN(efx->net_dev,
					    "invalid RX abort: scatter_n=%u event="
					    EFX_QWORD_FMT "\n",
					    rx_queue->scatter_n,
					    EFX_QWORD_VAL(*event));
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		/* Check that RX completion merging is valid, i.e.
		 * the current firmware supports it and this is a
		 * non-scattered packet.
		 */
		if (!(nic_data->datapath_caps &
		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
		    rx_queue->scatter_n != 0 || rx_cont) {
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
		flags |= EFX_RX_PKT_DISCARD;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
		channel->n_rx_ip_hdr_chksum_err += n_packets;
	} else if (unlikely(EFX_QWORD_FIELD(*event,
					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
		channel->n_rx_tcp_udp_chksum_err += n_packets;
	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
		flags |= EFX_RX_PKT_CSUMMED;
	}

	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}

static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue;
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
		return 0;

	/* Transmit completion */
	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
	tx_queue = efx_channel_get_tx_queue(channel,
					    tx_ev_q_label % EFX_TXQ_TYPES);
	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
		    tx_queue->ptr_mask);
	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);

	return tx_descs;
}
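
/* Worked example (illustrative only): a TX event carries the index of the
 * last completed descriptor.  With read_count = 100 and
 * tx_ev_desc_ptr = 105, (105 + 1 - 100) & ptr_mask = 6 descriptors have
 * completed since the last event.
 */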

static void
efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int subcode;

	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);

	switch (subcode) {
	case ESE_DZ_DRV_TIMER_EV:
	case ESE_DZ_DRV_WAKE_UP_EV:
		break;
	case ESE_DZ_DRV_START_UP_EV:
		/* event queue init complete. ok. */
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %d"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
						   efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	u32 subcode;

	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);

	switch (subcode) {
	case EFX_EF10_TEST:
		channel->event_test_cpu = raw_smp_processor_id();
		break;
	case EFX_EF10_REFILL:
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here.
		 */
		efx_fast_push_rx_descriptors(&channel->rx_queue, true);
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %u"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, (unsigned) subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	efx_qword_t event, *p_event;
	unsigned int read_ptr;
	int ev_code;
	int tx_descs = 0;
	int spent = 0;

	if (quota <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			break;

		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		switch (ev_code) {
		case ESE_DZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case ESE_DZ_EV_CODE_RX_EV:
			spent += efx_ef10_handle_rx_event(channel, &event);
			if (spent >= quota) {
				/* XXX can we split a merged event to
				 * avoid going over-quota?
				 */
				spent = quota;
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_TX_EV:
			tx_descs += efx_ef10_handle_tx_event(channel, &event);
			if (tx_descs > efx->txq_entries) {
				spent = quota;
				goto out;
			} else if (++spent == quota) {
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_DRIVER_EV:
			efx_ef10_handle_driver_event(channel, &event);
			if (++spent == quota)
				goto out;
			break;
		case EFX_EF10_DRVGEN_EV:
			efx_ef10_handle_driver_generated_event(channel, &event);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "channel %d unknown event type %d"
				  " (data " EFX_QWORD_FMT ")\n",
				  channel->channel, ev_code,
				  EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

static void efx_ef10_ev_read_ack(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	efx_dword_t rptr;

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     (channel->eventq_read_ptr &
				      channel->eventq_mask) >>
				     ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     channel->eventq_read_ptr &
				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
				     channel->eventq_read_ptr &
				     channel->eventq_mask);
		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
	}
}
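
/* Illustrative note (not part of the original source): under workaround
 * 35388 the RPTR register field is too narrow for the full event queue
 * index, so the masked read pointer is written in two halves, high bits
 * first and then low bits, each write tagged by the RPTR_FLAGS field.
 */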

static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

void efx_ef10_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}

static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (nic_data->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_ef10_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_ef10_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void efx_ef10_prepare_flr(struct efx_nic *efx)
{
	atomic_set(&efx->active_queues, 0);
}

static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
				  const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
	/* XXX should we randomise the initval? */
}
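
/* Illustrative note (not part of the original source): both the equality
 * check and the hash cover only the match fields, i.e. the tail of
 * struct efx_filter_spec starting at outer_vid; fields such as priority and
 * dmaq_id before that offset deliberately do not affect hash placement.
 */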
3241
3242/* Decide whether a filter should be exclusive or else should allow
3243 * delivery to additional recipients. Currently we decide that
3244 * filters for specific local unicast MAC and IP addresses are
3245 * exclusive.
3246 */
3247static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
3248{
3249 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
3250 !is_multicast_ether_addr(spec->loc_mac))
3251 return true;
3252
3253 if ((spec->match_flags &
3254 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
3255 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
3256 if (spec->ether_type == htons(ETH_P_IP) &&
3257 !ipv4_is_multicast(spec->loc_host[0]))
3258 return true;
3259 if (spec->ether_type == htons(ETH_P_IPV6) &&
3260 ((const u8 *)spec->loc_host)[0] != 0xff)
3261 return true;
3262 }
3263
3264 return false;
3265}
3266
3267static struct efx_filter_spec *
3268efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
3269 unsigned int filter_idx)
3270{
3271 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
3272 ~EFX_EF10_FILTER_FLAGS);
3273}
3274
3275static unsigned int
3276efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
3277 unsigned int filter_idx)
3278{
3279 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
3280}
3281
3282static void
3283efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
3284 unsigned int filter_idx,
3285 const struct efx_filter_spec *spec,
3286 unsigned int flags)
3287{
3288 table->entry[filter_idx].spec = (unsigned long)spec | flags;
3289}
3290
3291static void efx_ef10_filter_push_prep(struct efx_nic *efx,
3292 const struct efx_filter_spec *spec,
3293 efx_dword_t *inbuf, u64 handle,
3294 bool replacing)
3295{
3296 struct efx_ef10_nic_data *nic_data = efx->nic_data;
dcb4123c 3297 u32 flags = spec->flags;
8127d661
BH
3298
3299 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
3300
dcb4123c
JC
3301 /* Remove RSS flag if we don't have an RSS context. */
3302 if (flags & EFX_FILTER_FLAG_RX_RSS &&
3303 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
3304 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
3305 flags &= ~EFX_FILTER_FLAG_RX_RSS;
3306
8127d661
BH
3307 if (replacing) {
3308 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3309 MC_CMD_FILTER_OP_IN_OP_REPLACE);
3310 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
3311 } else {
3312 u32 match_fields = 0;
3313
3314 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3315 efx_ef10_filter_is_exclusive(spec) ?
3316 MC_CMD_FILTER_OP_IN_OP_INSERT :
3317 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
3318
3319 /* Convert match flags and values. Unlike almost
3320 * everything else in MCDI, these fields are in
3321 * network byte order.
3322 */
3323 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
3324 match_fields |=
3325 is_multicast_ether_addr(spec->loc_mac) ?
3326 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
3327 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
3328#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
3329 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
3330 match_fields |= \
3331 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3332 mcdi_field ## _LBN; \
3333 BUILD_BUG_ON( \
3334 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
3335 sizeof(spec->gen_field)); \
3336 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
3337 &spec->gen_field, sizeof(spec->gen_field)); \
3338 }
3339 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
3340 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
3341 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
3342 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
3343 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
3344 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
3345 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
3346 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
3347 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
3348 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
3349#undef COPY_FIELD
3350 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
3351 match_fields);
3352 }
3353
45b2449e 3354 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
8127d661
BH
3355 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
3356 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3357 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
3358 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
e3d36293 3359 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
8127d661
BH
3360 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
3361 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
a0bc3487
BH
3362 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
3363 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3364 0 : spec->dmaq_id);
8127d661 3365 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
dcb4123c 3366 (flags & EFX_FILTER_FLAG_RX_RSS) ?
8127d661
BH
3367 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
3368 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
dcb4123c 3369 if (flags & EFX_FILTER_FLAG_RX_RSS)
8127d661
BH
3370 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
3371 spec->rss_context !=
3372 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
3373 spec->rss_context : nic_data->rx_rss_context);
3374}
3375
3376static int efx_ef10_filter_push(struct efx_nic *efx,
3377 const struct efx_filter_spec *spec,
3378 u64 *handle, bool replacing)
3379{
3380 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3381 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
3382 int rc;
3383
3384 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
3385 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3386 outbuf, sizeof(outbuf), NULL);
3387 if (rc == 0)
3388 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
065e64c4
BH
3389 if (rc == -ENOSPC)
3390 rc = -EBUSY; /* to match efx_farch_filter_insert() */
8127d661
BH
3391 return rc;
3392}
3393
7ac0dd9d 3394static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
8127d661 3395{
7ac0dd9d
AR
3396 unsigned int match_flags = spec->match_flags;
3397 u32 mcdi_flags = 0;
3398
3399 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
3400 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
3401 mcdi_flags |=
3402 is_multicast_ether_addr(spec->loc_mac) ?
3403 (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
3404 (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
3405 }
3406
3407#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
3408 unsigned int old_match_flags = match_flags; \
3409 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
3410 if (match_flags != old_match_flags) \
3411 mcdi_flags |= \
3412 (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3413 mcdi_field ## _LBN); \
3414 }
3415 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
3416 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
3417 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
3418 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
3419 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
3420 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
3421 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
3422 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
3423 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
3424 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
3425#undef MAP_FILTER_TO_MCDI_FLAG
3426
3427 /* Did we map them all? */
3428 WARN_ON_ONCE(match_flags);
3429
3430 return mcdi_flags;
3431}
3432
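/* MAP_FILTER_TO_MCDI_FLAG() above uses a clear-and-compare idiom: clear one
 * generic flag and, if the value changed, set the matching MCDI bit; whatever
 * survives in match_flags is unmappable and trips the WARN_ON_ONCE().
 * A standalone sketch of the pattern, with invented EX_* flag values:
 */
#if 0	/* illustration only */
#include <stdint.h>

#define EX_GEN_FLAG_A	0x1u
#define EX_GEN_FLAG_B	0x2u
#define EX_MCDI_BIT_A	(1u << 4)
#define EX_MCDI_BIT_B	(1u << 7)

static uint32_t example_map_flags(unsigned int match_flags)
{
	uint32_t mcdi_flags = 0;
	unsigned int old;

	old = match_flags;
	match_flags &= ~EX_GEN_FLAG_A;
	if (match_flags != old)		/* flag A was present */
		mcdi_flags |= EX_MCDI_BIT_A;
	old = match_flags;
	match_flags &= ~EX_GEN_FLAG_B;
	if (match_flags != old)		/* flag B was present */
		mcdi_flags |= EX_MCDI_BIT_B;
	/* any leftover bits here mean an unsupported match type */
	return match_flags ? 0 : mcdi_flags;
}
#endif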
3433static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
3434 const struct efx_filter_spec *spec)
3435{
3436 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
3437 unsigned int match_pri;
3438
3439 for (match_pri = 0;
3440 match_pri < table->rx_match_count;
3441 match_pri++)
3442 		if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
3443 return match_pri;
3444
3445 return -EPROTONOSUPPORT;
3446}
3447
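/* The filter IDs handed back to the stack encode both values this function
 * deals in: id = match_pri * HUNT_FILTER_TBL_ROWS + table index (see the end
 * of efx_ef10_filter_insert() below), and index = id % HUNT_FILTER_TBL_ROWS
 * on the way back in.  A sketch of the encode/decode pair, not driver code:
 */
#if 0	/* illustration only */
#include <stdint.h>

#define EX_TBL_ROWS 8192u	/* mirrors HUNT_FILTER_TBL_ROWS */

static uint32_t example_encode_id(unsigned int match_pri, unsigned int index)
{
	return match_pri * EX_TBL_ROWS + index;
}

static unsigned int example_id_to_index(uint32_t id)
{
	return id % EX_TBL_ROWS;	/* row within the 8K hardware table */
}

static unsigned int example_id_to_pri(uint32_t id)
{
	return id / EX_TBL_ROWS;	/* which match type inserted it */
}
#endif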
3448static s32 efx_ef10_filter_insert(struct efx_nic *efx,
3449 struct efx_filter_spec *spec,
3450 bool replace_equal)
3451{
3452 struct efx_ef10_filter_table *table = efx->filter_state;
3453 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3454 struct efx_filter_spec *saved_spec;
3455 unsigned int match_pri, hash;
3456 unsigned int priv_flags;
3457 bool replacing = false;
3458 int ins_index = -1;
3459 DEFINE_WAIT(wait);
3460 bool is_mc_recip;
3461 s32 rc;
3462
3463 /* For now, only support RX filters */
3464 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
3465 EFX_FILTER_FLAG_RX)
3466 return -EINVAL;
3467
3468 	rc = efx_ef10_filter_pri(table, spec);
3469 if (rc < 0)
3470 return rc;
3471 match_pri = rc;
3472
3473 hash = efx_ef10_filter_hash(spec);
3474 is_mc_recip = efx_filter_is_mc_recipient(spec);
3475 if (is_mc_recip)
3476 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3477
3478 /* Find any existing filters with the same match tuple or
3479 * else a free slot to insert at. If any of them are busy,
3480 * we have to wait and retry.
3481 */
3482 for (;;) {
3483 unsigned int depth = 1;
3484 unsigned int i;
3485
3486 spin_lock_bh(&efx->filter_lock);
3487
3488 for (;;) {
3489 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3490 saved_spec = efx_ef10_filter_entry_spec(table, i);
3491
3492 if (!saved_spec) {
3493 if (ins_index < 0)
3494 ins_index = i;
3495 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3496 if (table->entry[i].spec &
3497 EFX_EF10_FILTER_FLAG_BUSY)
3498 break;
3499 if (spec->priority < saved_spec->priority &&
3500 			    spec->priority != EFX_FILTER_PRI_AUTO) {
3501 rc = -EPERM;
3502 goto out_unlock;
3503 }
3504 if (!is_mc_recip) {
3505 /* This is the only one */
3506 if (spec->priority ==
3507 saved_spec->priority &&
3508 !replace_equal) {
3509 rc = -EEXIST;
3510 goto out_unlock;
3511 }
3512 ins_index = i;
3513 goto found;
3514 } else if (spec->priority >
3515 saved_spec->priority ||
3516 (spec->priority ==
3517 saved_spec->priority &&
3518 replace_equal)) {
3519 if (ins_index < 0)
3520 ins_index = i;
3521 else
3522 __set_bit(depth, mc_rem_map);
3523 }
3524 }
3525
3526 /* Once we reach the maximum search depth, use
3527 * the first suitable slot or return -EBUSY if
3528 * there was none
3529 */
3530 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3531 if (ins_index < 0) {
3532 rc = -EBUSY;
3533 goto out_unlock;
3534 }
3535 goto found;
3536 }
3537
3538 ++depth;
3539 }
3540
3541 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3542 spin_unlock_bh(&efx->filter_lock);
3543 schedule();
3544 }
3545
3546found:
3547 /* Create a software table entry if necessary, and mark it
3548 * busy. We might yet fail to insert, but any attempt to
3549 * insert a conflicting filter while we're waiting for the
3550 * firmware must find the busy entry.
3551 */
3552 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3553 if (saved_spec) {
3554 if (spec->priority == EFX_FILTER_PRI_AUTO &&
3555 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
3556 			/* Just make sure it won't be removed */
3557 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
3558 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
3559 			table->entry[ins_index].spec &=
3560 				~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3561 rc = ins_index;
3562 goto out_unlock;
3563 }
3564 replacing = true;
3565 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
3566 } else {
3567 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3568 if (!saved_spec) {
3569 rc = -ENOMEM;
3570 goto out_unlock;
3571 }
3572 *saved_spec = *spec;
3573 priv_flags = 0;
3574 }
3575 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3576 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
3577
3578 /* Mark lower-priority multicast recipients busy prior to removal */
3579 if (is_mc_recip) {
3580 unsigned int depth, i;
3581
3582 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3583 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3584 if (test_bit(depth, mc_rem_map))
3585 table->entry[i].spec |=
3586 EFX_EF10_FILTER_FLAG_BUSY;
3587 }
3588 }
3589
3590 spin_unlock_bh(&efx->filter_lock);
3591
3592 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
3593 replacing);
3594
3595 /* Finalise the software table entry */
3596 spin_lock_bh(&efx->filter_lock);
3597 if (rc == 0) {
3598 if (replacing) {
3599 /* Update the fields that may differ */
3600 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
3601 saved_spec->flags |=
3602 EFX_FILTER_FLAG_RX_OVER_AUTO;
3603 			saved_spec->priority = spec->priority;
3604 			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
3605 saved_spec->flags |= spec->flags;
3606 saved_spec->rss_context = spec->rss_context;
3607 saved_spec->dmaq_id = spec->dmaq_id;
3608 }
3609 } else if (!replacing) {
3610 kfree(saved_spec);
3611 saved_spec = NULL;
3612 }
3613 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
3614
3615 /* Remove and finalise entries for lower-priority multicast
3616 * recipients
3617 */
3618 if (is_mc_recip) {
3619 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3620 unsigned int depth, i;
3621
3622 memset(inbuf, 0, sizeof(inbuf));
3623
3624 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3625 if (!test_bit(depth, mc_rem_map))
3626 continue;
3627
3628 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3629 saved_spec = efx_ef10_filter_entry_spec(table, i);
3630 priv_flags = efx_ef10_filter_entry_flags(table, i);
3631
3632 if (rc == 0) {
3633 spin_unlock_bh(&efx->filter_lock);
3634 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3635 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3636 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3637 table->entry[i].handle);
3638 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3639 inbuf, sizeof(inbuf),
3640 NULL, 0, NULL);
3641 spin_lock_bh(&efx->filter_lock);
3642 }
3643
3644 if (rc == 0) {
3645 kfree(saved_spec);
3646 saved_spec = NULL;
3647 priv_flags = 0;
3648 } else {
3649 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
3650 }
3651 efx_ef10_filter_set_entry(table, i, saved_spec,
3652 priv_flags);
3653 }
3654 }
3655
3656 /* If successful, return the inserted filter ID */
3657 if (rc == 0)
3658 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
3659
3660 wake_up_all(&table->waitq);
3661out_unlock:
3662 spin_unlock_bh(&efx->filter_lock);
3663 finish_wait(&table->waitq, &wait);
3664 return rc;
3665}
3666
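/* The search loop above is bounded open addressing: probe slot
 * (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1) for increasing depth, which
 * relies on the table size being a power of two, and give up after
 * EFX_EF10_FILTER_SEARCH_LIMIT probes.  A simplified sketch without the
 * locking, busy flags or multicast handling - not driver code:
 */
#if 0	/* illustration only */
#include <stdbool.h>

#define EX_ROWS		8192u	/* power of two, like HUNT_FILTER_TBL_ROWS */
#define EX_SEARCH_LIMIT	200u	/* stand-in; the real limit is private to the driver */

static int example_probe(void *const *slots, unsigned int hash,
			 bool (*equal)(const void *slot))
{
	int ins_index = -1;
	unsigned int depth, i;

	for (depth = 1; depth <= EX_SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (EX_ROWS - 1);
		if (!slots[i]) {
			if (ins_index < 0)
				ins_index = i;	/* remember first free slot */
		} else if (equal(slots[i])) {
			return i;		/* existing match takes precedence */
		}
	}
	return ins_index;	/* -1 here becomes -EBUSY in the driver */
}
#endif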
3667 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
3668{
3669 /* no need to do anything here on EF10 */
3670}
3671
3672/* Remove a filter.
3673 * If !by_index, remove by ID
3674 * If by_index, remove by index
3675 * Filter ID may come from userland and must be range-checked.
3676 */
3677static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3678 					   unsigned int priority_mask,
3679 					   u32 filter_id, bool by_index)
3680{
3681 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3682 struct efx_ef10_filter_table *table = efx->filter_state;
3683 MCDI_DECLARE_BUF(inbuf,
3684 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3685 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3686 struct efx_filter_spec *spec;
3687 DEFINE_WAIT(wait);
3688 int rc;
3689
3690 /* Find the software table entry and mark it busy. Don't
3691 * remove it yet; any attempt to update while we're waiting
3692 * for the firmware must find the busy entry.
3693 */
3694 for (;;) {
3695 spin_lock_bh(&efx->filter_lock);
3696 if (!(table->entry[filter_idx].spec &
3697 EFX_EF10_FILTER_FLAG_BUSY))
3698 break;
3699 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3700 spin_unlock_bh(&efx->filter_lock);
3701 schedule();
3702 }
3703
3704 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
3705 	if (!spec ||
3706 	    (!by_index &&
3707 	     efx_ef10_filter_pri(table, spec) !=
3708 filter_id / HUNT_FILTER_TBL_ROWS)) {
3709 rc = -ENOENT;
3710 goto out_unlock;
3711 }
3712
3713 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
3714 	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
3715 /* Just remove flags */
3716 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
3717 		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3718 rc = 0;
3719 goto out_unlock;
3720 }
3721
3722 	if (!(priority_mask & (1U << spec->priority))) {
3723 rc = -ENOENT;
3724 goto out_unlock;
3725 }
3726
3727 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3728 spin_unlock_bh(&efx->filter_lock);
3729
3730 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
3731 		/* Reset to an automatic filter */
3732
3733 struct efx_filter_spec new_spec = *spec;
3734
3735 		new_spec.priority = EFX_FILTER_PRI_AUTO;
3736 		new_spec.flags = (EFX_FILTER_FLAG_RX |
3737 (efx_rss_enabled(efx) ?
3738 EFX_FILTER_FLAG_RX_RSS : 0));
3739 new_spec.dmaq_id = 0;
3740 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3741 rc = efx_ef10_filter_push(efx, &new_spec,
3742 &table->entry[filter_idx].handle,
3743 true);
3744
3745 spin_lock_bh(&efx->filter_lock);
3746 if (rc == 0)
3747 *spec = new_spec;
3748 } else {
3749 /* Really remove the filter */
3750
3751 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3752 efx_ef10_filter_is_exclusive(spec) ?
3753 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3754 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3755 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3756 table->entry[filter_idx].handle);
3757 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3758 inbuf, sizeof(inbuf), NULL, 0, NULL);
3759
3760 spin_lock_bh(&efx->filter_lock);
3761 if (rc == 0) {
3762 kfree(spec);
3763 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3764 }
3765 }
3766
3767 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3768 wake_up_all(&table->waitq);
3769out_unlock:
3770 spin_unlock_bh(&efx->filter_lock);
3771 finish_wait(&table->waitq, &wait);
3772 return rc;
3773}
3774
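/* Removal above is driven by a bitmask of priorities rather than a single
 * priority, and a filter carrying EFX_FILTER_FLAG_RX_OVER_AUTO is demoted
 * back to an automatic filter instead of deleted when only the AUTO priority
 * is being cleared.  A sketch of the mask test, with invented numeric
 * priorities (only the bit arithmetic matters) - not driver code:
 */
#if 0	/* illustration only */
#include <stdbool.h>

enum { EX_PRI_AUTO = 0, EX_PRI_MANUAL = 1, EX_PRI_REQUIRED = 2 };

static bool example_mask_covers(unsigned int priority_mask, unsigned int priority)
{
	return priority_mask & (1u << priority);
}
/* e.g. example_mask_covers(1u << EX_PRI_AUTO, EX_PRI_MANUAL) is false,
 * which corresponds to the -ENOENT path above.
 */
#endif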
3775static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
3776 enum efx_filter_priority priority,
3777 u32 filter_id)
3778{
3779 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3780 filter_id, false);
3781}
3782
3783static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
3784{
3785 return filter_id % HUNT_FILTER_TBL_ROWS;
3786}
3787
3788static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
3789 enum efx_filter_priority priority,
3790 u32 filter_id)
3791 {
3792 if (filter_id == EFX_EF10_FILTER_ID_INVALID)
3793 return;
3794 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
3795}
3796
3797static int efx_ef10_filter_get_safe(struct efx_nic *efx,
3798 enum efx_filter_priority priority,
3799 u32 filter_id, struct efx_filter_spec *spec)
3800{
3801 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3802 struct efx_ef10_filter_table *table = efx->filter_state;
3803 const struct efx_filter_spec *saved_spec;
3804 int rc;
3805
3806 spin_lock_bh(&efx->filter_lock);
3807 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
3808 if (saved_spec && saved_spec->priority == priority &&
3809 	    efx_ef10_filter_pri(table, saved_spec) ==
3810 filter_id / HUNT_FILTER_TBL_ROWS) {
3811 *spec = *saved_spec;
3812 rc = 0;
3813 } else {
3814 rc = -ENOENT;
3815 }
3816 spin_unlock_bh(&efx->filter_lock);
3817 return rc;
3818}
3819
3820 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
3821 enum efx_filter_priority priority)
3822{
3823 unsigned int priority_mask;
3824 unsigned int i;
3825 int rc;
3826
3827 priority_mask = (((1U << (priority + 1)) - 1) &
3828 ~(1U << EFX_FILTER_PRI_AUTO));
3829
3830 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3831 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
3832 i, true);
3833 if (rc && rc != -ENOENT)
3834 return rc;
3835 }
3836
3837 return 0;
3838}
3839
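/* Worked example of the mask computed in efx_ef10_filter_clear_rx() above,
 * assuming for illustration that EFX_FILTER_PRI_AUTO == 0: priority == 2
 * gives ((1 << 3) - 1) = 0b111, then & ~(1 << 0) leaves 0b110, i.e. every
 * priority up to and including 2 except the automatic one.  As code:
 */
#if 0	/* illustration only */
static unsigned int example_clear_mask(unsigned int priority, unsigned int pri_auto)
{
	return ((1u << (priority + 1)) - 1) & ~(1u << pri_auto);
}
#endif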
3840static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
3841 enum efx_filter_priority priority)
3842{
3843 struct efx_ef10_filter_table *table = efx->filter_state;
3844 unsigned int filter_idx;
3845 s32 count = 0;
3846
3847 spin_lock_bh(&efx->filter_lock);
3848 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3849 if (table->entry[filter_idx].spec &&
3850 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
3851 priority)
3852 ++count;
3853 }
3854 spin_unlock_bh(&efx->filter_lock);
3855 return count;
3856}
3857
3858static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
3859{
3860 struct efx_ef10_filter_table *table = efx->filter_state;
3861
3862 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
3863}
3864
3865static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
3866 enum efx_filter_priority priority,
3867 u32 *buf, u32 size)
3868{
3869 struct efx_ef10_filter_table *table = efx->filter_state;
3870 struct efx_filter_spec *spec;
3871 unsigned int filter_idx;
3872 s32 count = 0;
3873
3874 spin_lock_bh(&efx->filter_lock);
3875 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3876 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3877 if (spec && spec->priority == priority) {
3878 if (count == size) {
3879 count = -EMSGSIZE;
3880 break;
3881 }
3882 			buf[count++] = (efx_ef10_filter_pri(table, spec) *
3883 HUNT_FILTER_TBL_ROWS +
3884 filter_idx);
3885 }
3886 }
3887 spin_unlock_bh(&efx->filter_lock);
3888 return count;
3889}
3890
3891#ifdef CONFIG_RFS_ACCEL
3892
3893static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
3894
3895static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
3896 struct efx_filter_spec *spec)
3897{
3898 struct efx_ef10_filter_table *table = efx->filter_state;
3899 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3900 struct efx_filter_spec *saved_spec;
3901 unsigned int hash, i, depth = 1;
3902 bool replacing = false;
3903 int ins_index = -1;
3904 u64 cookie;
3905 s32 rc;
3906
3907 /* Must be an RX filter without RSS and not for a multicast
3908 * destination address (RFS only works for connected sockets).
3909 * These restrictions allow us to pass only a tiny amount of
3910 * data through to the completion function.
3911 */
3912 EFX_WARN_ON_PARANOID(spec->flags !=
3913 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
3914 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
3915 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
3916
3917 hash = efx_ef10_filter_hash(spec);
3918
3919 spin_lock_bh(&efx->filter_lock);
3920
3921 /* Find any existing filter with the same match tuple or else
3922 * a free slot to insert at. If an existing filter is busy,
3923 * we have to give up.
3924 */
3925 for (;;) {
3926 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3927 saved_spec = efx_ef10_filter_entry_spec(table, i);
3928
3929 if (!saved_spec) {
3930 if (ins_index < 0)
3931 ins_index = i;
3932 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3933 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
3934 rc = -EBUSY;
3935 goto fail_unlock;
3936 }
3937 if (spec->priority < saved_spec->priority) {
3938 rc = -EPERM;
3939 goto fail_unlock;
3940 }
3941 ins_index = i;
3942 break;
3943 }
3944
3945 /* Once we reach the maximum search depth, use the
3946 * first suitable slot or return -EBUSY if there was
3947 * none
3948 */
3949 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3950 if (ins_index < 0) {
3951 rc = -EBUSY;
3952 goto fail_unlock;
3953 }
3954 break;
3955 }
3956
3957 ++depth;
3958 }
3959
3960 /* Create a software table entry if necessary, and mark it
3961 * busy. We might yet fail to insert, but any attempt to
3962 * insert a conflicting filter while we're waiting for the
3963 * firmware must find the busy entry.
3964 */
3965 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3966 if (saved_spec) {
3967 replacing = true;
3968 } else {
3969 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3970 if (!saved_spec) {
3971 rc = -ENOMEM;
3972 goto fail_unlock;
3973 }
3974 *saved_spec = *spec;
3975 }
3976 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3977 EFX_EF10_FILTER_FLAG_BUSY);
3978
3979 spin_unlock_bh(&efx->filter_lock);
3980
3981 /* Pack up the variables needed on completion */
3982 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
3983
3984 efx_ef10_filter_push_prep(efx, spec, inbuf,
3985 table->entry[ins_index].handle, replacing);
3986 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3987 MC_CMD_FILTER_OP_OUT_LEN,
3988 efx_ef10_filter_rfs_insert_complete, cookie);
3989
3990 return ins_index;
3991
3992fail_unlock:
3993 spin_unlock_bh(&efx->filter_lock);
3994 return rc;
3995}
3996
3997static void
3998efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
3999 int rc, efx_dword_t *outbuf,
4000 size_t outlen_actual)
4001{
4002 struct efx_ef10_filter_table *table = efx->filter_state;
4003 unsigned int ins_index, dmaq_id;
4004 struct efx_filter_spec *spec;
4005 bool replacing;
4006
4007 /* Unpack the cookie */
4008 replacing = cookie >> 31;
4009 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
4010 dmaq_id = cookie & 0xffff;
4011
4012 spin_lock_bh(&efx->filter_lock);
4013 spec = efx_ef10_filter_entry_spec(table, ins_index);
4014 if (rc == 0) {
4015 table->entry[ins_index].handle =
4016 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
4017 if (replacing)
4018 spec->dmaq_id = dmaq_id;
4019 } else if (!replacing) {
4020 kfree(spec);
4021 spec = NULL;
4022 }
4023 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
4024 spin_unlock_bh(&efx->filter_lock);
4025
4026 wake_up_all(&table->waitq);
4027}
4028
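/* The RFS completion cannot carry a pointer, so the insert path packs
 * everything the handler needs into the 64-bit cookie seen above: bit 31 =
 * replacing, the table index starting at bit 16, and dmaq_id in the low 16
 * bits.  A standalone pack/unpack sketch - not driver code:
 */
#if 0	/* illustration only */
#include <stdbool.h>
#include <stdint.h>

static uint64_t example_pack(bool replacing, unsigned int ins_index,
			     unsigned int dmaq_id)
{
	return (uint64_t)replacing << 31 | ins_index << 16 | dmaq_id;
}

static void example_unpack(uint64_t cookie, bool *replacing,
			   unsigned int *ins_index, unsigned int *dmaq_id)
{
	*replacing = cookie >> 31;
	*ins_index = (cookie >> 16) & (8192 - 1);	/* table rows - 1 */
	*dmaq_id = cookie & 0xffff;
}
#endif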
4029static void
4030efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
4031 unsigned long filter_idx,
4032 int rc, efx_dword_t *outbuf,
4033 size_t outlen_actual);
4034
4035static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4036 unsigned int filter_idx)
4037{
4038 struct efx_ef10_filter_table *table = efx->filter_state;
4039 struct efx_filter_spec *spec =
4040 efx_ef10_filter_entry_spec(table, filter_idx);
4041 MCDI_DECLARE_BUF(inbuf,
4042 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4043 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4044
4045 if (!spec ||
4046 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
4047 spec->priority != EFX_FILTER_PRI_HINT ||
4048 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
4049 flow_id, filter_idx))
4050 return false;
4051
4052 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4053 MC_CMD_FILTER_OP_IN_OP_REMOVE);
4054 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4055 table->entry[filter_idx].handle);
4056 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
4057 efx_ef10_filter_rfs_expire_complete, filter_idx))
4058 return false;
4059
4060 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4061 return true;
4062}
4063
4064static void
4065efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
4066 unsigned long filter_idx,
4067 int rc, efx_dword_t *outbuf,
4068 size_t outlen_actual)
4069{
4070 struct efx_ef10_filter_table *table = efx->filter_state;
4071 struct efx_filter_spec *spec =
4072 efx_ef10_filter_entry_spec(table, filter_idx);
4073
4074 spin_lock_bh(&efx->filter_lock);
4075 if (rc == 0) {
4076 kfree(spec);
4077 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4078 }
4079 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
4080 wake_up_all(&table->waitq);
4081 spin_unlock_bh(&efx->filter_lock);
4082}
4083
4084#endif /* CONFIG_RFS_ACCEL */
4085
4086static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
4087{
4088 int match_flags = 0;
4089
4090#define MAP_FLAG(gen_flag, mcdi_field) { \
4091 u32 old_mcdi_flags = mcdi_flags; \
4092 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
4093 mcdi_field ## _LBN); \
4094 if (mcdi_flags != old_mcdi_flags) \
4095 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
4096 }
4097 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4098 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4099 MAP_FLAG(REM_HOST, SRC_IP);
4100 MAP_FLAG(LOC_HOST, DST_IP);
4101 MAP_FLAG(REM_MAC, SRC_MAC);
4102 MAP_FLAG(REM_PORT, SRC_PORT);
4103 MAP_FLAG(LOC_MAC, DST_MAC);
4104 MAP_FLAG(LOC_PORT, DST_PORT);
4105 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4106 MAP_FLAG(INNER_VID, INNER_VLAN);
4107 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4108 MAP_FLAG(IP_PROTO, IP_PROTO);
4109#undef MAP_FLAG
4110
4111 /* Did we map them all? */
4112 if (mcdi_flags)
4113 return -EINVAL;
4114
4115 return match_flags;
4116}
4117
4118static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4119{
4120 struct efx_ef10_filter_table *table = efx->filter_state;
4121 struct efx_ef10_filter_vlan *vlan, *next_vlan;
4122
4123 /* See comment in efx_ef10_filter_table_remove() */
4124 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4125 return;
4126
4127 if (!table)
4128 return;
4129
4130 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4131 efx_ef10_filter_del_vlan_internal(efx, vlan);
4132}
4133
4134static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
4135 enum efx_filter_match_flags match_flags)
4136{
4137 unsigned int match_pri;
4138 int mf;
4139
4140 for (match_pri = 0;
4141 match_pri < table->rx_match_count;
4142 match_pri++) {
4143 mf = efx_ef10_filter_match_flags_from_mcdi(
4144 table->rx_match_mcdi_flags[match_pri]);
4145 if (mf == match_flags)
4146 return true;
4147 }
4148
4149 return false;
4150}
4151
4152static int efx_ef10_filter_table_probe(struct efx_nic *efx)
4153{
4154 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4155 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4156 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4157 	struct net_device *net_dev = efx->net_dev;
4158 unsigned int pd_match_pri, pd_match_count;
4159 struct efx_ef10_filter_table *table;
4160 	struct efx_ef10_vlan *vlan;
4161 size_t outlen;
4162 int rc;
4163
4164 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4165 return -EINVAL;
4166
4167 if (efx->filter_state) /* already probed */
4168 return 0;
4169
4170 table = kzalloc(sizeof(*table), GFP_KERNEL);
4171 if (!table)
4172 return -ENOMEM;
4173
4174 /* Find out which RX filter types are supported, and their priorities */
4175 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
4176 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4177 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4178 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4179 &outlen);
4180 if (rc)
4181 goto fail;
4182 pd_match_count = MCDI_VAR_ARRAY_LEN(
4183 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
4184 table->rx_match_count = 0;
4185
4186 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
4187 u32 mcdi_flags =
4188 MCDI_ARRAY_DWORD(
4189 outbuf,
4190 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
4191 pd_match_pri);
4192 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
4193 if (rc < 0) {
4194 netif_dbg(efx, probe, efx->net_dev,
4195 "%s: fw flags %#x pri %u not supported in driver\n",
4196 __func__, mcdi_flags, pd_match_pri);
4197 } else {
4198 netif_dbg(efx, probe, efx->net_dev,
4199 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
4200 __func__, mcdi_flags, pd_match_pri,
4201 rc, table->rx_match_count);
4202 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4203 table->rx_match_count++;
4204 }
4205 }
4206
4207 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
4208 !(efx_ef10_filter_match_supported(table,
4209 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
4210 efx_ef10_filter_match_supported(table,
4211 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4212 netif_info(efx, probe, net_dev,
4213 "VLAN filters are not supported in this firmware variant\n");
4214 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4215 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4216 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4217 }
4218
4219 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
4220 if (!table->entry) {
4221 rc = -ENOMEM;
4222 goto fail;
4223 }
4224
4225 	table->mc_promisc_last = false;
4226 table->vlan_filter =
4227 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4228 	INIT_LIST_HEAD(&table->vlan_list);
4229
4230 efx->filter_state = table;
4231 init_waitqueue_head(&table->waitq);
4232
4233 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
4234 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
4235 if (rc)
4236 goto fail_add_vlan;
4237 }
4238
4239 return 0;
4240
4241fail_add_vlan:
4242 efx_ef10_filter_cleanup_vlans(efx);
4243 efx->filter_state = NULL;
4244fail:
4245 kfree(table);
4246 return rc;
4247}
4248
4249/* Caller must hold efx->filter_sem for read if race against
4250 * efx_ef10_filter_table_remove() is possible
4251 */
4252static void efx_ef10_filter_table_restore(struct efx_nic *efx)
4253{
4254 struct efx_ef10_filter_table *table = efx->filter_state;
4255 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4256 struct efx_filter_spec *spec;
4257 unsigned int filter_idx;
4258 bool failed = false;
4259 int rc;
4260
4261 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4262
4263 if (!nic_data->must_restore_filters)
4264 return;
4265
4266 if (!table)
4267 return;
4268
4269 spin_lock_bh(&efx->filter_lock);
4270
4271 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4272 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4273 if (!spec)
4274 continue;
4275
4276 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4277 spin_unlock_bh(&efx->filter_lock);
4278
4279 rc = efx_ef10_filter_push(efx, spec,
4280 &table->entry[filter_idx].handle,
4281 false);
4282 if (rc)
4283 failed = true;
4284
4285 spin_lock_bh(&efx->filter_lock);
4286 if (rc) {
4287 kfree(spec);
4288 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4289 } else {
4290 table->entry[filter_idx].spec &=
4291 ~EFX_EF10_FILTER_FLAG_BUSY;
4292 }
4293 }
4294
4295 spin_unlock_bh(&efx->filter_lock);
4296
4297 if (failed)
4298 netif_err(efx, hw, efx->net_dev,
4299 "unable to restore all filters\n");
4300 else
4301 nic_data->must_restore_filters = false;
4302}
4303
4304static void efx_ef10_filter_table_remove(struct efx_nic *efx)
4305{
4306 struct efx_ef10_filter_table *table = efx->filter_state;
4307 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
4308 struct efx_filter_spec *spec;
4309 unsigned int filter_idx;
4310 int rc;
4311
4312 	efx_ef10_filter_cleanup_vlans(efx);
4313 	efx->filter_state = NULL;
4314 /* If we were called without locking, then it's not safe to free
4315 * the table as others might be using it. So we just WARN, leak
4316 * the memory, and potentially get an inconsistent filter table
4317 * state.
4318 * This should never actually happen.
4319 */
4320 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4321 return;
4322
4323 if (!table)
4324 return;
4325
4326 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4327 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4328 if (!spec)
4329 continue;
4330
4331 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4332 efx_ef10_filter_is_exclusive(spec) ?
4333 MC_CMD_FILTER_OP_IN_OP_REMOVE :
4334 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4335 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4336 table->entry[filter_idx].handle);
4337 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
4338 sizeof(inbuf), NULL, 0, NULL);
4339 		if (rc)
4340 netif_info(efx, drv, efx->net_dev,
4341 "%s: filter %04x remove failed\n",
4342 __func__, filter_idx);
4343 kfree(spec);
4344 }
4345
4346 vfree(table->entry);
4347 kfree(table);
4348}
4349
4350static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
4351{
4352 struct efx_ef10_filter_table *table = efx->filter_state;
4353 unsigned int filter_idx;
4354
4355 if (*id != EFX_EF10_FILTER_ID_INVALID) {
4356 filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
4357 if (!table->entry[filter_idx].spec)
4358 netif_dbg(efx, drv, efx->net_dev,
4359 "marked null spec old %04x:%04x\n", *id,
4360 filter_idx);
4361 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
4362 *id = EFX_EF10_FILTER_ID_INVALID;
4363 	}
4364}
4365
4366/* Mark old per-VLAN filters that may need to be removed */
4367static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
4368 struct efx_ef10_filter_vlan *vlan)
4369{
4370 struct efx_ef10_filter_table *table = efx->filter_state;
4371 	unsigned int i;
4372
4373 	for (i = 0; i < table->dev_uc_count; i++)
4374 		efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
4375 	for (i = 0; i < table->dev_mc_count; i++)
4376 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
4377 efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
4378 efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
4379 efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
4380}
4381
4382/* Mark old filters that may need to be removed.
4383 * Caller must hold efx->filter_sem for read if race against
4384 * efx_ef10_filter_table_remove() is possible
4385 */
4386static void efx_ef10_filter_mark_old(struct efx_nic *efx)
4387{
4388 struct efx_ef10_filter_table *table = efx->filter_state;
4389 	struct efx_ef10_filter_vlan *vlan;
4390
4391 spin_lock_bh(&efx->filter_lock);
4392 list_for_each_entry(vlan, &table->vlan_list, list)
4393 _efx_ef10_filter_vlan_mark_old(efx, vlan);
4394 	spin_unlock_bh(&efx->filter_lock);
4395}
4396
4397 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
4398{
4399 struct efx_ef10_filter_table *table = efx->filter_state;
4400 struct net_device *net_dev = efx->net_dev;
4401 struct netdev_hw_addr *uc;
4402 	int addr_count;
4403 	unsigned int i;
4404
4405 	addr_count = netdev_uc_count(net_dev);
4406 	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
4407 	table->dev_uc_count = 1 + addr_count;
4408 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
4409 i = 1;
4410 netdev_for_each_uc_addr(uc, net_dev) {
4411 		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
4412 			table->uc_promisc = true;
4413 break;
4414 }
4415 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
4416 i++;
4417 }
4418}
4419
4420 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
4421{
4422 struct efx_ef10_filter_table *table = efx->filter_state;
4423 struct net_device *net_dev = efx->net_dev;
4424 struct netdev_hw_addr *mc;
4425 	unsigned int i, addr_count;
4426
4427 	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
4428
4429 addr_count = netdev_mc_count(net_dev);
4430 i = 0;
4431 	netdev_for_each_mc_addr(mc, net_dev) {
4432 		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
4433 			table->mc_promisc = true;
4434 break;
4435 }
4436 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
4437 i++;
4438 	}
4439
4440 table->dev_mc_count = i;
4441}
4442
4443 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
4444 struct efx_ef10_filter_vlan *vlan,
4445 bool multicast, bool rollback)
4446{
4447 struct efx_ef10_filter_table *table = efx->filter_state;
4448 struct efx_ef10_dev_addr *addr_list;
4449 	enum efx_filter_flags filter_flags;
4450 	struct efx_filter_spec spec;
4451 u8 baddr[ETH_ALEN];
4452 unsigned int i, j;
4453 int addr_count;
4454 	u16 *ids;
4455 int rc;
4456
4457 if (multicast) {
4458 addr_list = table->dev_mc_list;
4459 		addr_count = table->dev_mc_count;
4460 		ids = vlan->mc;
4461 } else {
4462 addr_list = table->dev_uc_list;
4463 		addr_count = table->dev_uc_count;
4464 		ids = vlan->uc;
4465 }
4466
4467 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4468
4469 	/* Insert/renew filters */
4470 	for (i = 0; i < addr_count; i++) {
4471 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4472 		efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
4473 rc = efx_ef10_filter_insert(efx, &spec, true);
4474 if (rc < 0) {
4475 if (rollback) {
4476 netif_info(efx, drv, efx->net_dev,
4477 "efx_ef10_filter_insert failed rc=%d\n",
4478 rc);
4479 /* Fall back to promiscuous */
4480 for (j = 0; j < i; j++) {
4481 efx_ef10_filter_remove_unsafe(
4482 efx, EFX_FILTER_PRI_AUTO,
4483 ids[j]);
4484 ids[j] = EFX_EF10_FILTER_ID_INVALID;
4485 }
4486 return rc;
4487 } else {
4488 /* mark as not inserted, and carry on */
4489 rc = EFX_EF10_FILTER_ID_INVALID;
4490 			}
4491 		}
4492 		ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
4493 	}
4494
4495 if (multicast && rollback) {
4496 /* Also need an Ethernet broadcast filter */
4497 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4498 		eth_broadcast_addr(baddr);
4499 		efx_filter_set_eth_local(&spec, vlan->vid, baddr);
4500 		rc = efx_ef10_filter_insert(efx, &spec, true);
4501 		if (rc < 0) {
4502 			netif_warn(efx, drv, efx->net_dev,
4503 "Broadcast filter insert failed rc=%d\n", rc);
4504 /* Fall back to promiscuous */
4505 for (j = 0; j < i; j++) {
4506 efx_ef10_filter_remove_unsafe(
4507 efx, EFX_FILTER_PRI_AUTO,
4508 ids[j]);
4509 ids[j] = EFX_EF10_FILTER_ID_INVALID;
4510 }
4511 return rc;
4512 } else {
4513 			EFX_WARN_ON_PARANOID(vlan->bcast !=
4514 					     EFX_EF10_FILTER_ID_INVALID);
4515 			vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
4516 		}
4517 	}
4518
4519 return 0;
4520}
4521
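/* On a mid-list failure the function above unwinds everything it inserted
 * so far and invalidates the saved IDs, so the caller can fall back to a
 * default-match (promiscuous) filter.  A condensed sketch of that
 * insert-or-rollback shape, with hypothetical callbacks - not driver code:
 */
#if 0	/* illustration only */
#include <stdint.h>

#define EX_ID_INVALID 0xffffu	/* mirrors EFX_EF10_FILTER_ID_INVALID */

static int example_install_list(unsigned int count, uint16_t *ids,
				int (*insert)(unsigned int),
				void (*remove)(uint16_t))
{
	unsigned int i, j;
	int rc;

	for (i = 0; i < count; i++) {
		rc = insert(i);
		if (rc < 0) {
			for (j = 0; j < i; j++) {	/* undo what we did */
				remove(ids[j]);
				ids[j] = EX_ID_INVALID;
			}
			return rc;
		}
		ids[i] = (uint16_t)rc;
	}
	return 0;
}
#endif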
4522static int efx_ef10_filter_insert_def(struct efx_nic *efx,
4523 struct efx_ef10_filter_vlan *vlan,
4524 bool multicast, bool rollback)
4525 {
4526 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4527 	enum efx_filter_flags filter_flags;
4528 struct efx_filter_spec spec;
4529 u8 baddr[ETH_ALEN];
4530 int rc;
4531
4532 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4533
4534 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4535
4536 if (multicast)
4537 efx_filter_set_mc_def(&spec);
4538 else
4539 efx_filter_set_uc_def(&spec);
4540
4541 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
4542 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
4543
4544 rc = efx_ef10_filter_insert(efx, &spec, true);
4545 if (rc < 0) {
4546 netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
4547 efx->net_dev,
4548 "%scast mismatch filter insert failed rc=%d\n",
4549 multicast ? "Multi" : "Uni", rc);
4550 	} else if (multicast) {
4551 EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
4552 vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
4553 if (!nic_data->workaround_26807) {
4554 /* Also need an Ethernet broadcast filter */
4555 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4556 					   filter_flags, 0);
4557 			eth_broadcast_addr(baddr);
4558 			efx_filter_set_eth_local(&spec, vlan->vid, baddr);
4559 rc = efx_ef10_filter_insert(efx, &spec, true);
4560 if (rc < 0) {
4561 netif_warn(efx, drv, efx->net_dev,
4562 "Broadcast filter insert failed rc=%d\n",
4563 rc);
4564 if (rollback) {
4565 /* Roll back the mc_def filter */
4566 efx_ef10_filter_remove_unsafe(
4567 efx, EFX_FILTER_PRI_AUTO,
4568 vlan->mcdef);
4569 vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
4570 return rc;
4571 }
4572 } else {
4573 				EFX_WARN_ON_PARANOID(vlan->bcast !=
4574 						     EFX_EF10_FILTER_ID_INVALID);
4575 				vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
4576 }
4577 }
4578 rc = 0;
4579 } else {
4580 EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
4581 vlan->ucdef = rc;
4582 rc = 0;
4583 }
4584 return rc;
4585}
4586
4587/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
4588 * flag or removes these filters, we don't need to hold the filter_lock while
4589 * scanning for these filters.
4590 */
4591static void efx_ef10_filter_remove_old(struct efx_nic *efx)
4592{
4593 struct efx_ef10_filter_table *table = efx->filter_state;
4594 int remove_failed = 0;
4595 int remove_noent = 0;
4596 int rc;
4597 	int i;
4598
4599 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4600 if (ACCESS_ONCE(table->entry[i].spec) &
4601 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
4602 rc = efx_ef10_filter_remove_internal(efx,
4603 1U << EFX_FILTER_PRI_AUTO, i, true);
4604 if (rc == -ENOENT)
4605 remove_noent++;
4606 else if (rc)
4607 remove_failed++;
4608 }
4609 }
4610
4611 if (remove_failed)
4612 netif_info(efx, drv, efx->net_dev,
4613 "%s: failed to remove %d filters\n",
4614 __func__, remove_failed);
4615 if (remove_noent)
4616 netif_info(efx, drv, efx->net_dev,
4617 "%s: failed to remove %d non-existent filters\n",
4618 __func__, remove_noent);
4619}
4620
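/* Device address lists are kept in sync by mark-and-sweep:
 * efx_ef10_filter_mark_old() flags every filter owned by the address lists
 * as AUTO_OLD, re-inserting a still-wanted address clears the flag, and
 * efx_ef10_filter_remove_old() above sweeps whatever is still flagged.
 * A compact sketch of the three phases - not driver code:
 */
#if 0	/* illustration only */
#include <stdbool.h>

#define EX_FLAG_AUTO_OLD 0x2ul	/* mirrors EFX_EF10_FILTER_FLAG_AUTO_OLD */

static void example_sync(unsigned long *flags, unsigned int n,
			 bool (*still_wanted)(unsigned int))
{
	unsigned int i;

	for (i = 0; i < n; i++)		/* mark */
		flags[i] |= EX_FLAG_AUTO_OLD;
	for (i = 0; i < n; i++)		/* renew */
		if (still_wanted(i))
			flags[i] &= ~EX_FLAG_AUTO_OLD;
	for (i = 0; i < n; i++)		/* sweep */
		if (flags[i] & EX_FLAG_AUTO_OLD)
			flags[i] = 0;	/* stands in for the MCDI remove */
}
#endif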
4621static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
4622{
4623 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4624 u8 mac_old[ETH_ALEN];
4625 int rc, rc2;
4626
4627 /* Only reconfigure a PF-created vport */
4628 if (is_zero_ether_addr(nic_data->vport_mac))
4629 return 0;
4630
4631 efx_device_detach_sync(efx);
4632 efx_net_stop(efx->net_dev);
4633 down_write(&efx->filter_sem);
4634 efx_ef10_filter_table_remove(efx);
4635 up_write(&efx->filter_sem);
4636
4637 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
4638 if (rc)
4639 goto restore_filters;
4640
4641 ether_addr_copy(mac_old, nic_data->vport_mac);
4642 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
4643 nic_data->vport_mac);
4644 if (rc)
4645 goto restore_vadaptor;
4646
4647 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
4648 efx->net_dev->dev_addr);
4649 if (!rc) {
4650 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
4651 } else {
4652 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
4653 if (rc2) {
4654 /* Failed to add original MAC, so clear vport_mac */
4655 eth_zero_addr(nic_data->vport_mac);
4656 goto reset_nic;
4657 }
4658 }
4659
4660restore_vadaptor:
4661 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
4662 if (rc2)
4663 goto reset_nic;
4664restore_filters:
4665 down_write(&efx->filter_sem);
4666 rc2 = efx_ef10_filter_table_probe(efx);
4667 up_write(&efx->filter_sem);
4668 if (rc2)
4669 goto reset_nic;
4670
4671 rc2 = efx_net_open(efx->net_dev);
4672 if (rc2)
4673 goto reset_nic;
4674
4675 netif_device_attach(efx->net_dev);
4676
4677 return rc;
4678
4679reset_nic:
4680 netif_err(efx, drv, efx->net_dev,
4681 "Failed to restore when changing MAC address - scheduling reset\n");
4682 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
4683
4684 return rc ? rc : rc2;
4685}
4686
4687/* Caller must hold efx->filter_sem for read if race against
4688 * efx_ef10_filter_table_remove() is possible
4689 */
4690static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
4691 struct efx_ef10_filter_vlan *vlan)
4692{
4693 struct efx_ef10_filter_table *table = efx->filter_state;
4694 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
4695
4696 /* Do not install unspecified VID if VLAN filtering is enabled.
4697 * Do not install all specified VIDs if VLAN filtering is disabled.
4698 */
4699 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
4700 return;
4701
4702 	/* Insert/renew unicast filters */
4703 	if (table->uc_promisc) {
4704 efx_ef10_filter_insert_def(efx, vlan, false, false);
4705 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
4706 } else {
4707 /* If any of the filters failed to insert, fall back to
4708 * promiscuous mode - add in the uc_def filter. But keep
4709 * our individual unicast filters.
4710 */
4711 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
4712 efx_ef10_filter_insert_def(efx, vlan, false, false);
4713 	}
4714
4715 	/* Insert/renew multicast filters */
4716 /* If changing promiscuous state with cascaded multicast filters, remove
4717 * old filters first, so that packets are dropped rather than duplicated
4718 */
4719 if (nic_data->workaround_26807 &&
4720 table->mc_promisc_last != table->mc_promisc)
4721 		efx_ef10_filter_remove_old(efx);
4722 	if (table->mc_promisc) {
4723 if (nic_data->workaround_26807) {
4724 /* If we failed to insert promiscuous filters, rollback
4725 * and fall back to individual multicast filters
4726 */
4727 			if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
4728 /* Changing promisc state, so remove old filters */
4729 efx_ef10_filter_remove_old(efx);
4730 efx_ef10_filter_insert_addr_list(efx, vlan,
4731 true, false);
4732 }
4733 } else {
4734 /* If we failed to insert promiscuous filters, don't
4735 * rollback. Regardless, also insert the mc_list
4736 */
4737 efx_ef10_filter_insert_def(efx, vlan, true, false);
4738 efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
4739 }
4740 } else {
4741 /* If any filters failed to insert, rollback and fall back to
4742 * promiscuous mode - mc_def filter and maybe broadcast. If
4743 * that fails, roll back again and insert as many of our
4744 * individual multicast filters as we can.
4745 */
4746 		if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
4747 /* Changing promisc state, so remove old filters */
4748 if (nic_data->workaround_26807)
4749 efx_ef10_filter_remove_old(efx);
4750 if (efx_ef10_filter_insert_def(efx, vlan, true, true))
4751 efx_ef10_filter_insert_addr_list(efx, vlan,
4752 true, false);
4753 }
4754 }
4755}
4756
4757/* Caller must hold efx->filter_sem for read if race against
4758 * efx_ef10_filter_table_remove() is possible
4759 */
4760static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4761{
4762 struct efx_ef10_filter_table *table = efx->filter_state;
4763 struct net_device *net_dev = efx->net_dev;
4764 struct efx_ef10_filter_vlan *vlan;
4765 	bool vlan_filter;
4766
4767 if (!efx_dev_registered(efx))
4768 return;
4769
4770 if (!table)
4771 return;
4772
4773 efx_ef10_filter_mark_old(efx);
4774
4775 /* Copy/convert the address lists; add the primary station
4776 * address and broadcast address
4777 */
4778 netif_addr_lock_bh(net_dev);
4779 efx_ef10_filter_uc_addr_list(efx);
4780 efx_ef10_filter_mc_addr_list(efx);
4781 netif_addr_unlock_bh(net_dev);
4782
4783 /* If VLAN filtering changes, all old filters are finally removed.
4784 * Do it in advance to avoid conflicts for unicast untagged and
4785 * VLAN 0 tagged filters.
4786 */
4787 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4788 if (table->vlan_filter != vlan_filter) {
4789 table->vlan_filter = vlan_filter;
4790 efx_ef10_filter_remove_old(efx);
4791 }
4792
4793 list_for_each_entry(vlan, &table->vlan_list, list)
4794 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
4795
4796 efx_ef10_filter_remove_old(efx);
4797 	table->mc_promisc_last = table->mc_promisc;
4798}
4799
4800static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
4801{
4802 struct efx_ef10_filter_table *table = efx->filter_state;
4803 struct efx_ef10_filter_vlan *vlan;
4804
4805 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4806
4807 list_for_each_entry(vlan, &table->vlan_list, list) {
4808 if (vlan->vid == vid)
4809 return vlan;
4810 }
4811
4812 return NULL;
4813}
4814
4815static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
4816{
4817 struct efx_ef10_filter_table *table = efx->filter_state;
4818 struct efx_ef10_filter_vlan *vlan;
4819 unsigned int i;
4820
4821 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4822 return -EINVAL;
4823
4824 vlan = efx_ef10_filter_find_vlan(efx, vid);
4825 if (WARN_ON(vlan)) {
4826 netif_err(efx, drv, efx->net_dev,
4827 "VLAN %u already added\n", vid);
4828 return -EALREADY;
4829 }
4830
4831 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
4832 if (!vlan)
4833 return -ENOMEM;
4834
4835 vlan->vid = vid;
4836
4837 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
4838 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
4839 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
4840 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
4841 vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
4842 vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
4843 vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
4844
4845 list_add_tail(&vlan->list, &table->vlan_list);
4846
4847 if (efx_dev_registered(efx))
4848 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
4849
4850 return 0;
4851}
4852
4853static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
4854 struct efx_ef10_filter_vlan *vlan)
4855{
4856 unsigned int i;
4857
4858 /* See comment in efx_ef10_filter_table_remove() */
4859 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4860 return;
4861
4862 list_del(&vlan->list);
4863
4864 	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
4865 		efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
4866 vlan->uc[i]);
4867 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
4868 		efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
4869 vlan->mc[i]);
4870 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
4871 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
4872 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
4873
4874 kfree(vlan);
4875}
4876
4877static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
4878{
4879 struct efx_ef10_filter_vlan *vlan;
4880
4881 /* See comment in efx_ef10_filter_table_remove() */
4882 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4883 return;
4884
4885 vlan = efx_ef10_filter_find_vlan(efx, vid);
4886 if (!vlan) {
4887 netif_err(efx, drv, efx->net_dev,
4888 "VLAN %u not found in filter state\n", vid);
4889 return;
4890 }
4891
4892 efx_ef10_filter_del_vlan_internal(efx, vlan);
4893}
4894
4895static int efx_ef10_set_mac_address(struct efx_nic *efx)
4896{
4897 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
4898 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4899 bool was_enabled = efx->port_enabled;
4900 int rc;
4901
4902 efx_device_detach_sync(efx);
4903 efx_net_stop(efx->net_dev);
4904
4905 mutex_lock(&efx->mac_lock);
4906 down_write(&efx->filter_sem);
4907 efx_ef10_filter_table_remove(efx);
4908
4909 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
4910 efx->net_dev->dev_addr);
4911 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
4912 nic_data->vport_id);
4913 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
4914 sizeof(inbuf), NULL, 0, NULL);
4915
4916 efx_ef10_filter_table_probe(efx);
4917 up_write(&efx->filter_sem);
4918 mutex_unlock(&efx->mac_lock);
4919
4920 if (was_enabled)
4921 efx_net_open(efx->net_dev);
4922 netif_device_attach(efx->net_dev);
4923
4924#ifdef CONFIG_SFC_SRIOV
4925 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
4926 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
4927
4928 if (rc == -EPERM) {
4929 struct efx_nic *efx_pf;
4930
4931 /* Switch to PF and change MAC address on vport */
4932 efx_pf = pci_get_drvdata(pci_dev_pf);
4933
4934 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
4935 nic_data->vf_index,
4936 efx->net_dev->dev_addr);
4937 } else if (!rc) {
4938 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
4939 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
4940 unsigned int i;
4941
4942 /* MAC address successfully changed by VF (with MAC
4943 * spoofing) so update the parent PF if possible.
4944 */
4945 for (i = 0; i < efx_pf->vf_count; ++i) {
4946 struct ef10_vf *vf = nic_data->vf + i;
4947
4948 if (vf->efx == efx) {
4949 ether_addr_copy(vf->mac,
4950 efx->net_dev->dev_addr);
4951 return 0;
4952 }
4953 }
4954 }
4955 	} else
4956#endif
4957 if (rc == -EPERM) {
4958 netif_err(efx, drv, efx->net_dev,
4959 "Cannot change MAC address; use sfboot to enable"
4960 " mac-spoofing on this interface\n");
4961 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
4962 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
4963 * fall-back to the method of changing the MAC address on the
4964 * vport. This only applies to PFs because such versions of
4965 * MCFW do not support VFs.
4966 */
4967 rc = efx_ef10_vport_set_mac_address(efx);
4968 } else {
4969 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
4970 sizeof(inbuf), NULL, 0, rc);
4971 }
4972
4973 return rc;
4974}
4975
4976static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
4977{
4978 efx_ef10_filter_sync_rx_mode(efx);
4979
4980 return efx_mcdi_set_mac(efx);
4981}
4982
4983static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
4984{
4985 efx_ef10_filter_sync_rx_mode(efx);
4986
4987 return 0;
4988}
4989
4990static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
4991{
4992 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
4993
4994 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
4995 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
4996 NULL, 0, NULL);
4997}
4998
4999/* MC BISTs follow a different poll mechanism to phy BISTs.
5000 * The BIST is done in the poll handler on the MC, and the MCDI command
5001 * will block until the BIST is done.
5002 */
5003static int efx_ef10_poll_bist(struct efx_nic *efx)
5004{
5005 int rc;
5006 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
5007 size_t outlen;
5008 u32 result;
5009
5010 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
5011 outbuf, sizeof(outbuf), &outlen);
5012 if (rc != 0)
5013 return rc;
5014
5015 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
5016 return -EIO;
5017
5018 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
5019 switch (result) {
5020 case MC_CMD_POLL_BIST_PASSED:
5021 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
5022 return 0;
5023 case MC_CMD_POLL_BIST_TIMEOUT:
5024 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
5025 return -EIO;
5026 case MC_CMD_POLL_BIST_FAILED:
5027 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
5028 return -EIO;
5029 default:
5030 netif_err(efx, hw, efx->net_dev,
5031 "BIST returned unknown result %u", result);
5032 return -EIO;
5033 }
5034}
5035
5036static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
5037{
5038 int rc;
5039
5040 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
5041
5042 rc = efx_ef10_start_bist(efx, bist_type);
5043 if (rc != 0)
5044 return rc;
5045
5046 return efx_ef10_poll_bist(efx);
5047}
5048
5049static int
5050efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
5051{
5052 int rc, rc2;
5053
5054 efx_reset_down(efx, RESET_TYPE_WORLD);
5055
5056 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
5057 NULL, 0, NULL, 0, NULL);
5058 if (rc != 0)
5059 goto out;
5060
5061 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
5062 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
5063
5064 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
5065
5066out:
5067 if (rc == -EPERM)
5068 rc = 0;
5069 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
5070 return rc ? rc : rc2;
5071}
5072
5073#ifdef CONFIG_SFC_MTD
5074
5075struct efx_ef10_nvram_type_info {
5076 u16 type, type_mask;
5077 u8 port;
5078 const char *name;
5079};
5080
5081static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
5082 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
5083 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
5084 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
5085 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
5086 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
5087 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
5088 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
5089 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
5090 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
5091 	{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
5092 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
5093};
5094
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
					struct efx_mcdi_mtd_partition *part,
					unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
	const struct efx_ef10_nvram_type_info *info;
	size_t size, erase_size, outlen;
	bool protected;
	int rc;

	for (info = efx_ef10_nvram_types; ; info++) {
		if (info ==
		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
			return -ENODEV;
		if ((type & ~info->type_mask) == info->type)
			break;
	}
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->nvram_type = type;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
		return -EIO;
	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
		part->fw_subtype = MCDI_DWORD(outbuf,
					      NVRAM_METADATA_OUT_SUBTYPE);

	part->common.dev_type_name = "EF10 NVRAM manager";
	part->common.type_name = info->name;

	part->common.mtd.type = MTD_NORFLASH;
	part->common.mtd.flags = MTD_CAP_NORFLASH;
	part->common.mtd.size = size;
	part->common.mtd.erasesize = erase_size;

	return 0;
}

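/* Enumerate the NVRAM partitions reported by MC_CMD_NVRAM_PARTITIONS
 * and register the recognised ones as MTD devices.  On success the
 * parts array is handed over to efx_mtd_add(); it is only freed here
 * on failure.
 */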
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	ASSERT_RTNL();

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
		if (rc == 0)
			n_parts++;
		else if (rc != -ENODEV)
			goto fail;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */

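/* PTP time synchronisation: the PF pushes the host clock to the MC by
 * writing it to the MC doorbell register.  A VF has no access to that
 * register, so its hook is a no-op.
 */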
static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}

static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
					    u32 host_time) {}

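/* Subscribe a channel's event queue to PTP time sync events.
 * sync_events_state tracks the per-channel subscription; "temp"
 * distinguishes a temporary disable (e.g. around a channel restart),
 * after which QUIESCENT marks channels to be re-enabled later.
 */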
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
					   bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
	    channel->sync_events_state == SYNC_EVENTS_VALID ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
		return 0;
	channel->sync_events_state = SYNC_EVENTS_REQUESTED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	if (rc != 0)
		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
						    SYNC_EVENTS_DISABLED;

	return rc;
}

static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
					    bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
		return 0;
	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
		channel->sync_events_state = SYNC_EVENTS_DISABLED;
		return 0;
	}
	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
					    SYNC_EVENTS_DISABLED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	return rc;
}

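/* Apply the enable/disable operation to every channel.  If enabling
 * fails part-way through, unwind by disabling sync events on all
 * channels again.
 */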
static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
					   bool temp)
{
	int (*set)(struct efx_channel *channel, bool temp);
	struct efx_channel *channel;

	set = en ?
	      efx_ef10_rx_enable_timestamping :
	      efx_ef10_rx_disable_timestamping;

	efx_for_each_channel(channel, efx) {
		int rc = set(channel, temp);
		if (en && rc != 0) {
			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
			return rc;
		}
	}

	return 0;
}

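/* A VF cannot control hardware timestamping, so reject any hwtstamp
 * configuration request.
 */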
static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
					 struct hwtstamp_config *init)
{
	return -EOPNOTSUPP;
}

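/* RX timestamping here is effectively all-or-nothing: any specific PTP
 * filter request is upgraded to HWTSTAMP_FILTER_ALL and reported back
 * to the caller as such.
 */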
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
				      struct hwtstamp_config *init)
{
	int rc;

	switch (init->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
		/* if TX timestamping is still requested then leave PTP on */
		return efx_ptp_change_mode(efx,
					   init->tx_type != HWTSTAMP_TX_OFF, 0);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		init->rx_filter = HWTSTAMP_FILTER_ALL;
		rc = efx_ptp_change_mode(efx, true, 0);
		if (!rc)
			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
		if (rc)
			efx_ptp_change_mode(efx, false, 0);
		return rc;
	default:
		return -ERANGE;
	}
}

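/* Only 802.1Q (CTAG) VLANs are handled here; requests for any other
 * protocol are rejected with -EINVAL.
 */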
static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
{
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	return efx_ef10_add_vlan(efx, vid);
}

static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
{
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	return efx_ef10_del_vlan(efx, vid);
}

#define EF10_OFFLOAD_FEATURES		\
	(NETIF_F_IP_CSUM |		\
	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
	 NETIF_F_IPV6_CSUM |		\
	 NETIF_F_RXHASH |		\
	 NETIF_F_NTUPLE)

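/* Operations table for a Huntington (EF10) virtual function.  VFs use
 * the VF BAR and VF-specific probe/stats/RSS/MAC hooks, and fall back
 * to dummy ops where they lack the privilege for an operation (e.g.
 * MAC stats collection, self-test, MTD access to NVRAM).
 */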
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
	.is_vf = true,
	.mem_bar = EFX_MEM_VF_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_vf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_vf,
	.start_stats = efx_port_dummy_op_void,
	.pull_stats = efx_port_dummy_op_void,
	.stop_stats = efx_port_dummy_op_void,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol_vf,
	.set_wol = efx_ef10_set_wol_vf,
	.resume_wol = efx_port_dummy_op_void,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_port_dummy_op_int,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
	.vswitching_probe = efx_ef10_vswitching_probe_vf,
	.vswitching_restore = efx_ef10_vswitching_restore_vf,
	.vswitching_remove = efx_ef10_vswitching_remove_vf,
	.sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
	.get_mac_address = efx_ef10_get_mac_address_vf,
	.set_mac_address = efx_ef10_set_mac_address,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};

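/* Operations table for a Huntington (EF10) physical function, with the
 * full feature set: offline self-test, MTD access to NVRAM partitions,
 * SR-IOV management and PTP time sync events.
 */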
const struct efx_nic_type efx_hunt_a0_nic_type = {
	.is_vf = false,
	.mem_bar = EFX_MEM_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_pf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_pf,
	.start_stats = efx_mcdi_mac_start_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_ef10_sriov_configure,
	.sriov_init = efx_ef10_sriov_init,
	.sriov_fini = efx_ef10_sriov_fini,
	.sriov_wanted = efx_ef10_sriov_wanted,
	.sriov_reset = efx_ef10_sriov_reset,
	.sriov_flr = efx_ef10_sriov_flr,
	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
	.vswitching_probe = efx_ef10_vswitching_probe_pf,
	.vswitching_restore = efx_ef10_vswitching_restore_pf,
	.vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_pf,
	.set_mac_address = efx_ef10_set_mac_address,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};