Commit | Line | Data |
---|---|---|
8127d661 BH |
1 | /**************************************************************************** |
2 | * Driver for Solarflare network controllers and boards | |
3 | * Copyright 2012-2013 Solarflare Communications Inc. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published | |
7 | * by the Free Software Foundation, incorporated herein by reference. | |
8 | */ | |
9 | ||
10 | #include "net_driver.h" | |
11 | #include "ef10_regs.h" | |
12 | #include "io.h" | |
13 | #include "mcdi.h" | |
14 | #include "mcdi_pcol.h" | |
15 | #include "nic.h" | |
16 | #include "workarounds.h" | |
74cd60a4 | 17 | #include "selftest.h" |
8127d661 BH |
18 | #include <linux/in.h> |
19 | #include <linux/jhash.h> | |
20 | #include <linux/wait.h> | |
21 | #include <linux/workqueue.h> | |
22 | ||
23 | /* Hardware control for EF10 architecture including 'Huntington'. */ | |
24 | ||
25 | #define EFX_EF10_DRVGEN_EV 7 | |
26 | enum { | |
27 | EFX_EF10_TEST = 1, | |
28 | EFX_EF10_REFILL, | |
29 | }; | |
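/* These are subcodes for driver-generated ("DRVGEN") events: the driver asks
 * the MC (via MC_CMD_DRIVER_EVENT, as in efx_ef10_rx_defer_refill() below) to
 * inject an event with code EFX_EF10_DRVGEN_EV and one of these values into a
 * channel's own event queue.  EFX_EF10_REFILL is used by the deferred RX
 * refill path below; EFX_EF10_TEST is presumably used in the same way by the
 * event self-test path (hence the selftest.h include above).
 */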
30 | ||
31 | /* The reserved RSS context value */ | |
32 | #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff | |
33 | ||
34 | /* The filter table(s) are managed by firmware and we have write-only | |
35 | * access. When removing filters we must identify them to the | |
36 | * firmware by a 64-bit handle, but this is too wide for Linux kernel | |
37 | * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to | |
38 | * be able to tell in advance whether a requested insertion will | |
39 | * replace an existing filter. Therefore we maintain a software hash | |
40 | * table, which should be at least as large as the hardware hash | |
41 | * table. | |
42 | * | |
43 | * Huntington has a single 8K filter table shared between all filter | |
44 | * types and both ports. | |
45 | */ | |
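/* Note (inferred from the comment above): because each row of the software
 * table below stores the full 64-bit firmware handle, the row index -- which
 * fits comfortably in 16 bits for an 8K-row table -- can stand in as the
 * narrower, driver-visible filter ID required by the RX NFC and RFS
 * interfaces.
 */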
46 | #define HUNT_FILTER_TBL_ROWS 8192 | |
47 | ||
48 | struct efx_ef10_filter_table { | |
49 | /* The RX match field masks supported by this fw & hw, in order of priority */ | |
50 | enum efx_filter_match_flags rx_match_flags[ | |
51 | MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; | |
52 | unsigned int rx_match_count; | |
53 | ||
54 | struct { | |
55 | unsigned long spec; /* pointer to spec plus flag bits */ | |
56 | /* BUSY flag indicates that an update is in progress. STACK_OLD is | |
57 | * used to mark and sweep stack-owned MAC filters. | |
58 | */ | |
59 | #define EFX_EF10_FILTER_FLAG_BUSY 1UL | |
60 | #define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL | |
61 | #define EFX_EF10_FILTER_FLAGS 3UL | |
62 | u64 handle; /* firmware handle */ | |
63 | } *entry; | |
64 | wait_queue_head_t waitq; | |
65 | /* Shadow of net_device address lists, guarded by mac_lock */ | |
66 | #define EFX_EF10_FILTER_STACK_UC_MAX 32 | |
67 | #define EFX_EF10_FILTER_STACK_MC_MAX 256 | |
68 | struct { | |
69 | u8 addr[ETH_ALEN]; | |
70 | u16 id; | |
71 | } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX], | |
72 | stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX]; | |
73 | int stack_uc_count; /* negative for PROMISC */ | |
74 | int stack_mc_count; /* negative for PROMISC/ALLMULTI */ | |
75 | }; | |
76 | ||
77 | /* An arbitrary search limit for the software hash table */ | |
78 | #define EFX_EF10_FILTER_SEARCH_LIMIT 200 | |
79 | ||
80 | static void efx_ef10_rx_push_indir_table(struct efx_nic *efx); | |
81 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); | |
82 | static void efx_ef10_filter_table_remove(struct efx_nic *efx); | |
83 | ||
84 | static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) | |
85 | { | |
86 | efx_dword_t reg; | |
87 | ||
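	/* The MC publishes its warm boot count through the BIU "MC soft
	 * status" register: word 1 holds a magic value (0xb007, presumably a
	 * hex spelling of "boot") when word 0 contains a valid count; any
	 * other value means the MC is not ready yet, so we return -EIO.
	 */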
88 | efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); | |
89 | return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? | |
90 | EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; | |
91 | } | |
92 | ||
93 | static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) | |
94 | { | |
95 | return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); | |
96 | } | |
97 | ||
e5a2538a | 98 | static int efx_ef10_init_datapath_caps(struct efx_nic *efx) |
8127d661 BH |
99 | { |
100 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); | |
101 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
102 | size_t outlen; | |
103 | int rc; | |
104 | ||
105 | BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); | |
106 | ||
107 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, | |
108 | outbuf, sizeof(outbuf), &outlen); | |
109 | if (rc) | |
110 | return rc; | |
e5a2538a BH |
111 | if (outlen < sizeof(outbuf)) { |
112 | netif_err(efx, drv, efx->net_dev, | |
113 | "unable to read datapath firmware capabilities\n"); | |
114 | return -EIO; | |
115 | } | |
116 | ||
117 | nic_data->datapath_caps = | |
118 | MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); | |
8127d661 | 119 | |
e5a2538a BH |
120 | if (!(nic_data->datapath_caps & |
121 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { | |
122 | netif_err(efx, drv, efx->net_dev, | |
123 | "current firmware does not support TSO\n"); | |
124 | return -ENODEV; | |
125 | } | |
126 | ||
127 | if (!(nic_data->datapath_caps & | |
128 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { | |
129 | netif_err(efx, probe, efx->net_dev, | |
130 | "current firmware does not support an RX prefix\n"); | |
131 | return -ENODEV; | |
8127d661 BH |
132 | } |
133 | ||
134 | return 0; | |
135 | } | |
136 | ||
137 | static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) | |
138 | { | |
139 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); | |
140 | int rc; | |
141 | ||
142 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, | |
143 | outbuf, sizeof(outbuf), NULL); | |
144 | if (rc) | |
145 | return rc; | |
146 | rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); | |
147 | return rc > 0 ? rc : -ERANGE; | |
148 | } | |
149 | ||
150 | static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) | |
151 | { | |
152 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); | |
153 | size_t outlen; | |
154 | int rc; | |
155 | ||
156 | BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); | |
157 | ||
158 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, | |
159 | outbuf, sizeof(outbuf), &outlen); | |
160 | if (rc) | |
161 | return rc; | |
162 | if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) | |
163 | return -EIO; | |
164 | ||
165 | memcpy(mac_address, | |
166 | MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN); | |
167 | return 0; | |
168 | } | |
169 | ||
170 | static int efx_ef10_probe(struct efx_nic *efx) | |
171 | { | |
172 | struct efx_ef10_nic_data *nic_data; | |
173 | int i, rc; | |
174 | ||
175 | /* We can have one VI for each 8K region. However we need | |
176 | * multiple TX queues per channel. | |
177 | */ | |
178 | efx->max_channels = | |
179 | min_t(unsigned int, | |
180 | EFX_MAX_CHANNELS, | |
181 | resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / | |
182 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); | |
183 | BUG_ON(efx->max_channels == 0); | |
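	/* Worked example with illustrative numbers (not taken from real
	 * hardware): a 16 MB memory BAR split into 8 KB VI pages, with (say)
	 * two TX queue types per channel, allows 16 MB / (8 KB * 2) = 1024
	 * channels, which EFX_MAX_CHANNELS then caps to the driver-wide limit.
	 */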
184 | ||
185 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | |
186 | if (!nic_data) | |
187 | return -ENOMEM; | |
188 | efx->nic_data = nic_data; | |
189 | ||
190 | rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, | |
191 | 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); | |
192 | if (rc) | |
193 | goto fail1; | |
194 | ||
195 | /* Get the MC's warm boot count. In case it's rebooting right | |
196 | * now, be prepared to retry. | |
197 | */ | |
198 | i = 0; | |
199 | for (;;) { | |
200 | rc = efx_ef10_get_warm_boot_count(efx); | |
201 | if (rc >= 0) | |
202 | break; | |
203 | if (++i == 5) | |
204 | goto fail2; | |
205 | ssleep(1); | |
206 | } | |
207 | nic_data->warm_boot_count = rc; | |
208 | ||
209 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
210 | ||
211 | /* In case we're recovering from a crash (kexec), we want to | |
212 | * cancel any outstanding request by the previous user of this | |
213 | * function. We send a special message using the least | |
214 | * significant bits of the 'high' (doorbell) register. | |
215 | */ | |
216 | _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); | |
217 | ||
218 | rc = efx_mcdi_init(efx); | |
219 | if (rc) | |
220 | goto fail2; | |
221 | ||
222 | /* Reset (most) configuration for this function */ | |
223 | rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); | |
224 | if (rc) | |
225 | goto fail3; | |
226 | ||
227 | /* Enable event logging */ | |
228 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | |
229 | if (rc) | |
230 | goto fail3; | |
231 | ||
e5a2538a | 232 | rc = efx_ef10_init_datapath_caps(efx); |
8127d661 BH |
233 | if (rc < 0) |
234 | goto fail3; | |
235 | ||
236 | efx->rx_packet_len_offset = | |
237 | ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; | |
238 | ||
8127d661 BH |
239 | rc = efx_mcdi_port_get_number(efx); |
240 | if (rc < 0) | |
241 | goto fail3; | |
242 | efx->port_num = rc; | |
243 | ||
244 | rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); | |
245 | if (rc) | |
246 | goto fail3; | |
247 | ||
248 | rc = efx_ef10_get_sysclk_freq(efx); | |
249 | if (rc < 0) | |
250 | goto fail3; | |
251 | efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ | |
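	/* rc is the system clock frequency returned by MC_CMD_GET_CLOCK, so
	 * 1536000 / rc is the duration of 1536 clock cycles in nanoseconds,
	 * assuming the frequency is reported in MHz; e.g. a hypothetical
	 * 800 MHz sysclk would give a 1920 ns timer quantum.
	 */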
252 | ||
253 | /* Check whether firmware supports bug 35388 workaround */ | |
254 | rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); | |
255 | if (rc == 0) | |
256 | nic_data->workaround_35388 = true; | |
257 | else if (rc != -ENOSYS && rc != -ENOENT) | |
258 | goto fail3; | |
259 | netif_dbg(efx, probe, efx->net_dev, | |
260 | "workaround for bug 35388 is %sabled\n", | |
261 | nic_data->workaround_35388 ? "en" : "dis"); | |
262 | ||
263 | rc = efx_mcdi_mon_probe(efx); | |
264 | if (rc) | |
265 | goto fail3; | |
266 | ||
8127d661 BH |
267 | return 0; |
268 | ||
269 | fail3: | |
270 | efx_mcdi_fini(efx); | |
271 | fail2: | |
272 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | |
273 | fail1: | |
274 | kfree(nic_data); | |
275 | efx->nic_data = NULL; | |
276 | return rc; | |
277 | } | |
278 | ||
279 | static int efx_ef10_free_vis(struct efx_nic *efx) | |
280 | { | |
281 | int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL); | |
282 | ||
283 | /* -EALREADY means nothing to free, so ignore */ | |
284 | if (rc == -EALREADY) | |
285 | rc = 0; | |
286 | return rc; | |
287 | } | |
288 | ||
183233be BH |
289 | #ifdef EFX_USE_PIO |
290 | ||
291 | static void efx_ef10_free_piobufs(struct efx_nic *efx) | |
292 | { | |
293 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
294 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); | |
295 | unsigned int i; | |
296 | int rc; | |
297 | ||
298 | BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); | |
299 | ||
300 | for (i = 0; i < nic_data->n_piobufs; i++) { | |
301 | MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, | |
302 | nic_data->piobuf_handle[i]); | |
303 | rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), | |
304 | NULL, 0, NULL); | |
305 | WARN_ON(rc); | |
306 | } | |
307 | ||
308 | nic_data->n_piobufs = 0; | |
309 | } | |
310 | ||
311 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | |
312 | { | |
313 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
314 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); | |
315 | unsigned int i; | |
316 | size_t outlen; | |
317 | int rc = 0; | |
318 | ||
319 | BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); | |
320 | ||
321 | for (i = 0; i < n; i++) { | |
322 | rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, | |
323 | outbuf, sizeof(outbuf), &outlen); | |
324 | if (rc) | |
325 | break; | |
326 | if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { | |
327 | rc = -EIO; | |
328 | break; | |
329 | } | |
330 | nic_data->piobuf_handle[i] = | |
331 | MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); | |
332 | netif_dbg(efx, probe, efx->net_dev, | |
333 | "allocated PIO buffer %u handle %x\n", i, | |
334 | nic_data->piobuf_handle[i]); | |
335 | } | |
336 | ||
337 | nic_data->n_piobufs = i; | |
338 | if (rc) | |
339 | efx_ef10_free_piobufs(efx); | |
340 | return rc; | |
341 | } | |
342 | ||
343 | static int efx_ef10_link_piobufs(struct efx_nic *efx) | |
344 | { | |
345 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
346 | MCDI_DECLARE_BUF(inbuf, | |
347 | max(MC_CMD_LINK_PIOBUF_IN_LEN, | |
348 | MC_CMD_UNLINK_PIOBUF_IN_LEN)); | |
349 | struct efx_channel *channel; | |
350 | struct efx_tx_queue *tx_queue; | |
351 | unsigned int offset, index; | |
352 | int rc; | |
353 | ||
354 | BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); | |
355 | BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); | |
356 | ||
357 | /* Link a buffer to each VI in the write-combining mapping */ | |
358 | for (index = 0; index < nic_data->n_piobufs; ++index) { | |
359 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, | |
360 | nic_data->piobuf_handle[index]); | |
361 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, | |
362 | nic_data->pio_write_vi_base + index); | |
363 | rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, | |
364 | inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, | |
365 | NULL, 0, NULL); | |
366 | if (rc) { | |
367 | netif_err(efx, drv, efx->net_dev, | |
368 | "failed to link VI %u to PIO buffer %u (%d)\n", | |
369 | nic_data->pio_write_vi_base + index, index, | |
370 | rc); | |
371 | goto fail; | |
372 | } | |
373 | netif_dbg(efx, probe, efx->net_dev, | |
374 | "linked VI %u to PIO buffer %u\n", | |
375 | nic_data->pio_write_vi_base + index, index); | |
376 | } | |
377 | ||
378 | /* Link a buffer to each TX queue */ | |
379 | efx_for_each_channel(channel, efx) { | |
380 | efx_for_each_channel_tx_queue(tx_queue, channel) { | |
381 | /* We assign the PIO buffers to queues in | |
382 | * reverse order to allow for the following | |
383 | * special case. | |
384 | */ | |
385 | offset = ((efx->tx_channel_offset + efx->n_tx_channels - | |
386 | tx_queue->channel->channel - 1) * | |
387 | efx_piobuf_size); | |
388 | index = offset / ER_DZ_TX_PIOBUF_SIZE; | |
389 | offset = offset % ER_DZ_TX_PIOBUF_SIZE; | |
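			/* Illustration with made-up numbers: with 4 TX
			 * channels at channel offset 0, a 2 KB PIO buffer and
			 * a 256-byte efx_piobuf_size, channel 3 gets offset
			 * (0 + 4 - 3 - 1) * 256 = 0 and channel 0 gets
			 * (0 + 4 - 0 - 1) * 256 = 768, both in buffer 0 --
			 * i.e. later channels take earlier slots, which is
			 * the "reverse order" mentioned above.
			 */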
390 | ||
391 | /* When the host page size is 4K, the first | |
392 | * host page in the WC mapping may be within | |
393 | * the same VI page as the last TX queue. We | |
394 | * can only link one buffer to each VI. | |
395 | */ | |
396 | if (tx_queue->queue == nic_data->pio_write_vi_base) { | |
397 | BUG_ON(index != 0); | |
398 | rc = 0; | |
399 | } else { | |
400 | MCDI_SET_DWORD(inbuf, | |
401 | LINK_PIOBUF_IN_PIOBUF_HANDLE, | |
402 | nic_data->piobuf_handle[index]); | |
403 | MCDI_SET_DWORD(inbuf, | |
404 | LINK_PIOBUF_IN_TXQ_INSTANCE, | |
405 | tx_queue->queue); | |
406 | rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, | |
407 | inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, | |
408 | NULL, 0, NULL); | |
409 | } | |
410 | ||
411 | if (rc) { | |
412 | /* This is non-fatal; the TX path just | |
413 | * won't use PIO for this queue | |
414 | */ | |
415 | netif_err(efx, drv, efx->net_dev, | |
416 | "failed to link VI %u to PIO buffer %u (%d)\n", | |
417 | tx_queue->queue, index, rc); | |
418 | tx_queue->piobuf = NULL; | |
419 | } else { | |
420 | tx_queue->piobuf = | |
421 | nic_data->pio_write_base + | |
422 | index * EFX_VI_PAGE_SIZE + offset; | |
423 | tx_queue->piobuf_offset = offset; | |
424 | netif_dbg(efx, probe, efx->net_dev, | |
425 | "linked VI %u to PIO buffer %u offset %x addr %p\n", | |
426 | tx_queue->queue, index, | |
427 | tx_queue->piobuf_offset, | |
428 | tx_queue->piobuf); | |
429 | } | |
430 | } | |
431 | } | |
432 | ||
433 | return 0; | |
434 | ||
435 | fail: | |
436 | while (index--) { | |
437 | MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, | |
438 | nic_data->pio_write_vi_base + index); | |
439 | efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, | |
440 | inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, | |
441 | NULL, 0, NULL); | |
442 | } | |
443 | return rc; | |
444 | } | |
445 | ||
446 | #else /* !EFX_USE_PIO */ | |
447 | ||
448 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | |
449 | { | |
450 | return n == 0 ? 0 : -ENOBUFS; | |
451 | } | |
452 | ||
453 | static int efx_ef10_link_piobufs(struct efx_nic *efx) | |
454 | { | |
455 | return 0; | |
456 | } | |
457 | ||
458 | static void efx_ef10_free_piobufs(struct efx_nic *efx) | |
459 | { | |
460 | } | |
461 | ||
462 | #endif /* EFX_USE_PIO */ | |
463 | ||
8127d661 BH |
464 | static void efx_ef10_remove(struct efx_nic *efx) |
465 | { | |
466 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
467 | int rc; | |
468 | ||
469 | efx_mcdi_mon_remove(efx); | |
470 | ||
471 | /* This needs to be after efx_ptp_remove_channel() with no filters */ | |
472 | efx_ef10_rx_free_indir_table(efx); | |
473 | ||
183233be BH |
474 | if (nic_data->wc_membase) |
475 | iounmap(nic_data->wc_membase); | |
476 | ||
8127d661 BH |
477 | rc = efx_ef10_free_vis(efx); |
478 | WARN_ON(rc != 0); | |
479 | ||
183233be BH |
480 | if (!nic_data->must_restore_piobufs) |
481 | efx_ef10_free_piobufs(efx); | |
482 | ||
8127d661 BH |
483 | efx_mcdi_fini(efx); |
484 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | |
485 | kfree(nic_data); | |
486 | } | |
487 | ||
488 | static int efx_ef10_alloc_vis(struct efx_nic *efx, | |
489 | unsigned int min_vis, unsigned int max_vis) | |
490 | { | |
491 | MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); | |
492 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); | |
493 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
494 | size_t outlen; | |
495 | int rc; | |
496 | ||
497 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); | |
498 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); | |
499 | rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), | |
500 | outbuf, sizeof(outbuf), &outlen); | |
501 | if (rc != 0) | |
502 | return rc; | |
503 | ||
504 | if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) | |
505 | return -EIO; | |
506 | ||
507 | netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", | |
508 | MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); | |
509 | ||
510 | nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); | |
511 | nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); | |
512 | return 0; | |
513 | } | |
514 | ||
183233be BH |
515 | /* Note that the failure path of this function does not free |
516 | * resources, as this will be done by efx_ef10_remove(). | |
517 | */ | |
8127d661 BH |
518 | static int efx_ef10_dimension_resources(struct efx_nic *efx) |
519 | { | |
183233be BH |
520 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
521 | unsigned int uc_mem_map_size, wc_mem_map_size; | |
522 | unsigned int min_vis, pio_write_vi_base, max_vis; | |
523 | void __iomem *membase; | |
524 | int rc; | |
525 | ||
526 | min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | |
8127d661 | 527 | |
183233be BH |
528 | #ifdef EFX_USE_PIO |
529 | /* Try to allocate PIO buffers if wanted and if the full | |
530 | * number of PIO buffers would be sufficient to allocate one | |
531 | * copy-buffer per TX channel. Failure is non-fatal, as there | |
532 | * are only a small number of PIO buffers shared between all | |
533 | * functions of the controller. | |
534 | */ | |
535 | if (efx_piobuf_size != 0 && | |
536 | ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= | |
537 | efx->n_tx_channels) { | |
538 | unsigned int n_piobufs = | |
539 | DIV_ROUND_UP(efx->n_tx_channels, | |
540 | ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); | |
541 | ||
542 | rc = efx_ef10_alloc_piobufs(efx, n_piobufs); | |
543 | if (rc) | |
544 | netif_err(efx, probe, efx->net_dev, | |
545 | "failed to allocate PIO buffers (%d)\n", rc); | |
546 | else | |
547 | netif_dbg(efx, probe, efx->net_dev, | |
548 | "allocated %u PIO buffers\n", n_piobufs); | |
549 | } | |
550 | #else | |
551 | nic_data->n_piobufs = 0; | |
552 | #endif | |
553 | ||
554 | /* PIO buffers should be mapped with write-combining enabled, | |
555 | * and we want to make single UC and WC mappings rather than | |
556 | * several of each (in fact that's the only option if host | |
557 | * page size is >4K). So we may allocate some extra VIs just | |
558 | * for writing PIO buffers through. | |
559 | */ | |
560 | uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + | |
561 | ER_DZ_TX_PIOBUF); | |
562 | if (nic_data->n_piobufs) { | |
563 | pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; | |
564 | wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + | |
565 | nic_data->n_piobufs) * | |
566 | EFX_VI_PAGE_SIZE) - | |
567 | uc_mem_map_size); | |
568 | max_vis = pio_write_vi_base + nic_data->n_piobufs; | |
569 | } else { | |
570 | pio_write_vi_base = 0; | |
571 | wc_mem_map_size = 0; | |
572 | max_vis = min_vis; | |
573 | } | |
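	/* To summarise the sizing above: the UC (uncacheable) mapping covers
	 * the ordinary registers of all min_vis VIs, up to the ER_DZ_TX_PIOBUF
	 * offset within the last one; if PIO buffers were allocated, the WC
	 * (write-combining) mapping starts where the UC mapping ends and
	 * provides one VI page per PIO buffer, used purely for writing the
	 * buffers through.
	 */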
574 | ||
575 | /* In case the last attached driver failed to free VIs, do it now */ | |
576 | rc = efx_ef10_free_vis(efx); | |
577 | if (rc != 0) | |
578 | return rc; | |
579 | ||
580 | rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); | |
581 | if (rc != 0) | |
582 | return rc; | |
583 | ||
584 | /* If we didn't get enough VIs to map all the PIO buffers, free the | |
585 | * PIO buffers | |
586 | */ | |
587 | if (nic_data->n_piobufs && | |
588 | nic_data->n_allocated_vis < | |
589 | pio_write_vi_base + nic_data->n_piobufs) { | |
590 | netif_dbg(efx, probe, efx->net_dev, | |
591 | "%u VIs are not sufficient to map %u PIO buffers\n", | |
592 | nic_data->n_allocated_vis, nic_data->n_piobufs); | |
593 | efx_ef10_free_piobufs(efx); | |
594 | } | |
595 | ||
596 | /* Shrink the original UC mapping of the memory BAR */ | |
597 | membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); | |
598 | if (!membase) { | |
599 | netif_err(efx, probe, efx->net_dev, | |
600 | "could not shrink memory BAR to %x\n", | |
601 | uc_mem_map_size); | |
602 | return -ENOMEM; | |
603 | } | |
604 | iounmap(efx->membase); | |
605 | efx->membase = membase; | |
606 | ||
607 | /* Set up the WC mapping if needed */ | |
608 | if (wc_mem_map_size) { | |
609 | nic_data->wc_membase = ioremap_wc(efx->membase_phys + | |
610 | uc_mem_map_size, | |
611 | wc_mem_map_size); | |
612 | if (!nic_data->wc_membase) { | |
613 | netif_err(efx, probe, efx->net_dev, | |
614 | "could not allocate WC mapping of size %x\n", | |
615 | wc_mem_map_size); | |
616 | return -ENOMEM; | |
617 | } | |
618 | nic_data->pio_write_vi_base = pio_write_vi_base; | |
619 | nic_data->pio_write_base = | |
620 | nic_data->wc_membase + | |
621 | (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - | |
622 | uc_mem_map_size); | |
623 | ||
624 | rc = efx_ef10_link_piobufs(efx); | |
625 | if (rc) | |
626 | efx_ef10_free_piobufs(efx); | |
627 | } | |
628 | ||
629 | netif_dbg(efx, probe, efx->net_dev, | |
630 | "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", | |
631 | &efx->membase_phys, efx->membase, uc_mem_map_size, | |
632 | nic_data->wc_membase, wc_mem_map_size); | |
633 | ||
634 | return 0; | |
8127d661 BH |
635 | } |
636 | ||
637 | static int efx_ef10_init_nic(struct efx_nic *efx) | |
638 | { | |
639 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
640 | int rc; | |
641 | ||
a915ccc9 BH |
642 | if (nic_data->must_check_datapath_caps) { |
643 | rc = efx_ef10_init_datapath_caps(efx); | |
644 | if (rc) | |
645 | return rc; | |
646 | nic_data->must_check_datapath_caps = false; | |
647 | } | |
648 | ||
8127d661 BH |
649 | if (nic_data->must_realloc_vis) { |
650 | /* We cannot let the number of VIs change now */ | |
651 | rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, | |
652 | nic_data->n_allocated_vis); | |
653 | if (rc) | |
654 | return rc; | |
655 | nic_data->must_realloc_vis = false; | |
656 | } | |
657 | ||
183233be BH |
658 | if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { |
659 | rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); | |
660 | if (rc == 0) { | |
661 | rc = efx_ef10_link_piobufs(efx); | |
662 | if (rc) | |
663 | efx_ef10_free_piobufs(efx); | |
664 | } | |
665 | ||
666 | /* Log an error on failure, but this is non-fatal */ | |
667 | if (rc) | |
668 | netif_err(efx, drv, efx->net_dev, | |
669 | "failed to restore PIO buffers (%d)\n", rc); | |
670 | nic_data->must_restore_piobufs = false; | |
671 | } | |
672 | ||
8127d661 BH |
673 | efx_ef10_rx_push_indir_table(efx); |
674 | return 0; | |
675 | } | |
676 | ||
677 | static int efx_ef10_map_reset_flags(u32 *flags) | |
678 | { | |
679 | enum { | |
680 | EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << | |
681 | ETH_RESET_SHARED_SHIFT), | |
682 | EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | | |
683 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | | |
684 | ETH_RESET_PHY | ETH_RESET_MGMT) << | |
685 | ETH_RESET_SHARED_SHIFT) | |
686 | }; | |
687 | ||
688 | /* We assume for now that our PCI function is permitted to | |
689 | * reset everything. | |
690 | */ | |
691 | ||
692 | if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { | |
693 | *flags &= ~EF10_RESET_MC; | |
694 | return RESET_TYPE_WORLD; | |
695 | } | |
696 | ||
697 | if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { | |
698 | *flags &= ~EF10_RESET_PORT; | |
699 | return RESET_TYPE_ALL; | |
700 | } | |
701 | ||
702 | /* no invisible reset implemented */ | |
703 | ||
704 | return -EINVAL; | |
705 | } | |
706 | ||
707 | #define EF10_DMA_STAT(ext_name, mcdi_name) \ | |
708 | [EF10_STAT_ ## ext_name] = \ | |
709 | { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } | |
710 | #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ | |
711 | [EF10_STAT_ ## int_name] = \ | |
712 | { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } | |
713 | #define EF10_OTHER_STAT(ext_name) \ | |
714 | [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } | |
715 | ||
716 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { | |
717 | EF10_DMA_STAT(tx_bytes, TX_BYTES), | |
718 | EF10_DMA_STAT(tx_packets, TX_PKTS), | |
719 | EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), | |
720 | EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), | |
721 | EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), | |
722 | EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), | |
723 | EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), | |
724 | EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), | |
725 | EF10_DMA_STAT(tx_64, TX_64_PKTS), | |
726 | EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), | |
727 | EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), | |
728 | EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), | |
729 | EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), | |
730 | EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), | |
731 | EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), | |
732 | EF10_DMA_STAT(rx_bytes, RX_BYTES), | |
733 | EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), | |
734 | EF10_OTHER_STAT(rx_good_bytes), | |
735 | EF10_OTHER_STAT(rx_bad_bytes), | |
736 | EF10_DMA_STAT(rx_packets, RX_PKTS), | |
737 | EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), | |
738 | EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), | |
739 | EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), | |
740 | EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), | |
741 | EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), | |
742 | EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), | |
743 | EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), | |
744 | EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), | |
745 | EF10_DMA_STAT(rx_64, RX_64_PKTS), | |
746 | EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), | |
747 | EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), | |
748 | EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), | |
749 | EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), | |
750 | EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), | |
751 | EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), | |
752 | EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), | |
753 | EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), | |
754 | EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), | |
755 | EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), | |
756 | EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), | |
757 | EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), | |
568d7a00 EC |
758 | EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), |
759 | EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), | |
760 | EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), | |
761 | EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), | |
762 | EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), | |
763 | EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), | |
764 | EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), | |
765 | EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), | |
766 | EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), | |
767 | EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), | |
768 | EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), | |
769 | EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), | |
8127d661 BH |
770 | }; |
771 | ||
772 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ | |
773 | (1ULL << EF10_STAT_tx_packets) | \ | |
774 | (1ULL << EF10_STAT_tx_pause) | \ | |
775 | (1ULL << EF10_STAT_tx_unicast) | \ | |
776 | (1ULL << EF10_STAT_tx_multicast) | \ | |
777 | (1ULL << EF10_STAT_tx_broadcast) | \ | |
778 | (1ULL << EF10_STAT_rx_bytes) | \ | |
779 | (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ | |
780 | (1ULL << EF10_STAT_rx_good_bytes) | \ | |
781 | (1ULL << EF10_STAT_rx_bad_bytes) | \ | |
782 | (1ULL << EF10_STAT_rx_packets) | \ | |
783 | (1ULL << EF10_STAT_rx_good) | \ | |
784 | (1ULL << EF10_STAT_rx_bad) | \ | |
785 | (1ULL << EF10_STAT_rx_pause) | \ | |
786 | (1ULL << EF10_STAT_rx_control) | \ | |
787 | (1ULL << EF10_STAT_rx_unicast) | \ | |
788 | (1ULL << EF10_STAT_rx_multicast) | \ | |
789 | (1ULL << EF10_STAT_rx_broadcast) | \ | |
790 | (1ULL << EF10_STAT_rx_lt64) | \ | |
791 | (1ULL << EF10_STAT_rx_64) | \ | |
792 | (1ULL << EF10_STAT_rx_65_to_127) | \ | |
793 | (1ULL << EF10_STAT_rx_128_to_255) | \ | |
794 | (1ULL << EF10_STAT_rx_256_to_511) | \ | |
795 | (1ULL << EF10_STAT_rx_512_to_1023) | \ | |
796 | (1ULL << EF10_STAT_rx_1024_to_15xx) | \ | |
797 | (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ | |
798 | (1ULL << EF10_STAT_rx_gtjumbo) | \ | |
799 | (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ | |
800 | (1ULL << EF10_STAT_rx_overflow) | \ | |
801 | (1ULL << EF10_STAT_rx_nodesc_drops)) | |
802 | ||
803 | /* These statistics are only provided by the 10G MAC. For a 10G/40G | |
804 | * switchable port we do not expose these because they might not | |
805 | * include all the packets they should. | |
806 | */ | |
807 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ | |
808 | (1ULL << EF10_STAT_tx_lt64) | \ | |
809 | (1ULL << EF10_STAT_tx_64) | \ | |
810 | (1ULL << EF10_STAT_tx_65_to_127) | \ | |
811 | (1ULL << EF10_STAT_tx_128_to_255) | \ | |
812 | (1ULL << EF10_STAT_tx_256_to_511) | \ | |
813 | (1ULL << EF10_STAT_tx_512_to_1023) | \ | |
814 | (1ULL << EF10_STAT_tx_1024_to_15xx) | \ | |
815 | (1ULL << EF10_STAT_tx_15xx_to_jumbo)) | |
816 | ||
817 | /* These statistics are only provided by the 40G MAC. For a 10G/40G | |
818 | * switchable port we do expose these because the errors will otherwise | |
819 | * be silent. | |
820 | */ | |
821 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ | |
822 | (1ULL << EF10_STAT_rx_length_error)) | |
823 | ||
568d7a00 EC |
824 | /* These statistics are only provided if the firmware supports the |
825 | * capability PM_AND_RXDP_COUNTERS. | |
826 | */ | |
827 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ | |
828 | (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ | |
829 | (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ | |
830 | (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ | |
831 | (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ | |
832 | (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ | |
833 | (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ | |
834 | (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ | |
835 | (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ | |
836 | (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ | |
837 | (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ | |
838 | (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \ | |
839 | (1ULL << EF10_STAT_rx_dp_emerg_wait)) | |
840 | ||
4bae913b | 841 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) |
8127d661 | 842 | { |
4bae913b | 843 | u64 raw_mask = HUNT_COMMON_STAT_MASK; |
8127d661 | 844 | u32 port_caps = efx_mcdi_phy_get_caps(efx); |
568d7a00 | 845 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
8127d661 BH |
846 | |
847 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | |
4bae913b | 848 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; |
8127d661 | 849 | else |
4bae913b | 850 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
568d7a00 EC |
851 | |
852 | if (nic_data->datapath_caps & | |
853 | (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) | |
854 | raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; | |
855 | ||
4bae913b EC |
856 | return raw_mask; |
857 | } | |
858 | ||
859 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) | |
860 | { | |
861 | u64 raw_mask = efx_ef10_raw_stat_mask(efx); | |
862 | ||
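	/* Fold the 64-bit raw mask into an unsigned-long bitmap so that the
	 * generic bitmap helpers (DECLARE_BITMAP, for_each_set_bit) work the
	 * same way on 32-bit and 64-bit kernels.
	 */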
863 | #if BITS_PER_LONG == 64 | |
864 | mask[0] = raw_mask; | |
865 | #else | |
866 | mask[0] = raw_mask & 0xffffffff; | |
867 | mask[1] = raw_mask >> 32; | |
868 | #endif | |
8127d661 BH |
869 | } |
870 | ||
871 | static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) | |
872 | { | |
4bae913b EC |
873 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
874 | ||
875 | efx_ef10_get_stat_mask(efx, mask); | |
8127d661 | 876 | return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, |
4bae913b | 877 | mask, names); |
8127d661 BH |
878 | } |
879 | ||
880 | static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) | |
881 | { | |
882 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
4bae913b | 883 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
8127d661 BH |
884 | __le64 generation_start, generation_end; |
885 | u64 *stats = nic_data->stats; | |
886 | __le64 *dma_stats; | |
887 | ||
4bae913b EC |
888 | efx_ef10_get_stat_mask(efx, mask); |
889 | ||
8127d661 BH |
890 | dma_stats = efx->stats_buffer.addr; |
891 | nic_data = efx->nic_data; | |
892 | ||
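	/* The MC DMAs the statistics block together with start/end generation
	 * counts.  Reading the end count, then the stats, then the start count
	 * (with read barriers in between) and comparing the two is a
	 * seqlock-style check: if they differ, a DMA raced with the read and
	 * the caller retries.
	 */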
893 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | |
894 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) | |
895 | return 0; | |
896 | rmb(); | |
4bae913b | 897 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
8127d661 | 898 | stats, efx->stats_buffer.addr, false); |
d546a893 | 899 | rmb(); |
8127d661 BH |
900 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
901 | if (generation_end != generation_start) | |
902 | return -EAGAIN; | |
903 | ||
904 | /* Update derived statistics */ | |
f8f3b5ae | 905 | efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); |
8127d661 BH |
906 | stats[EF10_STAT_rx_good_bytes] = |
907 | stats[EF10_STAT_rx_bytes] - | |
908 | stats[EF10_STAT_rx_bytes_minus_good_bytes]; | |
909 | efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], | |
910 | stats[EF10_STAT_rx_bytes_minus_good_bytes]); | |
911 | ||
912 | return 0; | |
913 | } | |
914 | ||
915 | ||
916 | static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, | |
917 | struct rtnl_link_stats64 *core_stats) | |
918 | { | |
4bae913b | 919 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
8127d661 BH |
920 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
921 | u64 *stats = nic_data->stats; | |
922 | size_t stats_count = 0, index; | |
923 | int retry; | |
924 | ||
4bae913b EC |
925 | efx_ef10_get_stat_mask(efx, mask); |
926 | ||
8127d661 BH |
927 | /* If we're unlucky enough to read statistics during the DMA, wait |
928 | * up to 10ms for it to finish (typically takes <500us) | |
929 | */ | |
930 | for (retry = 0; retry < 100; ++retry) { | |
931 | if (efx_ef10_try_update_nic_stats(efx) == 0) | |
932 | break; | |
933 | udelay(100); | |
934 | } | |
935 | ||
936 | if (full_stats) { | |
937 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { | |
938 | if (efx_ef10_stat_desc[index].name) { | |
939 | *full_stats++ = stats[index]; | |
940 | ++stats_count; | |
941 | } | |
942 | } | |
943 | } | |
944 | ||
945 | if (core_stats) { | |
946 | core_stats->rx_packets = stats[EF10_STAT_rx_packets]; | |
947 | core_stats->tx_packets = stats[EF10_STAT_tx_packets]; | |
948 | core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; | |
949 | core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; | |
950 | core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops]; | |
951 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; | |
952 | core_stats->rx_length_errors = | |
953 | stats[EF10_STAT_rx_gtjumbo] + | |
954 | stats[EF10_STAT_rx_length_error]; | |
955 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; | |
956 | core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; | |
957 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | |
958 | core_stats->rx_errors = (core_stats->rx_length_errors + | |
959 | core_stats->rx_crc_errors + | |
960 | core_stats->rx_frame_errors); | |
961 | } | |
962 | ||
963 | return stats_count; | |
964 | } | |
965 | ||
966 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) | |
967 | { | |
968 | struct efx_nic *efx = channel->efx; | |
969 | unsigned int mode, value; | |
970 | efx_dword_t timer_cmd; | |
971 | ||
972 | if (channel->irq_moderation) { | |
973 | mode = 3; | |
974 | value = channel->irq_moderation - 1; | |
975 | } else { | |
976 | mode = 0; | |
977 | value = 0; | |
978 | } | |
979 | ||
980 | if (EFX_EF10_WORKAROUND_35388(efx)) { | |
981 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, | |
982 | EFE_DD_EVQ_IND_TIMER_FLAGS, | |
983 | ERF_DD_EVQ_IND_TIMER_MODE, mode, | |
984 | ERF_DD_EVQ_IND_TIMER_VAL, value); | |
985 | efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, | |
986 | channel->channel); | |
987 | } else { | |
988 | EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, | |
989 | ERF_DZ_TC_TIMER_VAL, value); | |
990 | efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, | |
991 | channel->channel); | |
992 | } | |
993 | } | |
994 | ||
995 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | |
996 | { | |
997 | wol->supported = 0; | |
998 | wol->wolopts = 0; | |
999 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | |
1000 | } | |
1001 | ||
1002 | static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) | |
1003 | { | |
1004 | if (type != 0) | |
1005 | return -EINVAL; | |
1006 | return 0; | |
1007 | } | |
1008 | ||
1009 | static void efx_ef10_mcdi_request(struct efx_nic *efx, | |
1010 | const efx_dword_t *hdr, size_t hdr_len, | |
1011 | const efx_dword_t *sdu, size_t sdu_len) | |
1012 | { | |
1013 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1014 | u8 *pdu = nic_data->mcdi_buf.addr; | |
1015 | ||
1016 | memcpy(pdu, hdr, hdr_len); | |
1017 | memcpy(pdu + hdr_len, sdu, sdu_len); | |
1018 | wmb(); | |
1019 | ||
1020 | /* The hardware provides 'low' and 'high' (doorbell) registers | |
1021 | * for passing the 64-bit address of an MCDI request to | |
1022 | * firmware. However the dwords are swapped by firmware. The | |
1023 | * least significant bits of the doorbell are then 0 for all | |
1024 | * MCDI requests due to alignment. | |
1025 | */ | |
1026 | _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), | |
1027 | ER_DZ_MC_DB_LWRD); | |
1028 | _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), | |
1029 | ER_DZ_MC_DB_HWRD); | |
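	/* Worked example with a hypothetical DMA address 0x1_2345_6700: LWRD
	 * is written with the high dword (0x00000001) and HWRD with the low
	 * dword (0x23456700).  Because the MCDI buffer is aligned, the least
	 * significant bits of the HWRD write are always zero here, which is
	 * what lets the probe path above use a bare "1" in HWRD as an
	 * unambiguous cancel message.
	 */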
1030 | } | |
1031 | ||
1032 | static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) | |
1033 | { | |
1034 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1035 | const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; | |
1036 | ||
1037 | rmb(); | |
1038 | return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); | |
1039 | } | |
1040 | ||
1041 | static void | |
1042 | efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, | |
1043 | size_t offset, size_t outlen) | |
1044 | { | |
1045 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1046 | const u8 *pdu = nic_data->mcdi_buf.addr; | |
1047 | ||
1048 | memcpy(outbuf, pdu + offset, outlen); | |
1049 | } | |
1050 | ||
1051 | static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) | |
1052 | { | |
1053 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1054 | int rc; | |
1055 | ||
1056 | rc = efx_ef10_get_warm_boot_count(efx); | |
1057 | if (rc < 0) { | |
1058 | /* The firmware is presumably in the process of | |
1059 | * rebooting. However, we are supposed to report each | |
1060 | * reboot just once, so we must only do that once we | |
1061 | * can read and store the updated warm boot count. | |
1062 | */ | |
1063 | return 0; | |
1064 | } | |
1065 | ||
1066 | if (rc == nic_data->warm_boot_count) | |
1067 | return 0; | |
1068 | ||
1069 | nic_data->warm_boot_count = rc; | |
1070 | ||
1071 | /* All our allocations have been reset */ | |
1072 | nic_data->must_realloc_vis = true; | |
1073 | nic_data->must_restore_filters = true; | |
183233be | 1074 | nic_data->must_restore_piobufs = true; |
8127d661 BH |
1075 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
1076 | ||
a915ccc9 BH |
1077 | /* The datapath firmware might have been changed */ |
1078 | nic_data->must_check_datapath_caps = true; | |
1079 | ||
869070c5 BH |
1080 | /* MAC statistics have been cleared on the NIC; clear the local |
1081 | * statistic that we update with efx_update_diff_stat(). | |
1082 | */ | |
1083 | nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; | |
1084 | ||
8127d661 BH |
1085 | return -EIO; |
1086 | } | |
1087 | ||
1088 | /* Handle an MSI interrupt | |
1089 | * | |
1090 | * Handle an MSI hardware interrupt. This routine schedules event | |
1091 | * queue processing. No interrupt acknowledgement cycle is necessary. | |
1092 | * Also, we never need to check that the interrupt is for us, since | |
1093 | * MSI interrupts cannot be shared. | |
1094 | */ | |
1095 | static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |
1096 | { | |
1097 | struct efx_msi_context *context = dev_id; | |
1098 | struct efx_nic *efx = context->efx; | |
1099 | ||
1100 | netif_vdbg(efx, intr, efx->net_dev, | |
1101 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | |
1102 | ||
1103 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { | |
1104 | /* Note test interrupts */ | |
1105 | if (context->index == efx->irq_level) | |
1106 | efx->last_irq_cpu = raw_smp_processor_id(); | |
1107 | ||
1108 | /* Schedule processing of the channel */ | |
1109 | efx_schedule_channel_irq(efx->channel[context->index]); | |
1110 | } | |
1111 | ||
1112 | return IRQ_HANDLED; | |
1113 | } | |
1114 | ||
1115 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | |
1116 | { | |
1117 | struct efx_nic *efx = dev_id; | |
1118 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | |
1119 | struct efx_channel *channel; | |
1120 | efx_dword_t reg; | |
1121 | u32 queues; | |
1122 | ||
1123 | /* Read the ISR which also ACKs the interrupts */ | |
1124 | efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); | |
1125 | queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); | |
1126 | ||
1127 | if (queues == 0) | |
1128 | return IRQ_NONE; | |
1129 | ||
1130 | if (likely(soft_enabled)) { | |
1131 | /* Note test interrupts */ | |
1132 | if (queues & (1U << efx->irq_level)) | |
1133 | efx->last_irq_cpu = raw_smp_processor_id(); | |
1134 | ||
1135 | efx_for_each_channel(channel, efx) { | |
1136 | if (queues & 1) | |
1137 | efx_schedule_channel_irq(channel); | |
1138 | queues >>= 1; | |
1139 | } | |
1140 | } | |
1141 | ||
1142 | netif_vdbg(efx, intr, efx->net_dev, | |
1143 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | |
1144 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | |
1145 | ||
1146 | return IRQ_HANDLED; | |
1147 | } | |
1148 | ||
1149 | static void efx_ef10_irq_test_generate(struct efx_nic *efx) | |
1150 | { | |
1151 | MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); | |
1152 | ||
1153 | BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); | |
1154 | ||
1155 | MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); | |
1156 | (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, | |
1157 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
1158 | } | |
1159 | ||
1160 | static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) | |
1161 | { | |
1162 | return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, | |
1163 | (tx_queue->ptr_mask + 1) * | |
1164 | sizeof(efx_qword_t), | |
1165 | GFP_KERNEL); | |
1166 | } | |
1167 | ||
1168 | /* This writes to the TX_DESC_WPTR and also pushes data */ | |
1169 | static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, | |
1170 | const efx_qword_t *txd) | |
1171 | { | |
1172 | unsigned int write_ptr; | |
1173 | efx_oword_t reg; | |
1174 | ||
1175 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
1176 | EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); | |
1177 | reg.qword[0] = *txd; | |
1178 | efx_writeo_page(tx_queue->efx, ®, | |
1179 | ER_DZ_TX_DESC_UPD, tx_queue->queue); | |
1180 | } | |
1181 | ||
1182 | static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) | |
1183 | { | |
1184 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | |
1185 | EFX_BUF_SIZE)); | |
1186 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); | |
1187 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | |
1188 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; | |
1189 | struct efx_channel *channel = tx_queue->channel; | |
1190 | struct efx_nic *efx = tx_queue->efx; | |
1191 | size_t inlen, outlen; | |
1192 | dma_addr_t dma_addr; | |
1193 | efx_qword_t *txd; | |
1194 | int rc; | |
1195 | int i; | |
1196 | ||
1197 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); | |
1198 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); | |
1199 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); | |
1200 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); | |
1201 | MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS, | |
1202 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, | |
1203 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); | |
1204 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); | |
1205 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1206 | ||
1207 | dma_addr = tx_queue->txd.buf.dma_addr; | |
1208 | ||
1209 | netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", | |
1210 | tx_queue->queue, entries, (u64)dma_addr); | |
1211 | ||
1212 | for (i = 0; i < entries; ++i) { | |
1213 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); | |
1214 | dma_addr += EFX_BUF_SIZE; | |
1215 | } | |
1216 | ||
1217 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); | |
1218 | ||
1219 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, | |
1220 | outbuf, sizeof(outbuf), &outlen); | |
1221 | if (rc) | |
1222 | goto fail; | |
1223 | ||
1224 | /* A previous user of this TX queue might have set us up the | |
1225 | * bomb by writing a descriptor to the TX push collector but | |
1226 | * not the doorbell. (Each collector belongs to a port, not a | |
1227 | * queue or function, so cannot easily be reset.) We must | |
1228 | * attempt to push a no-op descriptor in its place. | |
1229 | */ | |
1230 | tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; | |
1231 | tx_queue->insert_count = 1; | |
1232 | txd = efx_tx_desc(tx_queue, 0); | |
1233 | EFX_POPULATE_QWORD_4(*txd, | |
1234 | ESF_DZ_TX_DESC_IS_OPT, true, | |
1235 | ESF_DZ_TX_OPTION_TYPE, | |
1236 | ESE_DZ_TX_OPTION_DESC_CRC_CSUM, | |
1237 | ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, | |
1238 | ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); | |
1239 | tx_queue->write_count = 1; | |
1240 | wmb(); | |
1241 | efx_ef10_push_tx_desc(tx_queue, txd); | |
1242 | ||
1243 | return; | |
1244 | ||
1245 | fail: | |
1246 | WARN_ON(true); | |
1247 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1248 | } | |
1249 | ||
1250 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) | |
1251 | { | |
1252 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); | |
1253 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); | |
1254 | struct efx_nic *efx = tx_queue->efx; | |
1255 | size_t outlen; | |
1256 | int rc; | |
1257 | ||
1258 | MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, | |
1259 | tx_queue->queue); | |
1260 | ||
1261 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), | |
1262 | outbuf, sizeof(outbuf), &outlen); | |
1263 | ||
1264 | if (rc && rc != -EALREADY) | |
1265 | goto fail; | |
1266 | ||
1267 | return; | |
1268 | ||
1269 | fail: | |
1270 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1271 | } | |
1272 | ||
1273 | static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) | |
1274 | { | |
1275 | efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); | |
1276 | } | |
1277 | ||
1278 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | |
1279 | static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) | |
1280 | { | |
1281 | unsigned int write_ptr; | |
1282 | efx_dword_t reg; | |
1283 | ||
1284 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
1285 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); | |
1286 | efx_writed_page(tx_queue->efx, ®, | |
1287 | ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); | |
1288 | } | |
1289 | ||
1290 | static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) | |
1291 | { | |
1292 | unsigned int old_write_count = tx_queue->write_count; | |
1293 | struct efx_tx_buffer *buffer; | |
1294 | unsigned int write_ptr; | |
1295 | efx_qword_t *txd; | |
1296 | ||
1297 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | |
1298 | ||
1299 | do { | |
1300 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
1301 | buffer = &tx_queue->buffer[write_ptr]; | |
1302 | txd = efx_tx_desc(tx_queue, write_ptr); | |
1303 | ++tx_queue->write_count; | |
1304 | ||
1305 | /* Create TX descriptor ring entry */ | |
1306 | if (buffer->flags & EFX_TX_BUF_OPTION) { | |
1307 | *txd = buffer->option; | |
1308 | } else { | |
1309 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); | |
1310 | EFX_POPULATE_QWORD_3( | |
1311 | *txd, | |
1312 | ESF_DZ_TX_KER_CONT, | |
1313 | buffer->flags & EFX_TX_BUF_CONT, | |
1314 | ESF_DZ_TX_KER_BYTE_CNT, buffer->len, | |
1315 | ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); | |
1316 | } | |
1317 | } while (tx_queue->write_count != tx_queue->insert_count); | |
1318 | ||
1319 | wmb(); /* Ensure descriptors are written before they are fetched */ | |
1320 | ||
1321 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { | |
1322 | txd = efx_tx_desc(tx_queue, | |
1323 | old_write_count & tx_queue->ptr_mask); | |
1324 | efx_ef10_push_tx_desc(tx_queue, txd); | |
1325 | ++tx_queue->pushes; | |
1326 | } else { | |
1327 | efx_ef10_notify_tx_desc(tx_queue); | |
1328 | } | |
1329 | } | |
1330 | ||
1331 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) | |
1332 | { | |
1333 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); | |
1334 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); | |
1335 | size_t outlen; | |
1336 | int rc; | |
1337 | ||
1338 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, | |
1339 | EVB_PORT_ID_ASSIGNED); | |
1340 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, | |
1341 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); | |
1342 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, | |
1343 | EFX_MAX_CHANNELS); | |
1344 | ||
1345 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), | |
1346 | outbuf, sizeof(outbuf), &outlen); | |
1347 | if (rc != 0) | |
1348 | return rc; | |
1349 | ||
1350 | if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) | |
1351 | return -EIO; | |
1352 | ||
1353 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); | |
1354 | ||
1355 | return 0; | |
1356 | } | |
1357 | ||
1358 | static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) | |
1359 | { | |
1360 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); | |
1361 | int rc; | |
1362 | ||
1363 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, | |
1364 | context); | |
1365 | ||
1366 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), | |
1367 | NULL, 0, NULL); | |
1368 | WARN_ON(rc != 0); | |
1369 | } | |
1370 | ||
1371 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) | |
1372 | { | |
1373 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); | |
1374 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); | |
1375 | int i, rc; | |
1376 | ||
1377 | MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, | |
1378 | context); | |
1379 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | |
1380 | MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); | |
1381 | ||
1382 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) | |
1383 | MCDI_PTR(tablebuf, | |
1384 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = | |
1385 | (u8) efx->rx_indir_table[i]; | |
1386 | ||
1387 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, | |
1388 | sizeof(tablebuf), NULL, 0, NULL); | |
1389 | if (rc != 0) | |
1390 | return rc; | |
1391 | ||
1392 | MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, | |
1393 | context); | |
1394 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != | |
1395 | MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); | |
1396 | for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) | |
1397 | MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = | |
1398 | efx->rx_hash_key[i]; | |
1399 | ||
1400 | return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, | |
1401 | sizeof(keybuf), NULL, 0, NULL); | |
1402 | } | |
1403 | ||
1404 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) | |
1405 | { | |
1406 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1407 | ||
1408 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) | |
1409 | efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); | |
1410 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
1411 | } | |
1412 | ||
1413 | static void efx_ef10_rx_push_indir_table(struct efx_nic *efx) | |
1414 | { | |
1415 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1416 | int rc; | |
1417 | ||
1418 | netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n"); | |
1419 | ||
1420 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { | |
1421 | rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); | |
1422 | if (rc != 0) | |
1423 | goto fail; | |
1424 | } | |
1425 | ||
1426 | rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); | |
1427 | if (rc != 0) | |
1428 | goto fail; | |
1429 | ||
1430 | return; | |
1431 | ||
1432 | fail: | |
1433 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1434 | } | |
1435 | ||
1436 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) | |
1437 | { | |
1438 | return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, | |
1439 | (rx_queue->ptr_mask + 1) * | |
1440 | sizeof(efx_qword_t), | |
1441 | GFP_KERNEL); | |
1442 | } | |
1443 | ||
1444 | static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |
1445 | { | |
1446 | MCDI_DECLARE_BUF(inbuf, | |
1447 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | |
1448 | EFX_BUF_SIZE)); | |
1449 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); | |
1450 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
1451 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; | |
1452 | struct efx_nic *efx = rx_queue->efx; | |
1453 | size_t inlen, outlen; | |
1454 | dma_addr_t dma_addr; | |
1455 | int rc; | |
1456 | int i; | |
1457 | ||
1458 | rx_queue->scatter_n = 0; | |
1459 | rx_queue->scatter_len = 0; | |
1460 | ||
1461 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); | |
1462 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); | |
1463 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); | |
1464 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, | |
1465 | efx_rx_queue_index(rx_queue)); | |
1466 | MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS, | |
1467 | INIT_RXQ_IN_FLAG_PREFIX, 1); | |
1468 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); | |
1469 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1470 | ||
1471 | dma_addr = rx_queue->rxd.buf.dma_addr; | |
1472 | ||
1473 | netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", | |
1474 | efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); | |
1475 | ||
1476 | for (i = 0; i < entries; ++i) { | |
1477 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); | |
1478 | dma_addr += EFX_BUF_SIZE; | |
1479 | } | |
1480 | ||
1481 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); | |
1482 | ||
1483 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, | |
1484 | outbuf, sizeof(outbuf), &outlen); | |
1485 | if (rc) | |
1486 | goto fail; | |
1487 | ||
1488 | return; | |
1489 | ||
1490 | fail: | |
1491 | WARN_ON(true); | |
1492 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1493 | } | |
1494 | ||
1495 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) | |
1496 | { | |
1497 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); | |
1498 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); | |
1499 | struct efx_nic *efx = rx_queue->efx; | |
1500 | size_t outlen; | |
1501 | int rc; | |
1502 | ||
1503 | MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, | |
1504 | efx_rx_queue_index(rx_queue)); | |
1505 | ||
1506 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), | |
1507 | outbuf, sizeof(outbuf), &outlen); | |
1508 | ||
1509 | if (rc && rc != -EALREADY) | |
1510 | goto fail; | |
1511 | ||
1512 | return; | |
1513 | ||
1514 | fail: | |
1515 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1516 | } | |
1517 | ||
1518 | static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) | |
1519 | { | |
1520 | efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); | |
1521 | } | |
1522 | ||
1523 | /* This creates an entry in the RX descriptor queue */ | |
1524 | static inline void | |
1525 | efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | |
1526 | { | |
1527 | struct efx_rx_buffer *rx_buf; | |
1528 | efx_qword_t *rxd; | |
1529 | ||
1530 | rxd = efx_rx_desc(rx_queue, index); | |
1531 | rx_buf = efx_rx_buffer(rx_queue, index); | |
1532 | EFX_POPULATE_QWORD_2(*rxd, | |
1533 | ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, | |
1534 | ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | |
1535 | } | |
1536 | ||
1537 | static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) | |
1538 | { | |
1539 | struct efx_nic *efx = rx_queue->efx; | |
1540 | unsigned int write_count; | |
1541 | efx_dword_t reg; | |
1542 | ||
1543 | /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ | |
1544 | write_count = rx_queue->added_count & ~7; | |
1545 | if (rx_queue->notified_count == write_count) | |
1546 | return; | |
1547 | ||
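| /* Build hardware descriptors for all buffers added since the last doorbell, | |
|  * then write the (8-aligned) write pointer below. | |
|  */ | |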
1548 | do | |
1549 | efx_ef10_build_rx_desc( | |
1550 | rx_queue, | |
1551 | rx_queue->notified_count & rx_queue->ptr_mask); | |
1552 | while (++rx_queue->notified_count != write_count); | |
1553 | ||
1554 | wmb(); | |
1555 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, | |
1556 | write_count & rx_queue->ptr_mask); | |
1557 | efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, | |
1558 | efx_rx_queue_index(rx_queue)); | |
1559 | } | |
1560 | ||
1561 | static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; | |
1562 | ||
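| /* Post a driver-generated event via MCDI so that the RX queue is refilled | |
|  * from event-processing context (see efx_ef10_handle_driver_generated_event). | |
|  */ | |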
1563 | static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) | |
1564 | { | |
1565 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
1566 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
1567 | efx_qword_t event; | |
1568 | ||
1569 | EFX_POPULATE_QWORD_2(event, | |
1570 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
1571 | ESF_DZ_EV_DATA, EFX_EF10_REFILL); | |
1572 | ||
1573 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); | |
1574 | ||
1575 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has | |
1576 | * already swapped the data to little-endian order. | |
1577 | */ | |
1578 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
1579 | sizeof(efx_qword_t)); | |
1580 | ||
1581 | efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, | |
1582 | inbuf, sizeof(inbuf), 0, | |
1583 | efx_ef10_rx_defer_refill_complete, 0); | |
1584 | } | |
1585 | ||
1586 | static void | |
1587 | efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, | |
1588 | int rc, efx_dword_t *outbuf, | |
1589 | size_t outlen_actual) | |
1590 | { | |
1591 | /* nothing to do */ | |
1592 | } | |
1593 | ||
1594 | static int efx_ef10_ev_probe(struct efx_channel *channel) | |
1595 | { | |
1596 | return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, | |
1597 | (channel->eventq_mask + 1) * | |
1598 | sizeof(efx_qword_t), | |
1599 | GFP_KERNEL); | |
1600 | } | |
1601 | ||
1602 | static int efx_ef10_ev_init(struct efx_channel *channel) | |
1603 | { | |
1604 | MCDI_DECLARE_BUF(inbuf, | |
1605 | MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / | |
1606 | EFX_BUF_SIZE)); | |
1607 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); | |
1608 | size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; | |
1609 | struct efx_nic *efx = channel->efx; | |
1610 | struct efx_ef10_nic_data *nic_data; | |
1611 | bool supports_rx_merge; | |
1612 | size_t inlen, outlen; | |
1613 | dma_addr_t dma_addr; | |
1614 | int rc; | |
1615 | int i; | |
1616 | ||
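| /* Check whether the firmware datapath supports RX event batching; this | |
|  * decides the CUT_THRU flag in the INIT_EVQ request below. | |
|  */ | |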
1617 | nic_data = efx->nic_data; | |
1618 | supports_rx_merge = | |
1619 | !!(nic_data->datapath_caps & | |
1620 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); | |
1621 | ||
1622 | /* Fill event queue with all ones (i.e. empty events) */ | |
1623 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); | |
1624 | ||
1625 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); | |
1626 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); | |
1627 | /* INIT_EVQ expects index in vector table, not absolute */ | |
1628 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); | |
1629 | MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, | |
1630 | INIT_EVQ_IN_FLAG_INTERRUPTING, 1, | |
1631 | INIT_EVQ_IN_FLAG_RX_MERGE, 1, | |
1632 | INIT_EVQ_IN_FLAG_TX_MERGE, 1, | |
1633 | INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); | |
1634 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, | |
1635 | MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); | |
1636 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); | |
1637 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); | |
1638 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, | |
1639 | MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); | |
1640 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); | |
1641 | ||
1642 | dma_addr = channel->eventq.buf.dma_addr; | |
1643 | for (i = 0; i < entries; ++i) { | |
1644 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); | |
1645 | dma_addr += EFX_BUF_SIZE; | |
1646 | } | |
1647 | ||
1648 | inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); | |
1649 | ||
1650 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, | |
1651 | outbuf, sizeof(outbuf), &outlen); | |
1652 | if (rc) | |
1653 | goto fail; | |
1654 | ||
1655 | /* IRQ return is ignored */ | |
1656 | ||
1657 | return 0; | |
1658 | ||
1659 | fail: | |
1660 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1661 | return rc; | |
1662 | } | |
1663 | ||
1664 | static void efx_ef10_ev_fini(struct efx_channel *channel) | |
1665 | { | |
1666 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); | |
1667 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); | |
1668 | struct efx_nic *efx = channel->efx; | |
1669 | size_t outlen; | |
1670 | int rc; | |
1671 | ||
1672 | MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); | |
1673 | ||
1674 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), | |
1675 | outbuf, sizeof(outbuf), &outlen); | |
1676 | ||
1677 | if (rc && rc != -EALREADY) | |
1678 | goto fail; | |
1679 | ||
1680 | return; | |
1681 | ||
1682 | fail: | |
1683 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1684 | } | |
1685 | ||
1686 | static void efx_ef10_ev_remove(struct efx_channel *channel) | |
1687 | { | |
1688 | efx_nic_free_buffer(channel->efx, &channel->eventq.buf); | |
1689 | } | |
1690 | ||
1691 | static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, | |
1692 | unsigned int rx_queue_label) | |
1693 | { | |
1694 | struct efx_nic *efx = rx_queue->efx; | |
1695 | ||
1696 | netif_info(efx, hw, efx->net_dev, | |
1697 | "rx event arrived on queue %d labeled as queue %u\n", | |
1698 | efx_rx_queue_index(rx_queue), rx_queue_label); | |
1699 | ||
1700 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1701 | } | |
1702 | ||
1703 | static void | |
1704 | efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, | |
1705 | unsigned int actual, unsigned int expected) | |
1706 | { | |
1707 | unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; | |
1708 | struct efx_nic *efx = rx_queue->efx; | |
1709 | ||
1710 | netif_info(efx, hw, efx->net_dev, | |
1711 | "dropped %d events (index=%d expected=%d)\n", | |
1712 | dropped, actual, expected); | |
1713 | ||
1714 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1715 | } | |
1716 | ||
1717 | /* A partially received scattered packet was aborted; clean up the queue state. */ | |
1718 | static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) | |
1719 | { | |
1720 | unsigned int rx_desc_ptr; | |
1721 | ||
1722 | WARN_ON(rx_queue->scatter_n == 0); | |
1723 | ||
1724 | netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, | |
1725 | "scattered RX aborted (dropping %u buffers)\n", | |
1726 | rx_queue->scatter_n); | |
1727 | ||
1728 | rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; | |
1729 | ||
1730 | efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, | |
1731 | 0, EFX_RX_PKT_DISCARD); | |
1732 | ||
1733 | rx_queue->removed_count += rx_queue->scatter_n; | |
1734 | rx_queue->scatter_n = 0; | |
1735 | rx_queue->scatter_len = 0; | |
1736 | ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; | |
1737 | } | |
1738 | ||
1739 | static int efx_ef10_handle_rx_event(struct efx_channel *channel, | |
1740 | const efx_qword_t *event) | |
1741 | { | |
1742 | unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; | |
1743 | unsigned int n_descs, n_packets, i; | |
1744 | struct efx_nic *efx = channel->efx; | |
1745 | struct efx_rx_queue *rx_queue; | |
1746 | bool rx_cont; | |
1747 | u16 flags = 0; | |
1748 | ||
1749 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | |
1750 | return 0; | |
1751 | ||
1752 | /* Basic packet information */ | |
1753 | rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); | |
1754 | next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); | |
1755 | rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); | |
1756 | rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); | |
1757 | rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); | |
1758 | ||
1759 | WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)); | |
1760 | ||
1761 | rx_queue = efx_channel_get_rx_queue(channel); | |
1762 | ||
1763 | if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) | |
1764 | efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); | |
1765 | ||
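| /* Number of descriptors completed by this event, computed from the low bits | |
|  * of the descriptor pointer modulo the DSC_PTR_LBITS field width. | |
|  */ | |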
1766 | n_descs = ((next_ptr_lbits - rx_queue->removed_count) & | |
1767 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
1768 | ||
1769 | if (n_descs != rx_queue->scatter_n + 1) { | |
92a04168 BH |
1770 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
1771 | ||
8127d661 BH |
1772 | /* detect rx abort */ |
1773 | if (unlikely(n_descs == rx_queue->scatter_n)) { | |
1774 | WARN_ON(rx_bytes != 0); | |
1775 | efx_ef10_handle_rx_abort(rx_queue); | |
1776 | return 0; | |
1777 | } | |
1778 | ||
92a04168 BH |
1779 | /* Check that RX completion merging is valid, i.e. |
1780 | * the current firmware supports it and this is a | |
1781 | * non-scattered packet. | |
1782 | */ | |
1783 | if (!(nic_data->datapath_caps & | |
1784 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || | |
1785 | rx_queue->scatter_n != 0 || rx_cont) { | |
8127d661 BH |
1786 | efx_ef10_handle_rx_bad_lbits( |
1787 | rx_queue, next_ptr_lbits, | |
1788 | (rx_queue->removed_count + | |
1789 | rx_queue->scatter_n + 1) & | |
1790 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
1791 | return 0; | |
1792 | } | |
1793 | ||
1794 | /* Merged completion for multiple non-scattered packets */ | |
1795 | rx_queue->scatter_n = 1; | |
1796 | rx_queue->scatter_len = 0; | |
1797 | n_packets = n_descs; | |
1798 | ++channel->n_rx_merge_events; | |
1799 | channel->n_rx_merge_packets += n_packets; | |
1800 | flags |= EFX_RX_PKT_PREFIX_LEN; | |
1801 | } else { | |
1802 | ++rx_queue->scatter_n; | |
1803 | rx_queue->scatter_len += rx_bytes; | |
1804 | if (rx_cont) | |
1805 | return 0; | |
1806 | n_packets = 1; | |
1807 | } | |
1808 | ||
1809 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) | |
1810 | flags |= EFX_RX_PKT_DISCARD; | |
1811 | ||
1812 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { | |
1813 | channel->n_rx_ip_hdr_chksum_err += n_packets; | |
1814 | } else if (unlikely(EFX_QWORD_FIELD(*event, | |
1815 | ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { | |
1816 | channel->n_rx_tcp_udp_chksum_err += n_packets; | |
1817 | } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || | |
1818 | rx_l4_class == ESE_DZ_L4_CLASS_UDP) { | |
1819 | flags |= EFX_RX_PKT_CSUMMED; | |
1820 | } | |
1821 | ||
1822 | if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) | |
1823 | flags |= EFX_RX_PKT_TCP; | |
1824 | ||
1825 | channel->irq_mod_score += 2 * n_packets; | |
1826 | ||
1827 | /* Handle received packet(s) */ | |
1828 | for (i = 0; i < n_packets; i++) { | |
1829 | efx_rx_packet(rx_queue, | |
1830 | rx_queue->removed_count & rx_queue->ptr_mask, | |
1831 | rx_queue->scatter_n, rx_queue->scatter_len, | |
1832 | flags); | |
1833 | rx_queue->removed_count += rx_queue->scatter_n; | |
1834 | } | |
1835 | ||
1836 | rx_queue->scatter_n = 0; | |
1837 | rx_queue->scatter_len = 0; | |
1838 | ||
1839 | return n_packets; | |
1840 | } | |
1841 | ||
1842 | static int | |
1843 | efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |
1844 | { | |
1845 | struct efx_nic *efx = channel->efx; | |
1846 | struct efx_tx_queue *tx_queue; | |
1847 | unsigned int tx_ev_desc_ptr; | |
1848 | unsigned int tx_ev_q_label; | |
1849 | int tx_descs = 0; | |
1850 | ||
1851 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | |
1852 | return 0; | |
1853 | ||
1854 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) | |
1855 | return 0; | |
1856 | ||
1857 | /* Transmit completion */ | |
1858 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); | |
1859 | tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); | |
1860 | tx_queue = efx_channel_get_tx_queue(channel, | |
1861 | tx_ev_q_label % EFX_TXQ_TYPES); | |
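| /* Count of descriptors completed by this event; the caller uses it for quota accounting */ | |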
1862 | tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & | |
1863 | tx_queue->ptr_mask); | |
1864 | efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); | |
1865 | ||
1866 | return tx_descs; | |
1867 | } | |
1868 | ||
1869 | static void | |
1870 | efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |
1871 | { | |
1872 | struct efx_nic *efx = channel->efx; | |
1873 | int subcode; | |
1874 | ||
1875 | subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); | |
1876 | ||
1877 | switch (subcode) { | |
1878 | case ESE_DZ_DRV_TIMER_EV: | |
1879 | case ESE_DZ_DRV_WAKE_UP_EV: | |
1880 | break; | |
1881 | case ESE_DZ_DRV_START_UP_EV: | |
1882 | /* Event queue init complete; nothing to do. */ | |
1883 | break; | |
1884 | default: | |
1885 | netif_err(efx, hw, efx->net_dev, | |
1886 | "channel %d unknown driver event type %d" | |
1887 | " (data " EFX_QWORD_FMT ")\n", | |
1888 | channel->channel, subcode, | |
1889 | EFX_QWORD_VAL(*event)); | |
1890 | ||
1891 | } | |
1892 | } | |
1893 | ||
1894 | static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, | |
1895 | efx_qword_t *event) | |
1896 | { | |
1897 | struct efx_nic *efx = channel->efx; | |
1898 | u32 subcode; | |
1899 | ||
1900 | subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); | |
1901 | ||
1902 | switch (subcode) { | |
1903 | case EFX_EF10_TEST: | |
1904 | channel->event_test_cpu = raw_smp_processor_id(); | |
1905 | break; | |
1906 | case EFX_EF10_REFILL: | |
1907 | /* The queue must be empty, so we won't receive any RX | |
1908 | * events and efx_process_channel() won't refill the | |
1909 | * queue. Refill it here. | |
1910 | */ | |
cce28794 | 1911 | efx_fast_push_rx_descriptors(&channel->rx_queue, true); |
8127d661 BH |
1912 | break; |
1913 | default: | |
1914 | netif_err(efx, hw, efx->net_dev, | |
1915 | "channel %d unknown driver event type %u" | |
1916 | " (data " EFX_QWORD_FMT ")\n", | |
1917 | channel->channel, (unsigned) subcode, | |
1918 | EFX_QWORD_VAL(*event)); | |
1919 | } | |
1920 | } | |
1921 | ||
1922 | static int efx_ef10_ev_process(struct efx_channel *channel, int quota) | |
1923 | { | |
1924 | struct efx_nic *efx = channel->efx; | |
1925 | efx_qword_t event, *p_event; | |
1926 | unsigned int read_ptr; | |
1927 | int ev_code; | |
1928 | int tx_descs = 0; | |
1929 | int spent = 0; | |
1930 | ||
1931 | read_ptr = channel->eventq_read_ptr; | |
1932 | ||
1933 | for (;;) { | |
1934 | p_event = efx_event(channel, read_ptr); | |
1935 | event = *p_event; | |
1936 | ||
1937 | if (!efx_event_present(&event)) | |
1938 | break; | |
1939 | ||
1940 | EFX_SET_QWORD(*p_event); | |
1941 | ||
1942 | ++read_ptr; | |
1943 | ||
1944 | ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); | |
1945 | ||
1946 | netif_vdbg(efx, drv, efx->net_dev, | |
1947 | "processing event on %d " EFX_QWORD_FMT "\n", | |
1948 | channel->channel, EFX_QWORD_VAL(event)); | |
1949 | ||
1950 | switch (ev_code) { | |
1951 | case ESE_DZ_EV_CODE_MCDI_EV: | |
1952 | efx_mcdi_process_event(channel, &event); | |
1953 | break; | |
1954 | case ESE_DZ_EV_CODE_RX_EV: | |
1955 | spent += efx_ef10_handle_rx_event(channel, &event); | |
1956 | if (spent >= quota) { | |
1957 | /* XXX can we split a merged event to | |
1958 | * avoid going over-quota? | |
1959 | */ | |
1960 | spent = quota; | |
1961 | goto out; | |
1962 | } | |
1963 | break; | |
1964 | case ESE_DZ_EV_CODE_TX_EV: | |
1965 | tx_descs += efx_ef10_handle_tx_event(channel, &event); | |
1966 | if (tx_descs > efx->txq_entries) { | |
1967 | spent = quota; | |
1968 | goto out; | |
1969 | } else if (++spent == quota) { | |
1970 | goto out; | |
1971 | } | |
1972 | break; | |
1973 | case ESE_DZ_EV_CODE_DRIVER_EV: | |
1974 | efx_ef10_handle_driver_event(channel, &event); | |
1975 | if (++spent == quota) | |
1976 | goto out; | |
1977 | break; | |
1978 | case EFX_EF10_DRVGEN_EV: | |
1979 | efx_ef10_handle_driver_generated_event(channel, &event); | |
1980 | break; | |
1981 | default: | |
1982 | netif_err(efx, hw, efx->net_dev, | |
1983 | "channel %d unknown event type %d" | |
1984 | " (data " EFX_QWORD_FMT ")\n", | |
1985 | channel->channel, ev_code, | |
1986 | EFX_QWORD_VAL(event)); | |
1987 | } | |
1988 | } | |
1989 | ||
1990 | out: | |
1991 | channel->eventq_read_ptr = read_ptr; | |
1992 | return spent; | |
1993 | } | |
1994 | ||
1995 | static void efx_ef10_ev_read_ack(struct efx_channel *channel) | |
1996 | { | |
1997 | struct efx_nic *efx = channel->efx; | |
1998 | efx_dword_t rptr; | |
1999 | ||
2000 | if (EFX_EF10_WORKAROUND_35388(efx)) { | |
2001 | BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < | |
2002 | (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
2003 | BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > | |
2004 | (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
2005 | ||
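| /* With workaround 35388 the read pointer is written through the indirect | |
|  * register in two parts: high bits first, then low bits. | |
|  */ | |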
2006 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
2007 | EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, | |
2008 | ERF_DD_EVQ_IND_RPTR, | |
2009 | (channel->eventq_read_ptr & | |
2010 | channel->eventq_mask) >> | |
2011 | ERF_DD_EVQ_IND_RPTR_WIDTH); | |
2012 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
2013 | channel->channel); | |
2014 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
2015 | EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, | |
2016 | ERF_DD_EVQ_IND_RPTR, | |
2017 | channel->eventq_read_ptr & | |
2018 | ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); | |
2019 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
2020 | channel->channel); | |
2021 | } else { | |
2022 | EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, | |
2023 | channel->eventq_read_ptr & | |
2024 | channel->eventq_mask); | |
2025 | efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); | |
2026 | } | |
2027 | } | |
2028 | ||
2029 | static void efx_ef10_ev_test_generate(struct efx_channel *channel) | |
2030 | { | |
2031 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
2032 | struct efx_nic *efx = channel->efx; | |
2033 | efx_qword_t event; | |
2034 | int rc; | |
2035 | ||
2036 | EFX_POPULATE_QWORD_2(event, | |
2037 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
2038 | ESF_DZ_EV_DATA, EFX_EF10_TEST); | |
2039 | ||
2040 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); | |
2041 | ||
2042 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has | |
2043 | * already swapped the data to little-endian order. | |
2044 | */ | |
2045 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
2046 | sizeof(efx_qword_t)); | |
2047 | ||
2048 | rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), | |
2049 | NULL, 0, NULL); | |
2050 | if (rc != 0) | |
2051 | goto fail; | |
2052 | ||
2053 | return; | |
2054 | ||
2055 | fail: | |
2056 | WARN_ON(true); | |
2057 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
2058 | } | |
2059 | ||
2060 | void efx_ef10_handle_drain_event(struct efx_nic *efx) | |
2061 | { | |
2062 | if (atomic_dec_and_test(&efx->active_queues)) | |
2063 | wake_up(&efx->flush_wq); | |
2064 | ||
2065 | WARN_ON(atomic_read(&efx->active_queues) < 0); | |
2066 | } | |
2067 | ||
2068 | static int efx_ef10_fini_dmaq(struct efx_nic *efx) | |
2069 | { | |
2070 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2071 | struct efx_channel *channel; | |
2072 | struct efx_tx_queue *tx_queue; | |
2073 | struct efx_rx_queue *rx_queue; | |
2074 | int pending; | |
2075 | ||
2076 | /* If the MC has just rebooted, the TX/RX queues will have already been | |
2077 | * torn down, but efx->active_queues needs to be set to zero. | |
2078 | */ | |
2079 | if (nic_data->must_realloc_vis) { | |
2080 | atomic_set(&efx->active_queues, 0); | |
2081 | return 0; | |
2082 | } | |
2083 | ||
2084 | /* Do not attempt to write to the NIC during EEH recovery */ | |
2085 | if (efx->state != STATE_RECOVERY) { | |
2086 | efx_for_each_channel(channel, efx) { | |
2087 | efx_for_each_channel_rx_queue(rx_queue, channel) | |
2088 | efx_ef10_rx_fini(rx_queue); | |
2089 | efx_for_each_channel_tx_queue(tx_queue, channel) | |
2090 | efx_ef10_tx_fini(tx_queue); | |
2091 | } | |
2092 | ||
2093 | wait_event_timeout(efx->flush_wq, | |
2094 | atomic_read(&efx->active_queues) == 0, | |
2095 | msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); | |
2096 | pending = atomic_read(&efx->active_queues); | |
2097 | if (pending) { | |
2098 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", | |
2099 | pending); | |
2100 | return -ETIMEDOUT; | |
2101 | } | |
2102 | } | |
2103 | ||
2104 | return 0; | |
2105 | } | |
2106 | ||
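| /* Two specs are equal if they have the same match flags, the same RX/TX | |
|  * direction, and identical fields from outer_vid onwards (compared as a | |
|  * single block, mirroring the hash in efx_ef10_filter_hash()). | |
|  */ | |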
2107 | static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, | |
2108 | const struct efx_filter_spec *right) | |
2109 | { | |
2110 | if ((left->match_flags ^ right->match_flags) | | |
2111 | ((left->flags ^ right->flags) & | |
2112 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) | |
2113 | return false; | |
2114 | ||
2115 | return memcmp(&left->outer_vid, &right->outer_vid, | |
2116 | sizeof(struct efx_filter_spec) - | |
2117 | offsetof(struct efx_filter_spec, outer_vid)) == 0; | |
2118 | } | |
2119 | ||
2120 | static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) | |
2121 | { | |
2122 | BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); | |
2123 | return jhash2((const u32 *)&spec->outer_vid, | |
2124 | (sizeof(struct efx_filter_spec) - | |
2125 | offsetof(struct efx_filter_spec, outer_vid)) / 4, | |
2126 | 0); | |
2127 | /* XXX should we randomise the initval? */ | |
2128 | } | |
2129 | ||
2130 | /* Decide whether a filter should be exclusive or else should allow | |
2131 | * delivery to additional recipients. Currently we decide that | |
2132 | * filters for specific local unicast MAC and IP addresses are | |
2133 | * exclusive. | |
2134 | */ | |
2135 | static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) | |
2136 | { | |
2137 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && | |
2138 | !is_multicast_ether_addr(spec->loc_mac)) | |
2139 | return true; | |
2140 | ||
2141 | if ((spec->match_flags & | |
2142 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == | |
2143 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { | |
2144 | if (spec->ether_type == htons(ETH_P_IP) && | |
2145 | !ipv4_is_multicast(spec->loc_host[0])) | |
2146 | return true; | |
2147 | if (spec->ether_type == htons(ETH_P_IPV6) && | |
2148 | ((const u8 *)spec->loc_host)[0] != 0xff) | |
2149 | return true; | |
2150 | } | |
2151 | ||
2152 | return false; | |
2153 | } | |
2154 | ||
2155 | static struct efx_filter_spec * | |
2156 | efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, | |
2157 | unsigned int filter_idx) | |
2158 | { | |
2159 | return (struct efx_filter_spec *)(table->entry[filter_idx].spec & | |
2160 | ~EFX_EF10_FILTER_FLAGS); | |
2161 | } | |
2162 | ||
2163 | static unsigned int | |
2164 | efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, | |
2165 | unsigned int filter_idx) | |
2166 | { | |
2167 | return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; | |
2168 | } | |
2169 | ||
2170 | static void | |
2171 | efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, | |
2172 | unsigned int filter_idx, | |
2173 | const struct efx_filter_spec *spec, | |
2174 | unsigned int flags) | |
2175 | { | |
2176 | table->entry[filter_idx].spec = (unsigned long)spec | flags; | |
2177 | } | |
2178 | ||
2179 | static void efx_ef10_filter_push_prep(struct efx_nic *efx, | |
2180 | const struct efx_filter_spec *spec, | |
2181 | efx_dword_t *inbuf, u64 handle, | |
2182 | bool replacing) | |
2183 | { | |
2184 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2185 | ||
2186 | memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); | |
2187 | ||
2188 | if (replacing) { | |
2189 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2190 | MC_CMD_FILTER_OP_IN_OP_REPLACE); | |
2191 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); | |
2192 | } else { | |
2193 | u32 match_fields = 0; | |
2194 | ||
2195 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2196 | efx_ef10_filter_is_exclusive(spec) ? | |
2197 | MC_CMD_FILTER_OP_IN_OP_INSERT : | |
2198 | MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); | |
2199 | ||
2200 | /* Convert match flags and values. Unlike almost | |
2201 | * everything else in MCDI, these fields are in | |
2202 | * network byte order. | |
2203 | */ | |
2204 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) | |
2205 | match_fields |= | |
2206 | is_multicast_ether_addr(spec->loc_mac) ? | |
2207 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : | |
2208 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; | |
2209 | #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ | |
2210 | if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ | |
2211 | match_fields |= \ | |
2212 | 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ | |
2213 | mcdi_field ## _LBN; \ | |
2214 | BUILD_BUG_ON( \ | |
2215 | MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ | |
2216 | sizeof(spec->gen_field)); \ | |
2217 | memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ | |
2218 | &spec->gen_field, sizeof(spec->gen_field)); \ | |
2219 | } | |
2220 | COPY_FIELD(REM_HOST, rem_host, SRC_IP); | |
2221 | COPY_FIELD(LOC_HOST, loc_host, DST_IP); | |
2222 | COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); | |
2223 | COPY_FIELD(REM_PORT, rem_port, SRC_PORT); | |
2224 | COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); | |
2225 | COPY_FIELD(LOC_PORT, loc_port, DST_PORT); | |
2226 | COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); | |
2227 | COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); | |
2228 | COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); | |
2229 | COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); | |
2230 | #undef COPY_FIELD | |
2231 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, | |
2232 | match_fields); | |
2233 | } | |
2234 | ||
2235 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
2236 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, | |
2237 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? | |
2238 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : | |
2239 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); | |
2240 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, | |
2241 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); | |
2242 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id); | |
2243 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, | |
2244 | (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? | |
2245 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : | |
2246 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); | |
2247 | if (spec->flags & EFX_FILTER_FLAG_RX_RSS) | |
2248 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, | |
2249 | spec->rss_context != | |
2250 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? | |
2251 | spec->rss_context : nic_data->rx_rss_context); | |
2252 | } | |
2253 | ||
2254 | static int efx_ef10_filter_push(struct efx_nic *efx, | |
2255 | const struct efx_filter_spec *spec, | |
2256 | u64 *handle, bool replacing) | |
2257 | { | |
2258 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2259 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); | |
2260 | int rc; | |
2261 | ||
2262 | efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); | |
2263 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
2264 | outbuf, sizeof(outbuf), NULL); | |
2265 | if (rc == 0) | |
2266 | *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); | |
2267 | return rc; | |
2268 | } | |
2269 | ||
2270 | static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, | |
2271 | enum efx_filter_match_flags match_flags) | |
2272 | { | |
2273 | unsigned int match_pri; | |
2274 | ||
2275 | for (match_pri = 0; | |
2276 | match_pri < table->rx_match_count; | |
2277 | match_pri++) | |
2278 | if (table->rx_match_flags[match_pri] == match_flags) | |
2279 | return match_pri; | |
2280 | ||
2281 | return -EPROTONOSUPPORT; | |
2282 | } | |
2283 | ||
2284 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, | |
2285 | struct efx_filter_spec *spec, | |
2286 | bool replace_equal) | |
2287 | { | |
2288 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2289 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | |
2290 | struct efx_filter_spec *saved_spec; | |
2291 | unsigned int match_pri, hash; | |
2292 | unsigned int priv_flags; | |
2293 | bool replacing = false; | |
2294 | int ins_index = -1; | |
2295 | DEFINE_WAIT(wait); | |
2296 | bool is_mc_recip; | |
2297 | s32 rc; | |
2298 | ||
2299 | /* For now, only support RX filters */ | |
2300 | if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != | |
2301 | EFX_FILTER_FLAG_RX) | |
2302 | return -EINVAL; | |
2303 | ||
2304 | rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); | |
2305 | if (rc < 0) | |
2306 | return rc; | |
2307 | match_pri = rc; | |
2308 | ||
2309 | hash = efx_ef10_filter_hash(spec); | |
2310 | is_mc_recip = efx_filter_is_mc_recipient(spec); | |
2311 | if (is_mc_recip) | |
2312 | bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | |
2313 | ||
2314 | /* Find any existing filters with the same match tuple or | |
2315 | * else a free slot to insert at. If any of them are busy, | |
2316 | * we have to wait and retry. | |
2317 | */ | |
2318 | for (;;) { | |
2319 | unsigned int depth = 1; | |
2320 | unsigned int i; | |
2321 | ||
2322 | spin_lock_bh(&efx->filter_lock); | |
2323 | ||
2324 | for (;;) { | |
2325 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2326 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2327 | ||
2328 | if (!saved_spec) { | |
2329 | if (ins_index < 0) | |
2330 | ins_index = i; | |
2331 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { | |
2332 | if (table->entry[i].spec & | |
2333 | EFX_EF10_FILTER_FLAG_BUSY) | |
2334 | break; | |
2335 | if (spec->priority < saved_spec->priority && | |
2336 | !(saved_spec->priority == | |
2337 | EFX_FILTER_PRI_REQUIRED && | |
2338 | saved_spec->flags & | |
2339 | EFX_FILTER_FLAG_RX_STACK)) { | |
2340 | rc = -EPERM; | |
2341 | goto out_unlock; | |
2342 | } | |
2343 | if (!is_mc_recip) { | |
2344 | /* This is the only one */ | |
2345 | if (spec->priority == | |
2346 | saved_spec->priority && | |
2347 | !replace_equal) { | |
2348 | rc = -EEXIST; | |
2349 | goto out_unlock; | |
2350 | } | |
2351 | ins_index = i; | |
2352 | goto found; | |
2353 | } else if (spec->priority > | |
2354 | saved_spec->priority || | |
2355 | (spec->priority == | |
2356 | saved_spec->priority && | |
2357 | replace_equal)) { | |
2358 | if (ins_index < 0) | |
2359 | ins_index = i; | |
2360 | else | |
2361 | __set_bit(depth, mc_rem_map); | |
2362 | } | |
2363 | } | |
2364 | ||
2365 | /* Once we reach the maximum search depth, use | |
2366 | * the first suitable slot or return -EBUSY if | |
2367 | * there was none | |
2368 | */ | |
2369 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { | |
2370 | if (ins_index < 0) { | |
2371 | rc = -EBUSY; | |
2372 | goto out_unlock; | |
2373 | } | |
2374 | goto found; | |
2375 | } | |
2376 | ||
2377 | ++depth; | |
2378 | } | |
2379 | ||
2380 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); | |
2381 | spin_unlock_bh(&efx->filter_lock); | |
2382 | schedule(); | |
2383 | } | |
2384 | ||
2385 | found: | |
2386 | /* Create a software table entry if necessary, and mark it | |
2387 | * busy. We might yet fail to insert, but any attempt to | |
2388 | * insert a conflicting filter while we're waiting for the | |
2389 | * firmware must find the busy entry. | |
2390 | */ | |
2391 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2392 | if (saved_spec) { | |
2393 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { | |
2394 | /* Just make sure it won't be removed */ | |
2395 | saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; | |
2396 | table->entry[ins_index].spec &= | |
2397 | ~EFX_EF10_FILTER_FLAG_STACK_OLD; | |
2398 | rc = ins_index; | |
2399 | goto out_unlock; | |
2400 | } | |
2401 | replacing = true; | |
2402 | priv_flags = efx_ef10_filter_entry_flags(table, ins_index); | |
2403 | } else { | |
2404 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); | |
2405 | if (!saved_spec) { | |
2406 | rc = -ENOMEM; | |
2407 | goto out_unlock; | |
2408 | } | |
2409 | *saved_spec = *spec; | |
2410 | priv_flags = 0; | |
2411 | } | |
2412 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, | |
2413 | priv_flags | EFX_EF10_FILTER_FLAG_BUSY); | |
2414 | ||
2415 | /* Mark lower-priority multicast recipients busy prior to removal */ | |
2416 | if (is_mc_recip) { | |
2417 | unsigned int depth, i; | |
2418 | ||
2419 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { | |
2420 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2421 | if (test_bit(depth, mc_rem_map)) | |
2422 | table->entry[i].spec |= | |
2423 | EFX_EF10_FILTER_FLAG_BUSY; | |
2424 | } | |
2425 | } | |
2426 | ||
2427 | spin_unlock_bh(&efx->filter_lock); | |
2428 | ||
2429 | rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, | |
2430 | replacing); | |
2431 | ||
2432 | /* Finalise the software table entry */ | |
2433 | spin_lock_bh(&efx->filter_lock); | |
2434 | if (rc == 0) { | |
2435 | if (replacing) { | |
2436 | /* Update the fields that may differ */ | |
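| /* Keep only the stack-ownership flag from the old spec and take the rest from the new one */ | |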
2437 | saved_spec->priority = spec->priority; | |
2438 | saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK; | |
2439 | saved_spec->flags |= spec->flags; | |
2440 | saved_spec->rss_context = spec->rss_context; | |
2441 | saved_spec->dmaq_id = spec->dmaq_id; | |
2442 | } | |
2443 | } else if (!replacing) { | |
2444 | kfree(saved_spec); | |
2445 | saved_spec = NULL; | |
2446 | } | |
2447 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); | |
2448 | ||
2449 | /* Remove and finalise entries for lower-priority multicast | |
2450 | * recipients | |
2451 | */ | |
2452 | if (is_mc_recip) { | |
2453 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2454 | unsigned int depth, i; | |
2455 | ||
2456 | memset(inbuf, 0, sizeof(inbuf)); | |
2457 | ||
2458 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { | |
2459 | if (!test_bit(depth, mc_rem_map)) | |
2460 | continue; | |
2461 | ||
2462 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2463 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2464 | priv_flags = efx_ef10_filter_entry_flags(table, i); | |
2465 | ||
2466 | if (rc == 0) { | |
2467 | spin_unlock_bh(&efx->filter_lock); | |
2468 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2469 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2470 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2471 | table->entry[i].handle); | |
2472 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, | |
2473 | inbuf, sizeof(inbuf), | |
2474 | NULL, 0, NULL); | |
2475 | spin_lock_bh(&efx->filter_lock); | |
2476 | } | |
2477 | ||
2478 | if (rc == 0) { | |
2479 | kfree(saved_spec); | |
2480 | saved_spec = NULL; | |
2481 | priv_flags = 0; | |
2482 | } else { | |
2483 | priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2484 | } | |
2485 | efx_ef10_filter_set_entry(table, i, saved_spec, | |
2486 | priv_flags); | |
2487 | } | |
2488 | } | |
2489 | ||
2490 | /* If successful, return the inserted filter ID */ | |
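| /* The ID encodes the match priority and the table row; remove/get decode it | |
|  * with division and modulo by HUNT_FILTER_TBL_ROWS. | |
|  */ | |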
2491 | if (rc == 0) | |
2492 | rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; | |
2493 | ||
2494 | wake_up_all(&table->waitq); | |
2495 | out_unlock: | |
2496 | spin_unlock_bh(&efx->filter_lock); | |
2497 | finish_wait(&table->waitq, &wait); | |
2498 | return rc; | |
2499 | } | |
2500 | ||
9fd8095d | 2501 | static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) |
8127d661 BH |
2502 | { |
2503 | /* no need to do anything here on EF10 */ | |
2504 | } | |
2505 | ||
2506 | /* Remove a filter. | |
2507 | * If !stack_requested, remove by ID | |
2508 | * If stack_requested, remove by index | |
2509 | * Filter ID may come from userland and must be range-checked. | |
2510 | */ | |
2511 | static int efx_ef10_filter_remove_internal(struct efx_nic *efx, | |
2512 | enum efx_filter_priority priority, | |
2513 | u32 filter_id, bool stack_requested) | |
2514 | { | |
2515 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; | |
2516 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2517 | MCDI_DECLARE_BUF(inbuf, | |
2518 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + | |
2519 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); | |
2520 | struct efx_filter_spec *spec; | |
2521 | DEFINE_WAIT(wait); | |
2522 | int rc; | |
2523 | ||
2524 | /* Find the software table entry and mark it busy. Don't | |
2525 | * remove it yet; any attempt to update while we're waiting | |
2526 | * for the firmware must find the busy entry. | |
2527 | */ | |
2528 | for (;;) { | |
2529 | spin_lock_bh(&efx->filter_lock); | |
2530 | if (!(table->entry[filter_idx].spec & | |
2531 | EFX_EF10_FILTER_FLAG_BUSY)) | |
2532 | break; | |
2533 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); | |
2534 | spin_unlock_bh(&efx->filter_lock); | |
2535 | schedule(); | |
2536 | } | |
2537 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2538 | if (!spec || spec->priority > priority || | |
2539 | (!stack_requested && | |
2540 | efx_ef10_filter_rx_match_pri(table, spec->match_flags) != | |
2541 | filter_id / HUNT_FILTER_TBL_ROWS)) { | |
2542 | rc = -ENOENT; | |
2543 | goto out_unlock; | |
2544 | } | |
2545 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2546 | spin_unlock_bh(&efx->filter_lock); | |
2547 | ||
2548 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) { | |
2549 | /* Reset steering of a stack-owned filter */ | |
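| /* Rather than removing it, push a replacement spec that reverts to default | |
|  * RSS delivery on queue 0. | |
|  */ | |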
2550 | ||
2551 | struct efx_filter_spec new_spec = *spec; | |
2552 | ||
2553 | new_spec.priority = EFX_FILTER_PRI_REQUIRED; | |
2554 | new_spec.flags = (EFX_FILTER_FLAG_RX | | |
2555 | EFX_FILTER_FLAG_RX_RSS | | |
2556 | EFX_FILTER_FLAG_RX_STACK); | |
2557 | new_spec.dmaq_id = 0; | |
2558 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; | |
2559 | rc = efx_ef10_filter_push(efx, &new_spec, | |
2560 | &table->entry[filter_idx].handle, | |
2561 | true); | |
2562 | ||
2563 | spin_lock_bh(&efx->filter_lock); | |
2564 | if (rc == 0) | |
2565 | *spec = new_spec; | |
2566 | } else { | |
2567 | /* Really remove the filter */ | |
2568 | ||
2569 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2570 | efx_ef10_filter_is_exclusive(spec) ? | |
2571 | MC_CMD_FILTER_OP_IN_OP_REMOVE : | |
2572 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2573 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2574 | table->entry[filter_idx].handle); | |
2575 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, | |
2576 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
2577 | ||
2578 | spin_lock_bh(&efx->filter_lock); | |
2579 | if (rc == 0) { | |
2580 | kfree(spec); | |
2581 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2582 | } | |
2583 | } | |
2584 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2585 | wake_up_all(&table->waitq); | |
2586 | out_unlock: | |
2587 | spin_unlock_bh(&efx->filter_lock); | |
2588 | finish_wait(&table->waitq, &wait); | |
2589 | return rc; | |
2590 | } | |
2591 | ||
2592 | static int efx_ef10_filter_remove_safe(struct efx_nic *efx, | |
2593 | enum efx_filter_priority priority, | |
2594 | u32 filter_id) | |
2595 | { | |
2596 | return efx_ef10_filter_remove_internal(efx, priority, filter_id, false); | |
2597 | } | |
2598 | ||
2599 | static int efx_ef10_filter_get_safe(struct efx_nic *efx, | |
2600 | enum efx_filter_priority priority, | |
2601 | u32 filter_id, struct efx_filter_spec *spec) | |
2602 | { | |
2603 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; | |
2604 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2605 | const struct efx_filter_spec *saved_spec; | |
2606 | int rc; | |
2607 | ||
2608 | spin_lock_bh(&efx->filter_lock); | |
2609 | saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2610 | if (saved_spec && saved_spec->priority == priority && | |
2611 | efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == | |
2612 | filter_id / HUNT_FILTER_TBL_ROWS) { | |
2613 | *spec = *saved_spec; | |
2614 | rc = 0; | |
2615 | } else { | |
2616 | rc = -ENOENT; | |
2617 | } | |
2618 | spin_unlock_bh(&efx->filter_lock); | |
2619 | return rc; | |
2620 | } | |
2621 | ||
2622 | static void efx_ef10_filter_clear_rx(struct efx_nic *efx, | |
2623 | enum efx_filter_priority priority) | |
2624 | { | |
2625 | /* TODO */ | |
2626 | } | |
2627 | ||
2628 | static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, | |
2629 | enum efx_filter_priority priority) | |
2630 | { | |
2631 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2632 | unsigned int filter_idx; | |
2633 | s32 count = 0; | |
2634 | ||
2635 | spin_lock_bh(&efx->filter_lock); | |
2636 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2637 | if (table->entry[filter_idx].spec && | |
2638 | efx_ef10_filter_entry_spec(table, filter_idx)->priority == | |
2639 | priority) | |
2640 | ++count; | |
2641 | } | |
2642 | spin_unlock_bh(&efx->filter_lock); | |
2643 | return count; | |
2644 | } | |
2645 | ||
2646 | static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) | |
2647 | { | |
2648 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2649 | ||
2650 | return table->rx_match_count * HUNT_FILTER_TBL_ROWS; | |
2651 | } | |
2652 | ||
2653 | static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, | |
2654 | enum efx_filter_priority priority, | |
2655 | u32 *buf, u32 size) | |
2656 | { | |
2657 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2658 | struct efx_filter_spec *spec; | |
2659 | unsigned int filter_idx; | |
2660 | s32 count = 0; | |
2661 | ||
2662 | spin_lock_bh(&efx->filter_lock); | |
2663 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2664 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2665 | if (spec && spec->priority == priority) { | |
2666 | if (count == size) { | |
2667 | count = -EMSGSIZE; | |
2668 | break; | |
2669 | } | |
2670 | buf[count++] = (efx_ef10_filter_rx_match_pri( | |
2671 | table, spec->match_flags) * | |
2672 | HUNT_FILTER_TBL_ROWS + | |
2673 | filter_idx); | |
2674 | } | |
2675 | } | |
2676 | spin_unlock_bh(&efx->filter_lock); | |
2677 | return count; | |
2678 | } | |
2679 | ||
2680 | #ifdef CONFIG_RFS_ACCEL | |
2681 | ||
2682 | static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; | |
2683 | ||
2684 | static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, | |
2685 | struct efx_filter_spec *spec) | |
2686 | { | |
2687 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2688 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2689 | struct efx_filter_spec *saved_spec; | |
2690 | unsigned int hash, i, depth = 1; | |
2691 | bool replacing = false; | |
2692 | int ins_index = -1; | |
2693 | u64 cookie; | |
2694 | s32 rc; | |
2695 | ||
2696 | /* Must be an RX filter without RSS and not for a multicast | |
2697 | * destination address (RFS only works for connected sockets). | |
2698 | * These restrictions allow us to pass only a tiny amount of | |
2699 | * data through to the completion function. | |
2700 | */ | |
2701 | EFX_WARN_ON_PARANOID(spec->flags != | |
2702 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); | |
2703 | EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); | |
2704 | EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); | |
2705 | ||
2706 | hash = efx_ef10_filter_hash(spec); | |
2707 | ||
2708 | spin_lock_bh(&efx->filter_lock); | |
2709 | ||
2710 | /* Find any existing filter with the same match tuple or else | |
2711 | * a free slot to insert at. If an existing filter is busy, | |
2712 | * we have to give up. | |
2713 | */ | |
2714 | for (;;) { | |
2715 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2716 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2717 | ||
2718 | if (!saved_spec) { | |
2719 | if (ins_index < 0) | |
2720 | ins_index = i; | |
2721 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { | |
2722 | if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { | |
2723 | rc = -EBUSY; | |
2724 | goto fail_unlock; | |
2725 | } | |
2726 | EFX_WARN_ON_PARANOID(saved_spec->flags & | |
2727 | EFX_FILTER_FLAG_RX_STACK); | |
2728 | if (spec->priority < saved_spec->priority) { | |
2729 | rc = -EPERM; | |
2730 | goto fail_unlock; | |
2731 | } | |
2732 | ins_index = i; | |
2733 | break; | |
2734 | } | |
2735 | ||
2736 | /* Once we reach the maximum search depth, use the | |
2737 | * first suitable slot or return -EBUSY if there was | |
2738 | * none | |
2739 | */ | |
2740 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { | |
2741 | if (ins_index < 0) { | |
2742 | rc = -EBUSY; | |
2743 | goto fail_unlock; | |
2744 | } | |
2745 | break; | |
2746 | } | |
2747 | ||
2748 | ++depth; | |
2749 | } | |
2750 | ||
2751 | /* Create a software table entry if necessary, and mark it | |
2752 | * busy. We might yet fail to insert, but any attempt to | |
2753 | * insert a conflicting filter while we're waiting for the | |
2754 | * firmware must find the busy entry. | |
2755 | */ | |
2756 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2757 | if (saved_spec) { | |
2758 | replacing = true; | |
2759 | } else { | |
2760 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); | |
2761 | if (!saved_spec) { | |
2762 | rc = -ENOMEM; | |
2763 | goto fail_unlock; | |
2764 | } | |
2765 | *saved_spec = *spec; | |
2766 | } | |
2767 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, | |
2768 | EFX_EF10_FILTER_FLAG_BUSY); | |
2769 | ||
2770 | spin_unlock_bh(&efx->filter_lock); | |
2771 | ||
2772 | /* Pack up the variables needed on completion */ | |
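| /* Cookie layout: bit 31 = replacing, bits 30:16 = table index, bits 15:0 = dmaq_id */ | |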
2773 | cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; | |
2774 | ||
2775 | efx_ef10_filter_push_prep(efx, spec, inbuf, | |
2776 | table->entry[ins_index].handle, replacing); | |
2777 | efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
2778 | MC_CMD_FILTER_OP_OUT_LEN, | |
2779 | efx_ef10_filter_rfs_insert_complete, cookie); | |
2780 | ||
2781 | return ins_index; | |
2782 | ||
2783 | fail_unlock: | |
2784 | spin_unlock_bh(&efx->filter_lock); | |
2785 | return rc; | |
2786 | } | |
2787 | ||
2788 | static void | |
2789 | efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, | |
2790 | int rc, efx_dword_t *outbuf, | |
2791 | size_t outlen_actual) | |
2792 | { | |
2793 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2794 | unsigned int ins_index, dmaq_id; | |
2795 | struct efx_filter_spec *spec; | |
2796 | bool replacing; | |
2797 | ||
2798 | /* Unpack the cookie */ | |
2799 | replacing = cookie >> 31; | |
2800 | ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); | |
2801 | dmaq_id = cookie & 0xffff; | |
2802 | ||
2803 | spin_lock_bh(&efx->filter_lock); | |
2804 | spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2805 | if (rc == 0) { | |
2806 | table->entry[ins_index].handle = | |
2807 | MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); | |
2808 | if (replacing) | |
2809 | spec->dmaq_id = dmaq_id; | |
2810 | } else if (!replacing) { | |
2811 | kfree(spec); | |
2812 | spec = NULL; | |
2813 | } | |
2814 | efx_ef10_filter_set_entry(table, ins_index, spec, 0); | |
2815 | spin_unlock_bh(&efx->filter_lock); | |
2816 | ||
2817 | wake_up_all(&table->waitq); | |
2818 | } | |
2819 | ||
2820 | static void | |
2821 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, | |
2822 | unsigned long filter_idx, | |
2823 | int rc, efx_dword_t *outbuf, | |
2824 | size_t outlen_actual); | |
2825 | ||
2826 | static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |
2827 | unsigned int filter_idx) | |
2828 | { | |
2829 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2830 | struct efx_filter_spec *spec = | |
2831 | efx_ef10_filter_entry_spec(table, filter_idx); | |
2832 | MCDI_DECLARE_BUF(inbuf, | |
2833 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + | |
2834 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); | |
2835 | ||
2836 | if (!spec || | |
2837 | (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || | |
2838 | spec->priority != EFX_FILTER_PRI_HINT || | |
2839 | !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, | |
2840 | flow_id, filter_idx)) | |
2841 | return false; | |
2842 | ||
2843 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2844 | MC_CMD_FILTER_OP_IN_OP_REMOVE); | |
2845 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2846 | table->entry[filter_idx].handle); | |
2847 | if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, | |
2848 | efx_ef10_filter_rfs_expire_complete, filter_idx)) | |
2849 | return false; | |
2850 | ||
2851 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2852 | return true; | |
2853 | } | |
2854 | ||
2855 | static void | |
2856 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, | |
2857 | unsigned long filter_idx, | |
2858 | int rc, efx_dword_t *outbuf, | |
2859 | size_t outlen_actual) | |
2860 | { | |
2861 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2862 | struct efx_filter_spec *spec = | |
2863 | efx_ef10_filter_entry_spec(table, filter_idx); | |
2864 | ||
2865 | spin_lock_bh(&efx->filter_lock); | |
2866 | if (rc == 0) { | |
2867 | kfree(spec); | |
2868 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2869 | } | |
2870 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2871 | wake_up_all(&table->waitq); | |
2872 | spin_unlock_bh(&efx->filter_lock); | |
2873 | } | |
2874 | ||
2875 | #endif /* CONFIG_RFS_ACCEL */ | |
2876 | ||
2877 | static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) | |
2878 | { | |
2879 | int match_flags = 0; | |
2880 | ||
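| /* Clear each MCDI match bit we recognise; any bits left over at the end mean | |
|  * the firmware reported a match type this driver does not understand. | |
|  */ | |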
2881 | #define MAP_FLAG(gen_flag, mcdi_field) { \ | |
2882 | u32 old_mcdi_flags = mcdi_flags; \ | |
2883 | mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ | |
2884 | mcdi_field ## _LBN); \ | |
2885 | if (mcdi_flags != old_mcdi_flags) \ | |
2886 | match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ | |
2887 | } | |
2888 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); | |
2889 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); | |
2890 | MAP_FLAG(REM_HOST, SRC_IP); | |
2891 | MAP_FLAG(LOC_HOST, DST_IP); | |
2892 | MAP_FLAG(REM_MAC, SRC_MAC); | |
2893 | MAP_FLAG(REM_PORT, SRC_PORT); | |
2894 | MAP_FLAG(LOC_MAC, DST_MAC); | |
2895 | MAP_FLAG(LOC_PORT, DST_PORT); | |
2896 | MAP_FLAG(ETHER_TYPE, ETHER_TYPE); | |
2897 | MAP_FLAG(INNER_VID, INNER_VLAN); | |
2898 | MAP_FLAG(OUTER_VID, OUTER_VLAN); | |
2899 | MAP_FLAG(IP_PROTO, IP_PROTO); | |
2900 | #undef MAP_FLAG | |
2901 | ||
2902 | /* Did we map them all? */ | |
2903 | if (mcdi_flags) | |
2904 | return -EINVAL; | |
2905 | ||
2906 | return match_flags; | |
2907 | } | |
2908 | ||
2909 | static int efx_ef10_filter_table_probe(struct efx_nic *efx) | |
2910 | { | |
2911 | MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); | |
2912 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); | |
2913 | unsigned int pd_match_pri, pd_match_count; | |
2914 | struct efx_ef10_filter_table *table; | |
2915 | size_t outlen; | |
2916 | int rc; | |
2917 | ||
2918 | table = kzalloc(sizeof(*table), GFP_KERNEL); | |
2919 | if (!table) | |
2920 | return -ENOMEM; | |
2921 | ||
2922 | /* Find out which RX filter types are supported, and their priorities */ | |
2923 | MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, | |
2924 | MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); | |
2925 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, | |
2926 | inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), | |
2927 | &outlen); | |
2928 | if (rc) | |
2929 | goto fail; | |
2930 | pd_match_count = MCDI_VAR_ARRAY_LEN( | |
2931 | outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); | |
2932 | table->rx_match_count = 0; | |
2933 | ||
2934 | for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { | |
2935 | u32 mcdi_flags = | |
2936 | MCDI_ARRAY_DWORD( | |
2937 | outbuf, | |
2938 | GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, | |
2939 | pd_match_pri); | |
2940 | rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); | |
2941 | if (rc < 0) { | |
2942 | netif_dbg(efx, probe, efx->net_dev, | |
2943 | "%s: fw flags %#x pri %u not supported in driver\n", | |
2944 | __func__, mcdi_flags, pd_match_pri); | |
2945 | } else { | |
2946 | netif_dbg(efx, probe, efx->net_dev, | |
2947 | "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", | |
2948 | __func__, mcdi_flags, pd_match_pri, | |
2949 | rc, table->rx_match_count); | |
2950 | table->rx_match_flags[table->rx_match_count++] = rc; | |
2951 | } | |
2952 | } | |
2953 | ||
2954 | table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); | |
2955 | if (!table->entry) { | |
2956 | rc = -ENOMEM; | |
2957 | goto fail; | |
2958 | } | |
2959 | ||
2960 | efx->filter_state = table; | |
2961 | init_waitqueue_head(&table->waitq); | |
2962 | return 0; | |
2963 | ||
2964 | fail: | |
2965 | kfree(table); | |
2966 | return rc; | |
2967 | } | |
2968 | ||
2969 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) | |
2970 | { | |
2971 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2972 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2973 | struct efx_filter_spec *spec; | |
2974 | unsigned int filter_idx; | |
2975 | bool failed = false; | |
2976 | int rc; | |
2977 | ||
2978 | if (!nic_data->must_restore_filters) | |
2979 | return; | |
2980 | ||
2981 | spin_lock_bh(&efx->filter_lock); | |
2982 | ||
2983 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2984 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2985 | if (!spec) | |
2986 | continue; | |
2987 | ||
2988 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2989 | spin_unlock_bh(&efx->filter_lock); | |
2990 | ||
2991 | rc = efx_ef10_filter_push(efx, spec, | |
2992 | &table->entry[filter_idx].handle, | |
2993 | false); | |
2994 | if (rc) | |
2995 | failed = true; | |
2996 | ||
2997 | spin_lock_bh(&efx->filter_lock); | |
2998 | if (rc) { | |
2999 | kfree(spec); | |
3000 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
3001 | } else { | |
3002 | table->entry[filter_idx].spec &= | |
3003 | ~EFX_EF10_FILTER_FLAG_BUSY; | |
3004 | } | |
3005 | } | |
3006 | ||
3007 | spin_unlock_bh(&efx->filter_lock); | |
3008 | ||
3009 | if (failed) | |
3010 | netif_err(efx, hw, efx->net_dev, | |
3011 | "unable to restore all filters\n"); | |
3012 | else | |
3013 | nic_data->must_restore_filters = false; | |
3014 | } | |
3015 | ||
3016 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) | |
3017 | { | |
3018 | struct efx_ef10_filter_table *table = efx->filter_state; | |
3019 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
3020 | struct efx_filter_spec *spec; | |
3021 | unsigned int filter_idx; | |
3022 | int rc; | |
3023 | ||
3024 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
3025 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
3026 | if (!spec) | |
3027 | continue; | |
3028 | ||
3029 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
3030 | efx_ef10_filter_is_exclusive(spec) ? | |
3031 | MC_CMD_FILTER_OP_IN_OP_REMOVE : | |
3032 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
3033 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
3034 | table->entry[filter_idx].handle); | |
3035 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
3036 | NULL, 0, NULL); | |
3037 | ||
3038 | WARN_ON(rc != 0); | |
3039 | kfree(spec); | |
3040 | } | |
3041 | ||
3042 | vfree(table->entry); | |
3043 | kfree(table); | |
3044 | } | |
3045 | ||
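| /* Synchronise the hardware filters with the net_device RX mode using a | |
|  * mark-and-sweep scheme: mark the current stack-owned filters STACK_OLD, | |
|  * insert filters for the new unicast/multicast address lists (falling | |
|  * back to default-match filters when the lists overflow or insertion | |
|  * fails), then remove any marked filters that were not renewed. | |
|  */ | |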
3046 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) | |
3047 | { | |
3048 | struct efx_ef10_filter_table *table = efx->filter_state; | |
3049 | struct net_device *net_dev = efx->net_dev; | |
3050 | struct efx_filter_spec spec; | |
3051 | bool remove_failed = false; | |
3052 | struct netdev_hw_addr *uc; | |
3053 | struct netdev_hw_addr *mc; | |
3054 | unsigned int filter_idx; | |
3055 | int i, n, rc; | |
3056 | ||
3057 | if (!efx_dev_registered(efx)) | |
3058 | return; | |
3059 | ||
3060 | /* Mark old filters that may need to be removed */ | |
3061 | spin_lock_bh(&efx->filter_lock); | |
3062 | n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count; | |
3063 | for (i = 0; i < n; i++) { | |
3064 | filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS; | |
3065 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; | |
3066 | } | |
3067 | n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count; | |
3068 | for (i = 0; i < n; i++) { | |
3069 | filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS; | |
3070 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; | |
3071 | } | |
3072 | spin_unlock_bh(&efx->filter_lock); | |
3073 | ||
3074 | /* Copy/convert the address lists; add the primary station | |
3075 | * address and broadcast address | |
3076 | */ | |
3077 | netif_addr_lock_bh(net_dev); | |
3078 | if (net_dev->flags & IFF_PROMISC || | |
3079 | netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) { | |
3080 | table->stack_uc_count = -1; | |
3081 | } else { | |
3082 | table->stack_uc_count = 1 + netdev_uc_count(net_dev); | |
3083 | memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr, | |
3084 | ETH_ALEN); | |
3085 | i = 1; | |
3086 | netdev_for_each_uc_addr(uc, net_dev) { | |
3087 | memcpy(table->stack_uc_list[i].addr, | |
3088 | uc->addr, ETH_ALEN); | |
3089 | i++; | |
3090 | } | |
3091 | } | |
3092 | if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || | |
3093 | netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) { | |
3094 | table->stack_mc_count = -1; | |
3095 | } else { | |
3096 | table->stack_mc_count = 1 + netdev_mc_count(net_dev); | |
3097 | eth_broadcast_addr(table->stack_mc_list[0].addr); | |
3098 | i = 1; | |
3099 | netdev_for_each_mc_addr(mc, net_dev) { | |
3100 | memcpy(table->stack_mc_list[i].addr, | |
3101 | mc->addr, ETH_ALEN); | |
3102 | i++; | |
3103 | } | |
3104 | } | |
3105 | netif_addr_unlock_bh(net_dev); | |
3106 | ||
3107 | /* Insert/renew unicast filters */ | |
3108 | if (table->stack_uc_count >= 0) { | |
3109 | for (i = 0; i < table->stack_uc_count; i++) { | |
3110 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3111 | EFX_FILTER_FLAG_RX_RSS | | |
3112 | EFX_FILTER_FLAG_RX_STACK, | |
3113 | 0); | |
3114 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | |
3115 | table->stack_uc_list[i].addr); | |
3116 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3117 | if (rc < 0) { | |
3118 | /* Fall back to unicast-promisc */ | |
3119 | while (i--) | |
3120 | efx_ef10_filter_remove_safe( | |
3121 | efx, EFX_FILTER_PRI_REQUIRED, | |
3122 | table->stack_uc_list[i].id); | |
3123 | table->stack_uc_count = -1; | |
3124 | break; | |
3125 | } | |
3126 | table->stack_uc_list[i].id = rc; | |
3127 | } | |
3128 | } | |
3129 | if (table->stack_uc_count < 0) { | |
3130 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3131 | EFX_FILTER_FLAG_RX_RSS | | |
3132 | EFX_FILTER_FLAG_RX_STACK, | |
3133 | 0); | |
3134 | efx_filter_set_uc_def(&spec); | |
3135 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3136 | if (rc < 0) { | |
3137 | WARN_ON(1); | |
3138 | table->stack_uc_count = 0; | |
3139 | } else { | |
3140 | table->stack_uc_list[0].id = rc; | |
3141 | } | |
3142 | } | |
3143 | ||
3144 | /* Insert/renew multicast filters */ | |
3145 | if (table->stack_mc_count >= 0) { | |
3146 | for (i = 0; i < table->stack_mc_count; i++) { | |
3147 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3148 | EFX_FILTER_FLAG_RX_RSS | | |
3149 | EFX_FILTER_FLAG_RX_STACK, | |
3150 | 0); | |
3151 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | |
3152 | table->stack_mc_list[i].addr); | |
3153 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3154 | if (rc < 0) { | |
3155 | /* Fall back to multicast-promisc */ | |
3156 | while (i--) | |
3157 | efx_ef10_filter_remove_safe( | |
3158 | efx, EFX_FILTER_PRI_REQUIRED, | |
3159 | table->stack_mc_list[i].id); | |
3160 | table->stack_mc_count = -1; | |
3161 | break; | |
3162 | } | |
3163 | table->stack_mc_list[i].id = rc; | |
3164 | } | |
3165 | } | |
3166 | if (table->stack_mc_count < 0) { | |
3167 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3168 | EFX_FILTER_FLAG_RX_RSS | | |
3169 | EFX_FILTER_FLAG_RX_STACK, | |
3170 | 0); | |
3171 | efx_filter_set_mc_def(&spec); | |
3172 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3173 | if (rc < 0) { | |
3174 | WARN_ON(1); | |
3175 | table->stack_mc_count = 0; | |
3176 | } else { | |
3177 | table->stack_mc_list[0].id = rc; | |
3178 | } | |
3179 | } | |
3180 | ||
3181 | /* Remove filters that weren't renewed. Since nothing else | |
3182 | * changes the STACK_OLD flag or removes these filters, we | |
3183 | * don't need to hold the filter_lock while scanning for | |
3184 | * these filters. | |
3185 | */ | |
3186 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { | |
3187 | if (ACCESS_ONCE(table->entry[i].spec) & | |
3188 | EFX_EF10_FILTER_FLAG_STACK_OLD) { | |
3189 | if (efx_ef10_filter_remove_internal(efx, | |
3190 | EFX_FILTER_PRI_REQUIRED, | |
3191 | i, true) < 0) | |
3192 | remove_failed = true; | |
3193 | } | |
3194 | } | |
3195 | WARN_ON(remove_failed); | |
3196 | } | |
3197 | ||
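| /* Bring the RX filters up to date with the net_device address lists and | |
|  * flags, then push the MAC settings to the MC. | |
|  */ | |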
3198 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) | |
3199 | { | |
3200 | efx_ef10_filter_sync_rx_mode(efx); | |
3201 | ||
3202 | return efx_mcdi_set_mac(efx); | |
3203 | } | |
3204 | ||
74cd60a4 JC |
3205 | static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) |
3206 | { | |
3207 | MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); | |
3208 | ||
3209 | MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); | |
3210 | return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), | |
3211 | NULL, 0, NULL); | |
3212 | } | |
3213 | ||
3214 | /* MC BISTs follow a different poll mechanism to phy BISTs. | |
3215 | * The BIST is done in the poll handler on the MC, and the MCDI command | |
3216 | * will block until the BIST is done. | |
3217 | */ | |
3218 | static int efx_ef10_poll_bist(struct efx_nic *efx) | |
3219 | { | |
3220 | int rc; | |
3221 | MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); | |
3222 | size_t outlen; | |
3223 | u32 result; | |
3224 | ||
3225 | rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, | |
3226 | outbuf, sizeof(outbuf), &outlen); | |
3227 | if (rc != 0) | |
3228 | return rc; | |
3229 | ||
3230 | if (outlen < MC_CMD_POLL_BIST_OUT_LEN) | |
3231 | return -EIO; | |
3232 | ||
3233 | result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); | |
3234 | switch (result) { | |
3235 | case MC_CMD_POLL_BIST_PASSED: | |
3236 | netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); | |
3237 | return 0; | |
3238 | case MC_CMD_POLL_BIST_TIMEOUT: | |
3239 | netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); | |
3240 | return -EIO; | |
3241 | case MC_CMD_POLL_BIST_FAILED: | |
3242 | netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); | |
3243 | return -EIO; | |
3244 | default: | |
3245 | netif_err(efx, hw, efx->net_dev, | |
3246 | "BIST returned unknown result %u\n", result); | |
3247 | return -EIO; | |
3248 | } | |
3249 | } | |
3250 | ||
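| /* Start the requested BIST and block until the MC reports its result. */ | |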
3251 | static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) | |
3252 | { | |
3253 | int rc; | |
3254 | ||
3255 | netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); | |
3256 | ||
3257 | rc = efx_ef10_start_bist(efx, bist_type); | |
3258 | if (rc != 0) | |
3259 | return rc; | |
3260 | ||
3261 | return efx_ef10_poll_bist(efx); | |
3262 | } | |
3263 | ||
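| /* Offline chip self-test: take the port down, switch the MC into offline | |
|  * BIST mode, run the memory and register BISTs, then use a WORLD reset to | |
|  * return to normal operation.  Results follow the selftest convention of | |
|  * 1 for pass and -1 for failure. | |
|  */ | |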
3264 | static int | |
3265 | efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | |
3266 | { | |
3267 | int rc, rc2; | |
3268 | ||
3269 | efx_reset_down(efx, RESET_TYPE_WORLD); | |
3270 | ||
3271 | rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, | |
3272 | NULL, 0, NULL, 0, NULL); | |
3273 | if (rc != 0) | |
3274 | goto out; | |
3275 | ||
3276 | tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; | |
3277 | tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; | |
3278 | ||
3279 | rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); | |
3280 | ||
3281 | out: | |
3282 | rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); | |
3283 | return rc ? rc : rc2; | |
3284 | } | |
3285 | ||
8127d661 BH |
3286 | #ifdef CONFIG_SFC_MTD |
3287 | ||
3288 | struct efx_ef10_nvram_type_info { | |
3289 | u16 type, type_mask; | |
3290 | u8 port; | |
3291 | const char *name; | |
3292 | }; | |
3293 | ||
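| /* NVRAM partition types exposed as MTD devices.  A non-zero type_mask | |
|  * lets one entry cover a range of partition types (e.g. the per-PHY | |
|  * firmware partitions). | |
|  */ | |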
3294 | static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { | |
3295 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, | |
3296 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, | |
3297 | { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, | |
3298 | { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, | |
3299 | { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, | |
3300 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, | |
3301 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, | |
3302 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, | |
3303 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, | |
3304 | { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, | |
3305 | }; | |
3306 | ||
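| /* Describe one NVRAM partition as an MTD partition: match the type against | |
|  * efx_ef10_nvram_types, skip partitions for other ports or marked as | |
|  * protected, and fill in size, erase size and firmware subtype from the | |
|  * NVRAM info/metadata MCDI responses. | |
|  */ | |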
3307 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |
3308 | struct efx_mcdi_mtd_partition *part, | |
3309 | unsigned int type) | |
3310 | { | |
3311 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); | |
3312 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); | |
3313 | const struct efx_ef10_nvram_type_info *info; | |
3314 | size_t size, erase_size, outlen; | |
3315 | bool protected; | |
3316 | int rc; | |
3317 | ||
3318 | for (info = efx_ef10_nvram_types; ; info++) { | |
3319 | if (info == | |
3320 | efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) | |
3321 | return -ENODEV; | |
3322 | if ((type & ~info->type_mask) == info->type) | |
3323 | break; | |
3324 | } | |
3325 | if (info->port != efx_port_num(efx)) | |
3326 | return -ENODEV; | |
3327 | ||
3328 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); | |
3329 | if (rc) | |
3330 | return rc; | |
3331 | if (protected) | |
3332 | return -ENODEV; /* hide it */ | |
3333 | ||
3334 | part->nvram_type = type; | |
3335 | ||
3336 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); | |
3337 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), | |
3338 | outbuf, sizeof(outbuf), &outlen); | |
3339 | if (rc) | |
3340 | return rc; | |
3341 | if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) | |
3342 | return -EIO; | |
3343 | if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & | |
3344 | (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) | |
3345 | part->fw_subtype = MCDI_DWORD(outbuf, | |
3346 | NVRAM_METADATA_OUT_SUBTYPE); | |
3347 | ||
3348 | part->common.dev_type_name = "EF10 NVRAM manager"; | |
3349 | part->common.type_name = info->name; | |
3350 | ||
3351 | part->common.mtd.type = MTD_NORFLASH; | |
3352 | part->common.mtd.flags = MTD_CAP_NORFLASH; | |
3353 | part->common.mtd.size = size; | |
3354 | part->common.mtd.erasesize = erase_size; | |
3355 | ||
3356 | return 0; | |
3357 | } | |
3358 | ||
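| /* Enumerate the NVRAM partitions reported by the MC and register an MTD | |
|  * device for each recognised, unprotected partition on this port. | |
|  */ | |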
3359 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | |
3360 | { | |
3361 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | |
3362 | struct efx_mcdi_mtd_partition *parts; | |
3363 | size_t outlen, n_parts_total, i, n_parts; | |
3364 | unsigned int type; | |
3365 | int rc; | |
3366 | ||
3367 | ASSERT_RTNL(); | |
3368 | ||
3369 | BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); | |
3370 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, | |
3371 | outbuf, sizeof(outbuf), &outlen); | |
3372 | if (rc) | |
3373 | return rc; | |
3374 | if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) | |
3375 | return -EIO; | |
3376 | ||
3377 | n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); | |
3378 | if (n_parts_total > | |
3379 | MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) | |
3380 | return -EIO; | |
3381 | ||
3382 | parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); | |
3383 | if (!parts) | |
3384 | return -ENOMEM; | |
3385 | ||
3386 | n_parts = 0; | |
3387 | for (i = 0; i < n_parts_total; i++) { | |
3388 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, | |
3389 | i); | |
3390 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); | |
3391 | if (rc == 0) | |
3392 | n_parts++; | |
3393 | else if (rc != -ENODEV) | |
3394 | goto fail; | |
3395 | } | |
3396 | ||
3397 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); | |
3398 | fail: | |
3399 | if (rc) | |
3400 | kfree(parts); | |
3401 | return rc; | |
3402 | } | |
3403 | ||
3404 | #endif /* CONFIG_SFC_MTD */ | |
3405 | ||
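| /* Write the host clock time into the MC doorbell register, as part of | |
|  * PTP host/MC time synchronisation. | |
|  */ | |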
3406 | static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) | |
3407 | { | |
3408 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); | |
3409 | } | |
3410 | ||
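| /* Method table binding Huntington A0 (EF10) hardware to the generic | |
|  * driver core. | |
|  */ | |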
3411 | const struct efx_nic_type efx_hunt_a0_nic_type = { | |
3412 | .mem_map_size = efx_ef10_mem_map_size, | |
3413 | .probe = efx_ef10_probe, | |
3414 | .remove = efx_ef10_remove, | |
3415 | .dimension_resources = efx_ef10_dimension_resources, | |
3416 | .init = efx_ef10_init_nic, | |
3417 | .fini = efx_port_dummy_op_void, | |
3418 | .map_reset_reason = efx_mcdi_map_reset_reason, | |
3419 | .map_reset_flags = efx_ef10_map_reset_flags, | |
3420 | .reset = efx_mcdi_reset, | |
3421 | .probe_port = efx_mcdi_port_probe, | |
3422 | .remove_port = efx_mcdi_port_remove, | |
3423 | .fini_dmaq = efx_ef10_fini_dmaq, | |
3424 | .describe_stats = efx_ef10_describe_stats, | |
3425 | .update_stats = efx_ef10_update_stats, | |
3426 | .start_stats = efx_mcdi_mac_start_stats, | |
f8f3b5ae | 3427 | .pull_stats = efx_mcdi_mac_pull_stats, |
8127d661 BH |
3428 | .stop_stats = efx_mcdi_mac_stop_stats, |
3429 | .set_id_led = efx_mcdi_set_id_led, | |
3430 | .push_irq_moderation = efx_ef10_push_irq_moderation, | |
3431 | .reconfigure_mac = efx_ef10_mac_reconfigure, | |
3432 | .check_mac_fault = efx_mcdi_mac_check_fault, | |
3433 | .reconfigure_port = efx_mcdi_port_reconfigure, | |
3434 | .get_wol = efx_ef10_get_wol, | |
3435 | .set_wol = efx_ef10_set_wol, | |
3436 | .resume_wol = efx_port_dummy_op_void, | |
74cd60a4 | 3437 | .test_chip = efx_ef10_test_chip, |
8127d661 BH |
3438 | .test_nvram = efx_mcdi_nvram_test_all, |
3439 | .mcdi_request = efx_ef10_mcdi_request, | |
3440 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | |
3441 | .mcdi_read_response = efx_ef10_mcdi_read_response, | |
3442 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | |
3443 | .irq_enable_master = efx_port_dummy_op_void, | |
3444 | .irq_test_generate = efx_ef10_irq_test_generate, | |
3445 | .irq_disable_non_ev = efx_port_dummy_op_void, | |
3446 | .irq_handle_msi = efx_ef10_msi_interrupt, | |
3447 | .irq_handle_legacy = efx_ef10_legacy_interrupt, | |
3448 | .tx_probe = efx_ef10_tx_probe, | |
3449 | .tx_init = efx_ef10_tx_init, | |
3450 | .tx_remove = efx_ef10_tx_remove, | |
3451 | .tx_write = efx_ef10_tx_write, | |
3452 | .rx_push_indir_table = efx_ef10_rx_push_indir_table, | |
3453 | .rx_probe = efx_ef10_rx_probe, | |
3454 | .rx_init = efx_ef10_rx_init, | |
3455 | .rx_remove = efx_ef10_rx_remove, | |
3456 | .rx_write = efx_ef10_rx_write, | |
3457 | .rx_defer_refill = efx_ef10_rx_defer_refill, | |
3458 | .ev_probe = efx_ef10_ev_probe, | |
3459 | .ev_init = efx_ef10_ev_init, | |
3460 | .ev_fini = efx_ef10_ev_fini, | |
3461 | .ev_remove = efx_ef10_ev_remove, | |
3462 | .ev_process = efx_ef10_ev_process, | |
3463 | .ev_read_ack = efx_ef10_ev_read_ack, | |
3464 | .ev_test_generate = efx_ef10_ev_test_generate, | |
3465 | .filter_table_probe = efx_ef10_filter_table_probe, | |
3466 | .filter_table_restore = efx_ef10_filter_table_restore, | |
3467 | .filter_table_remove = efx_ef10_filter_table_remove, | |
3468 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, | |
3469 | .filter_insert = efx_ef10_filter_insert, | |
3470 | .filter_remove_safe = efx_ef10_filter_remove_safe, | |
3471 | .filter_get_safe = efx_ef10_filter_get_safe, | |
3472 | .filter_clear_rx = efx_ef10_filter_clear_rx, | |
3473 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, | |
3474 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, | |
3475 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, | |
3476 | #ifdef CONFIG_RFS_ACCEL | |
3477 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, | |
3478 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, | |
3479 | #endif | |
3480 | #ifdef CONFIG_SFC_MTD | |
3481 | .mtd_probe = efx_ef10_mtd_probe, | |
3482 | .mtd_rename = efx_mcdi_mtd_rename, | |
3483 | .mtd_read = efx_mcdi_mtd_read, | |
3484 | .mtd_erase = efx_mcdi_mtd_erase, | |
3485 | .mtd_write = efx_mcdi_mtd_write, | |
3486 | .mtd_sync = efx_mcdi_mtd_sync, | |
3487 | #endif | |
3488 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | |
3489 | ||
3490 | .revision = EFX_REV_HUNT_A0, | |
3491 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | |
3492 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | |
3493 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | |
3494 | .can_rx_scatter = true, | |
3495 | .always_rx_scatter = true, | |
3496 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | |
3497 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, | |
3498 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3499 | NETIF_F_RXHASH | NETIF_F_NTUPLE), | |
3500 | .mcdi_max_ver = 2, | |
3501 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, | |
3502 | }; |