Merge branch 'sctp-transport-rhashtable'
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "eswitch.h"

struct mlx5e_rq_param {
        u32 rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param wq;
};

struct mlx5e_sq_param {
        u32 sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param wq;
        u16 max_inline;
};

struct mlx5e_cq_param {
        u32 cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param wq;
        u16 eq_ix;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param rq;
        struct mlx5e_sq_param sq;
        struct mlx5e_cq_param rx_cq;
        struct mlx5e_cq_param tx_cq;
};

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                        MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP)
                netif_carrier_on(priv->netdev);
        else
                netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

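/*
 * Port counters are read through the PPCNT access register, one query per
 * counter group (IEEE 802.3, RFC 2863, RFC 2819), reusing the same
 * input/output mailboxes for all three queries.
 */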
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_pport_stats *s = &priv->stats.pport;
        u32 *in;
        u32 *out;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        in = mlx5_vzalloc(sz);
        out = mlx5_vzalloc(sz);
        if (!in || !out)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->IEEE_802_3_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->IEEE_802_3_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2863_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2863_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2819_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2819_counters));

free_out:
        kvfree(in);
        kvfree(out);
}

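/*
 * Software (per-ring) counters are aggregated first, then the firmware is
 * queried via QUERY_VPORT_COUNTER; the derived offload counters at the end
 * are computed from the difference between the two views.
 */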
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_vport_stats *s = &priv->stats.vport;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u64 tx_offload_none;
        int i, j;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        /* Collect the SW counters first, then the HW ones, for consistency */
        s->tso_packets = 0;
        s->tso_bytes = 0;
        s->tx_queue_stopped = 0;
        s->tx_queue_wake = 0;
        s->tx_queue_dropped = 0;
        tx_offload_none = 0;
        s->lro_packets = 0;
        s->lro_bytes = 0;
        s->rx_csum_none = 0;
        s->rx_csum_sw = 0;
        s->rx_wqe_err = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->lro_packets += rq_stats->lro_packets;
                s->lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_sw += rq_stats->csum_sw;
                s->rx_wqe_err += rq_stats->wqe_err;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tso_packets += sq_stats->tso_packets;
                        s->tso_bytes += sq_stats->tso_bytes;
                        s->tx_queue_stopped += sq_stats->stopped;
                        s->tx_queue_wake += sq_stats->wake;
                        s->tx_queue_dropped += sq_stats->dropped;
                        tx_offload_none += sq_stats->csum_offload_none;
                }
        }

        /* HW counters */
        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        s->rx_error_packets =
                MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes =
                MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets =
                MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes =
                MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets =
                MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes =
                MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
                MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes =
                MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes =
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        s->rx_packets =
                s->rx_unicast_packets +
                s->rx_multicast_packets +
                s->rx_broadcast_packets;
        s->rx_bytes =
                s->rx_unicast_bytes +
                s->rx_multicast_bytes +
                s->rx_broadcast_bytes;
        s->tx_packets =
                s->tx_unicast_packets +
                s->tx_multicast_packets +
                s->tx_broadcast_packets;
        s->tx_bytes =
                s->tx_unicast_bytes +
                s->tx_multicast_bytes +
                s->tx_broadcast_bytes;

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good = s->rx_packets - s->rx_csum_none -
                          s->rx_csum_sw;

        mlx5e_update_pport_counters(priv);
free_out:
        kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
                schedule_delayed_work(dwork,
                                      msecs_to_jiffies(
                                              MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
                                enum mlx5_dev_event event)
{
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                schedule_work(&priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        spin_lock(&priv->async_events_spinlock);
        if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
                __mlx5e_async_event(priv, event);
        spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        spin_unlock_irq(&priv->async_events_spinlock);
}

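/*
 * The HW MTU accounts for the Ethernet header, one VLAN tag and the FCS,
 * while the netdev MTU does not; these macros convert between the two.
 */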
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

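/*
 * Allocate the RQ's linked-list work queue and per-WQE skb array on the
 * channel's NUMA node, then pre-fill every RX WQE: the buffer size covers
 * either an LRO session or one SW2HW MTU, aligned after NET_IP_ALIGN.
 */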
static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                               cpu_to_node(c->cpu));
        if (!rq->skb) {
                err = -ENOMEM;
                goto err_rq_wq_destroy;
        }

        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
                     MLX5E_SW2HW_MTU(priv->netdev->mtu);
        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

                wqe->data.lkey = c->mkey_be;
                wqe->data.byte_count =
                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }

        rq->pdev = c->pdev;
        rq->netdev = c->netdev;
        rq->channel = c;
        rq->ix = c->ix;
        rq->priv = c->priv;

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        kfree(rq->skb);
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, flush_in_error_en, 1);
        MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

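/*
 * RQ open/close follow the same lifecycle: create the SW resources, create
 * the HW object (in RST), move it RST->RDY, and kick WQE posting; teardown
 * walks the same steps in reverse, draining the queue first.
 */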
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        mlx5e_disable_rq(rq);
        mlx5e_destroy_rq(rq);
}

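/*
 * The DMA fifo keeps one unmap entry per WQE data segment; its size
 * (wq size * MLX5_SEND_WQEBB_NUM_DS) is a power of two, so a simple mask
 * handles wrap-around.
 */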
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int txq_ix;
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar);
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map = sq->uar.map;
        sq->uar_bf_map = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline = param->max_inline;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

        sq->pdev = c->pdev;
        sq->mkey_be = c->mkey_be;
        sq->channel = c;
        sq->tc = tc;
        sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
        MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, tis_lst_sz, 1);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, uar_page, sq->uar.index);
        MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
}

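/*
 * SQ open/close mirror the RQ lifecycle; close additionally posts a final
 * NOP (if there is room) so the HW flushes any pending WQEs, then waits
 * for the consumer counter to catch up with the producer (cc == pc).
 */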
static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
        netif_tx_disable_queue(sq->txq);

        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
                mlx5e_send_nop(sq, true);

        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}

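/*
 * CQEs are initialized with op_own = 0xf1, presumably an invalid
 * opcode/ownership value, so the poll loop skips entries the HW has not
 * yet written.
 */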
static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        param->eq_ix = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi = &c->napi;

        mcq->cqe_sz = 64;
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db = 0;
        mcq->vector = param->eq_ix;
        mcq->comp = mlx5e_completion_event;
        mcq->event = mlx5e_cq_error_event;
        mcq->irqn = irqn;
        mcq->uar = &priv->cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         u16 moderation_usecs,
                         u16 moderation_frames)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                             moderation_usecs,
                                             moderation_frames);
        if (err)
                goto err_destroy_cq;

        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

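/* Each channel is pinned to the first CPU in its IRQ's affinity mask. */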
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation_usec,
                                    priv->params.tx_cq_moderation_pkts);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
                priv->channeltc_to_txq_map[ix][i] =
                        ix + i * priv->params.num_channels;
}

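/*
 * A channel bundles one RQ, one SQ per TC, their CQs and a NAPI context,
 * all allocated on the NUMA node of the CPU the channel is pinned to.
 */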
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv = priv;
        c->ix = ix;
        c->cpu = cpu;
        c->pdev = &priv->mdev->pdev->dev;
        c->netdev = priv->netdev;
        c->mkey_be = cpu_to_be32(priv->mr.key);
        c->num_tc = priv->params.num_tc;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_napi_del:
        netif_napi_del(&c->napi);
        napi_hash_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        netif_napi_del(&c->napi);

        napi_hash_del(&c->napi);
        synchronize_rcu();

        kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd, priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_channel_param *cparam)
{
        memset(cparam, 0, sizeof(*cparam));

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

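/*
 * Channels are opened in two passes: first create them all, then wait for
 * each RQ to reach its minimum fill level. txq_to_sq_map has one entry per
 * TX queue (num_channels * num_tc).
 */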
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);

        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);

        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, &cparam);
        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

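/*
 * Fill the indirection RQT. For the XOR hash the table index bits are
 * bit-reversed (mlx5e_bits_invert), presumably to spread traffic more
 * evenly across channels; while the device is closed every entry points
 * at the drop RQ.
 */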
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
        int i;

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;

                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

                ix = priv->params.indirection_rqt[ix];
                ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[ix]->rq.rqn :
                         priv->drop_rq.rqn);
        }
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
{
        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
                mlx5e_fill_indir_rqt_rqns(priv, rqtc);

                break;

        default: /* MLX5E_SINGLE_RQ_RQT */
                MLX5_SET(rqtc, rqtc, rq_num[0],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[0]->rq.rqn :
                         priv->drop_rq.rqn);

                break;
        }
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

        kvfree(in);

        return err;
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
        mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}

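/*
 * lro_max_ip_payload_size is programmed in 256-byte units (hence the
 * >> 8); ROUGH_MAX_L2_L3_HDR_SZ is a conservative allowance for the
 * L2/L3 headers that share the LRO WQE with the payload.
 */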
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
        if (!priv->params.lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (priv->params.lro_wqe_sz -
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
                              lro_timer_supported_periods[2]));
}

static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

        mlx5e_build_tir_ctx_lro(tirc, priv);

        err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
                                                  u32 tirn)
{
        void *in;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);

        err = mlx5_core_modify_tir(mdev, tirn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
                                                             priv->tirn[i]);
                if (err)
                        return err;
        }

        return 0;
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
        if (err)
                return err;

        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
                netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
                            __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
        return 0;
}

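/*
 * The OPENED bit is set before the channels are created so that the RQT
 * fill helpers pick the real channel RQs (rather than the drop RQ) when
 * the tables are redirected below; on failure the bit is rolled back.
 */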
int mlx5e_open_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

        err = mlx5e_set_dev_port_mtu(netdev);
        if (err)
                goto err_clear_state_opened_flag;

        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
                           __func__, err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5e_refresh_tirs_self_loopback_enable(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
                           __func__, err);
                goto err_close_channels;
        }

        mlx5e_update_carrier(priv);
        mlx5e_redirect_rqts(priv);

        schedule_delayed_work(&priv->update_stats_work, 0);

        return 0;

err_close_channels:
        mlx5e_close_channels(priv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
        return err;
}

static int mlx5e_open(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        /* May already be CLOSED in case a previous configuration operation
         * (e.g RX/TX queue size change) that involves close&open failed.
         */
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_redirect_rqts(priv);
        netif_carrier_off(priv->netdev);
        mlx5e_close_channels(priv);

        return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

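/*
 * The drop RQ is a minimal RQ that is never posted to; the RQTs are
 * pointed at it whenever the real channels are closed, so RX steering
 * always has a valid destination.
 */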
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
                                struct mlx5e_rq *rq,
                                struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int err;

        param->wq.db_numa_node = param->wq.buf_numa_node;

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->priv = priv;

        return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
                                struct mlx5e_cq *cq,
                                struct mlx5e_cq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        mcq->cqe_sz = 64;
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db = 0;
        mcq->vector = param->eq_ix;
        mcq->comp = mlx5e_completion_event;
        mcq->event = mlx5e_cq_error_event;
        mcq->irqn = irqn;
        mcq->uar = &priv->cq_uar;

        cq->priv = priv;

        return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
        struct mlx5e_cq_param cq_param;
        struct mlx5e_rq_param rq_param;
        struct mlx5e_rq *rq = &priv->drop_rq;
        struct mlx5e_cq *cq = &priv->drop_rq.cq;
        int err;

        memset(&cq_param, 0, sizeof(cq_param));
        memset(&rq_param, 0, sizeof(rq_param));
        mlx5e_build_rx_cq_param(priv, &cq_param);
        mlx5e_build_rq_param(priv, &rq_param);

        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, &cq_param);
        if (err)
                goto err_destroy_cq;

        err = mlx5e_create_drop_rq(priv, rq, &rq_param);
        if (err)
                goto err_disable_cq;

        err = mlx5e_enable_rq(rq, &rq_param);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
        mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
        mlx5e_destroy_cq(&priv->drop_rq.cq);

        return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
        mlx5e_disable_rq(&priv->drop_rq);
        mlx5e_destroy_rq(&priv->drop_rq);
        mlx5e_disable_cq(&priv->drop_rq.cq);
        mlx5e_destroy_cq(&priv->drop_rq.cq);
}

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        memset(in, 0, sizeof(in));

        MLX5_SET(tisc, tisc, prio, tc);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
        int err;
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_create_tis(priv, tc);
                if (err)
                        goto err_close_tises;
        }

        return 0;

err_close_tises:
        for (tc--; tc >= 0; tc--)
                mlx5e_destroy_tis(priv, tc);

        return err;
}

static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_destroy_tis(priv, tc);
}

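/*
 * One TIR per traffic type: TCP/UDP types hash on the IP 4-tuple, IPsec
 * AH/ESP types on IP addresses plus SPI, plain IPv4/IPv6 on addresses
 * only, and MLX5E_TT_ANY bypasses RSS through the single-RQ RQT.
 */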
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP |\
                                 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP |\
                                 MLX5_HASH_FIELD_SEL_DST_IP |\
                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
                                 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP |\
                                 MLX5_HASH_FIELD_SEL_DST_IP |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

        mlx5e_build_tir_ctx_lro(tirc, priv);

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

        switch (tt) {
        case MLX5E_TT_ANY:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
                break;
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn,
                         mlx5e_rx_hash_fn(priv->params.rss_hfunc));
                if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
                        void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                                     rx_hash_toeplitz_key);
                        size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                                       rx_hash_toeplitz_key);

                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                        memcpy(rss_key, priv->params.toeplitz_hash_key, len);
                }
                break;
        }

        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;

        case MLX5E_TT_IPV6:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
        }
}

static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        mlx5e_build_tir_ctx(priv, tirc, tt);

        err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_create_tir(priv, i);
                if (err)
                        goto err_destroy_tirs;
        }

        return 0;

err_destroy_tirs:
        for (i--; i >= 0; i--)
                mlx5e_destroy_tir(priv, i);

        return err;
}

static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++)
                mlx5e_destroy_tir(priv, i);
}

static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;

        stats->rx_packets = vstats->rx_packets;
        stats->rx_bytes = vstats->rx_bytes;
        stats->tx_packets = vstats->tx_packets;
        stats->tx_bytes = vstats->tx_bytes;
        stats->multicast = vstats->rx_multicast_packets +
                           vstats->tx_multicast_packets;
        stats->tx_errors = vstats->tx_error_packets;
        stats->rx_errors = vstats->rx_error_packets;
        stats->tx_dropped = vstats->tx_queue_dropped;
        stats->rx_crc_errors = 0;
        stats->rx_length_errors = 0;

        return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        schedule_work(&priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        netif_addr_lock_bh(netdev);
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);

        schedule_work(&priv->set_rx_mode_work);

        return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err = 0;
        netdev_features_t changes = features ^ netdev->features;

        mutex_lock(&priv->state_lock);

        if (changes & NETIF_F_LRO) {
                bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

                if (was_opened)
                        mlx5e_close_locked(priv->netdev);

                priv->params.lro_en = !!(features & NETIF_F_LRO);
                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);

                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
        }

        mutex_unlock(&priv->state_lock);

        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        mlx5e_enable_vlan_filter(priv);
                else
                        mlx5e_disable_vlan_filter(priv);
        }

        return err;
}

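/*
 * Like the LRO toggle above, an MTU change bounces the interface: close
 * if it was opened, apply the new value, then reopen, all under
 * state_lock.
 */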
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
        int max_mtu;
        int err = 0;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

        max_mtu = MLX5E_HW2SW_MTU(max_mtu);

        if (new_mtu > max_mtu) {
                netdev_err(netdev,
                           "%s: Bad MTU (%d) > (%d) Max\n",
                           __func__, new_mtu, max_mtu);
                return -EINVAL;
        }

        mutex_lock(&priv->state_lock);

        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        if (was_opened)
                mlx5e_close_locked(netdev);

        netdev->mtu = new_mtu;

        if (was_opened)
                err = mlx5e_open_locked(netdev);

        mutex_unlock(&priv->state_lock);

        return err;
}

static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
                                           vlan, qos);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
        switch (esw_link) {
        case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
                return IFLA_VF_LINK_STATE_DISABLE;
        case MLX5_ESW_VPORT_ADMIN_STATE_UP:
                return IFLA_VF_LINK_STATE_ENABLE;
        }
        return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
        switch (ifla_link) {
        case IFLA_VF_LINK_STATE_DISABLE:
                return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
        case IFLA_VF_LINK_STATE_ENABLE:
                return MLX5_ESW_VPORT_ADMIN_STATE_UP;
        }
        return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
                                   int link_state)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
                                            mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
                               int vf, struct ifla_vf_info *ivi)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
        if (err)
                return err;
        ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
        return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
                              int vf, struct ifla_vf_stats *vf_stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
                                            vf_stats);
}

f62b8bb8
AV
2008static struct net_device_ops mlx5e_netdev_ops = {
2009 .ndo_open = mlx5e_open,
2010 .ndo_stop = mlx5e_close,
2011 .ndo_start_xmit = mlx5e_xmit,
2012 .ndo_get_stats64 = mlx5e_get_stats,
2013 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2014 .ndo_set_mac_address = mlx5e_set_mac,
2015 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2016 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2017 .ndo_set_features = mlx5e_set_features,
66e49ded 2018 .ndo_change_mtu = mlx5e_change_mtu
f62b8bb8
AV
2019};
2020
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

	return 0;
}

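/* The largest inline header that fits in a TX WQE: half the BlueFlame
 * register size, minus the fixed WQE segments, plus the 2-byte
 * inline_hdr_start field that already lives inside the WQE.
 */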
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /* sizeof(mlx5e_tx_wqe.inline_hdr_start) */;
}

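/* Fill in the software defaults: ring sizes, CQ moderation, RSS and LRO
 * parameters. Most of these can later be retuned through ethtool.
 */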
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_channels)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int i;

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size =
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	priv->params.rx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	priv->params.min_rx_wqes =
		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
	priv->params.num_tc = 1;
	priv->params.default_vlan_prio = 0;
	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

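	/* Spread the RSS indirection table across all channels round-robin */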
	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
		priv->params.indirection_rqt[i] = i % num_channels;

	priv->params.lro_wqe_sz =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->params.num_channels = num_channels;
	priv->default_vlan_prio = priv->params.default_vlan_prio;

	spin_lock_init(&priv->async_events_spinlock);
	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

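/* Use the firmware-provided MAC; if it reads back as zero and this
 * function is not the eswitch manager, fall back to a random address.
 */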
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

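/* Wire up the ndo/ethtool ops and advertise the offload features the
 * device capabilities allow.
 */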
static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (priv->params.num_tc > 1)
		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
		mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
		mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
		mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
		mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
	}

	netdev->netdev_ops = &mlx5e_netdev_ops;
	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}

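/* Create a physical-address (PA) mkey covering the whole address space
 * (MLX5_MKEY_LEN64), used as the local key for all data buffers.
 */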
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
			     struct mlx5_core_mr *mr)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	int err;

	in = mlx5_vzalloc(sizeof(*in));
	if (!in)
		return -ENOMEM;

	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
			MLX5_PERM_LOCAL_READ |
			MLX5_ACCESS_MODE_PA;
	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
				    NULL);

	kvfree(in);

	return err;
}

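/* Probe path: allocate the HW resources all channels will share (UAR,
 * PD, transport domain, mkey, TISes, drop RQ, RQTs, TIRs, flow tables),
 * then register the netdev. Errors unwind in strict reverse order.
 */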
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int nch = mlx5e_get_max_num_channels(mdev);
	int err;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	mlx5e_build_netdev_priv(mdev, netdev, nch);
	mlx5e_build_netdev(netdev);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
		goto err_free_netdev;
	}

	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
		goto err_unmap_free_uar;
	}

	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
		goto err_dealloc_pd;
	}

	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
		goto err_dealloc_transport_domain;
	}

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(mdev, "create tises failed, %d\n", err);
		goto err_destroy_mkey;
	}

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_tises;
	}

	err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
	if (err) {
		mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
		goto err_close_drop_rq;
	}

	err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	if (err) {
		mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
		goto err_destroy_rqt_indir;
	}

	err = mlx5e_create_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
		goto err_destroy_rqt_single;
	}

	err = mlx5e_create_flow_tables(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
		goto err_destroy_tirs;
	}

	mlx5e_init_eth_addr(priv);

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_destroy_flow_tables;
	}

	mlx5e_enable_async_events(priv);
	schedule_work(&priv->set_rx_mode_work);

	return priv;

err_destroy_flow_tables:
	mlx5e_destroy_flow_tables(priv);

err_destroy_tirs:
	mlx5e_destroy_tirs(priv);

err_destroy_rqt_single:
	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_destroy_rqt_indir:
	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_destroy_tises:
	mlx5e_destroy_tises(priv);

err_destroy_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

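/* Remove path: mark the priv as being destroyed so the rx-mode worker
 * bails out, flush pending work, then tear everything down in the
 * reverse order of mlx5e_create_netdev().
 */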
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	schedule_work(&priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
	flush_scheduled_work();
	unregister_netdev(netdev);
	mlx5e_destroy_flow_tables(priv);
	mlx5e_destroy_tirs(priv);
	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_close_drop_rq(priv);
	mlx5e_destroy_tises(priv);
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	free_netdev(netdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

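/* Register with mlx5_core as the Ethernet protocol consumer: the core
 * calls .add/.remove for each probed device and .event for async events.
 */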
static struct mlx5_interface mlx5e_interface = {
	.add		= mlx5e_create_netdev,
	.remove		= mlx5e_destroy_netdev,
	.event		= mlx5e_async_event,
	.protocol	= MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev	= mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}