net/mlx5_core: Introduce access function to read internal timer
deliverable/linux.git: drivers/net/ethernet/mellanox/mlx5/core/en_main.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "eswitch.h"

struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
	u16			max_inline;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
};

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP)
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

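/*
 * Read the IEEE 802.3, RFC 2863 and RFC 2819 counter groups via the
 * PPCNT access register, one mlx5_core_access_reg() query per group.
 */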
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	u32 *in;
	u32 *out;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (!in || !out)
		goto free_out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out,
			     sz, MLX5_REG_PPCNT, 0, 0);
	memcpy(s->IEEE_802_3_counters,
	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       sizeof(s->IEEE_802_3_counters));

	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out,
			     sz, MLX5_REG_PPCNT, 0, 0);
	memcpy(s->RFC_2863_counters,
	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       sizeof(s->RFC_2863_counters));

	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out,
			     sz, MLX5_REG_PPCNT, 0, 0);
	memcpy(s->RFC_2819_counters,
	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       sizeof(s->RFC_2819_counters));

free_out:
	kvfree(in);
	kvfree(out);
}

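/*
 * Fold the per-ring SW counters into the vport stats, then refresh the
 * HW counters with a QUERY_VPORT_COUNTER command and the PPCNT register.
 */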
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tx_offload_none;
	int i, j;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return;

	/* Collect first the SW counters and then HW for consistency */
	s->tso_packets = 0;
	s->tso_bytes = 0;
	s->tx_queue_stopped = 0;
	s->tx_queue_wake = 0;
	s->tx_queue_dropped = 0;
	tx_offload_none = 0;
	s->lro_packets = 0;
	s->lro_bytes = 0;
	s->rx_csum_none = 0;
	s->rx_csum_sw = 0;
	s->rx_wqe_err = 0;
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->lro_packets += rq_stats->lro_packets;
		s->lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_sw += rq_stats->csum_sw;
		s->rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tso_packets += sq_stats->tso_packets;
			s->tso_bytes += sq_stats->tso_bytes;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	s->rx_error_packets =
		MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
		MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
		MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
		MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
		MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
		MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
		MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
		s->rx_unicast_packets +
		s->rx_multicast_packets +
		s->rx_broadcast_packets;
	s->rx_bytes =
		s->rx_unicast_bytes +
		s->rx_multicast_bytes +
		s->rx_broadcast_bytes;
	s->tx_packets =
		s->tx_unicast_packets +
		s->tx_multicast_packets +
		s->tx_broadcast_packets;
	s->tx_bytes =
		s->tx_unicast_bytes +
		s->tx_multicast_bytes +
		s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good = s->rx_packets - s->rx_csum_none -
			  s->rx_csum_sw;

	mlx5e_update_pport_counters(priv);
free_out:
	kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_update_stats(priv);
		schedule_delayed_work(dwork,
				      msecs_to_jiffies(
					      MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
				enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		schedule_work(&priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	spin_lock(&priv->async_events_spinlock);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		__mlx5e_async_event(priv, event);
	spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	spin_lock_irq(&priv->async_events_spinlock);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	spin_unlock_irq(&priv->async_events_spinlock);
}

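/* The HW MTU also covers the Ethernet header, a VLAN tag and the FCS */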
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
			       cpu_to_node(c->cpu));
	if (!rq->skb) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	}

	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

		wqe->data.lkey = c->mkey_be;
		wqe->data.byte_count =
			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
	}

	rq->pdev = c->pdev;
	rq->netdev = c->netdev;
	rq->channel = c;
	rq->ix = c->ix;
	rq->priv = c->priv;

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	kfree(rq->skb);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

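/* Poll (for up to 20s) until the RQ holds at least min_rx_wqes posted WQEs */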
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	while (!mlx5_wq_ll_is_empty(&rq->wq))
		msleep(20);

	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
	napi_synchronize(&rq->channel->napi);

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->wqe_info);
	kfree(sq->dma_fifo);
	kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);
	sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
				    numa);

	if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map = sq->uar.map;
	sq->uar_bf_map = sq->uar.bf_map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline = param->max_inline;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	txq_ix = c->ix + tc * priv->params.num_channels;
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

	sq->pdev = c->pdev;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc = tc;
	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
	priv->txq_to_sq_map[txq_ix] = sq;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}

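/* Stop the txq while holding its TX lock, serializing against the xmit path */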
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
	netif_tx_disable_queue(sq->txq);

	/* ensure hw is notified of all pending wqes */
	if (mlx5e_sq_has_room_for(sq, 1))
		mlx5e_send_nop(sq, true);

	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	while (sq->cc != sq->pc) /* wait till sq is empty */
		msleep(20);

	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
	napi_synchronize(&sq->channel->napi);

	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi = &c->napi;

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 u16 moderation_usecs,
			 u16 moderation_frames)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					     moderation_usecs,
					     moderation_frames);
	if (err)
		goto err_destroy_cq;

	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation_usec,
				    priv->params.tx_cq_moderation_pkts);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}

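/*
 * TXQ index layout: txq_ix = ix + tc * num_channels, i.e. all channels of
 * TC0 first, then all channels of TC1, and so on (mlx5e_create_sq() uses
 * the same formula when binding an SQ to its txq).
 */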
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv = priv;
	c->ix = ix;
	c->cpu = cpu;
	c->pdev = &priv->mdev->pdev->dev;
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc = priv->params.num_tc;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);

	return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	netif_napi_del(&c->napi);

	napi_hash_del(&c->napi);
	synchronize_rcu();

	kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);

	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

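/* Bit-reverse the lowest 'size' bits of 'a', e.g. 0b0001 -> 0b1000 for size 4 */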
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

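/*
 * Resolve each indirection-table entry to an RQN: a channel RQ while the
 * netdev is opened, the drop RQ otherwise. Under the XOR RSS hash the
 * table index is bit-inverted first.
 */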
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		ix = ix % priv->params.num_channels;
		MLX5_SET(rqtc, rqtc, rq_num[i],
			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			 priv->channel[ix]->rq.rqn :
			 priv->drop_rq.rqn);
	}
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
				enum mlx5e_rqt_ix rqt_ix)
{
	switch (rqt_ix) {
	case MLX5E_INDIRECTION_RQT:
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);

		break;

	default: /* MLX5E_SINGLE_RQ_RQT */
		MLX5_SET(rqtc, rqtc, rq_num[0],
			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			 priv->channel[0]->rq.rqn :
			 priv->drop_rq.rqn);

		break;
	}
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *rqtc;
	int inlen;
	int sz;
	int err;

	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

	kvfree(in);

	return err;
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *rqtc;
	int inlen;
	int sz;
	int err;

	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}

static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}

static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
						  u32 tirn)
{
	void *in;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);

	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
							     priv->tirn[i]);
		if (err)
			return err;
	}

	return 0;
}

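/*
 * Program the port MTU and read back the operational value; the netdev
 * MTU follows whatever the port actually accepted.
 */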
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int hw_mtu;
	int err;

	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
	if (err)
		return err;

	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
	return 0;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		goto err_clear_state_opened_flag;

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_update_carrier(priv);
	mlx5e_redirect_rqts(priv);

	schedule_delayed_work(&priv->update_stats_work, 0);

	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

static int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. an RX/TX queue size change) that involves close & open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_redirect_rqts(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_close_channels(priv);

	return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

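/*
 * The drop RQ serves as the RQT destination while the netdev is closed
 * (see mlx5e_fill_rqt_rqns()), so steering rules never point at a
 * destroyed RQ.
 */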
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	cq->priv = priv;

	return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_rx_cq_param(priv, &cq_param);
	mlx5e_build_rq_param(priv, &rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc);
	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}

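/*
 * Build the TIR context for traffic type 'tt': MLX5E_TT_ANY uses the
 * single-RQ RQT with XOR8 hashing, all other types RSS-hash into the
 * indirection RQT on the fields selected per type below.
 */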
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP |\
				 MLX5_HASH_FIELD_SEL_DST_IP |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP |\
				 MLX5_HASH_FIELD_SEL_DST_IP |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

	switch (tt) {
	case MLX5E_TT_ANY:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
		break;
	default:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
		MLX5_SET(tirc, tirc, rx_hash_fn,
			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
						     rx_hash_toeplitz_key);
			size_t len = MLX5_FLD_SZ_BYTES(tirc,
						       rx_hash_toeplitz_key);

			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
			memcpy(rss_key, priv->params.toeplitz_hash_key, len);
		}
		break;
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	}
}

static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	mlx5e_build_tir_ctx(priv, tirc, tt);

	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_create_tir(priv, i);
		if (err)
			goto err_destroy_tirs;
	}

	return 0;

err_destroy_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv, i);

	return err;
}

static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++)
		mlx5e_destroy_tir(priv, i);
}

static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;

	stats->rx_packets = vstats->rx_packets;
	stats->rx_bytes = vstats->rx_bytes;
	stats->tx_packets = vstats->tx_packets;
	stats->tx_bytes = vstats->tx_bytes;
	stats->multicast = vstats->rx_multicast_packets +
			   vstats->tx_multicast_packets;
	stats->tx_errors = vstats->tx_error_packets;
	stats->rx_errors = vstats->rx_error_packets;
	stats->tx_dropped = vstats->tx_queue_dropped;
	stats->rx_crc_errors = 0;
	stats->rx_length_errors = 0;

	return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	schedule_work(&priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	schedule_work(&priv->set_rx_mode_work);

	return 0;
}

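/*
 * Toggling LRO changes the RQ WQE size, so the channels are closed and
 * reopened around the TIR LRO update while the netdev is up.
 */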
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;
	netdev_features_t changes = features ^ netdev->features;

	mutex_lock(&priv->state_lock);

	if (changes & NETIF_F_LRO) {
		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

		if (was_opened)
			mlx5e_close_locked(priv->netdev);

		priv->params.lro_en = !!(features & NETIF_F_LRO);
		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);

		if (was_opened)
			err = mlx5e_open_locked(priv->netdev);
	}

	mutex_unlock(&priv->state_lock);

	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			mlx5e_enable_vlan_filter(priv);
		else
			mlx5e_disable_vlan_filter(priv);
	}

	return err;
}

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	int max_mtu;
	int err = 0;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	max_mtu = MLX5E_HW2SW_MTU(max_mtu);

	if (new_mtu > max_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d) > (%d) Max\n",
			   __func__, new_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;

	if (was_opened)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

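/* E-switch vport 0 is the PF, so VF n is addressed as vport n + 1 */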
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}

f62b8bb8
AV
static struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
};
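
/*
 * Gate netdev creation on the HCA capabilities the driver relies on:
 * an Ethernet port plus checksum, LSO, VLAN and RSS offloads, and a
 * NIC receive flow table at least three levels deep.
 */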
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -ENOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
            !MLX5_CAP_ETH(mdev, vlan_cap) ||
            !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
            MLX5_CAP_FLOWTABLE(mdev,
                               flow_table_properties_nic_receive.max_ft_level)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
                return -ENOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

        return 0;
}
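
/*
 * The maximum amount of data a SQ can inline is bounded by the
 * blue-flame buffer (half the BF register); the TX WQE overhead is
 * subtracted, and the 2-byte inline_hdr_start is added back since it
 * is counted in the WQE size yet carries inline data.
 */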
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
        int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

        return bf_buf_size -
               sizeof(struct mlx5e_tx_wqe) +
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
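
/*
 * Initialize the software defaults of the private context: ring sizes,
 * CQ moderation, max TX inline, XOR RSS with a random Toeplitz key and
 * a round-robin indirection table over the channels, LRO parameters,
 * and the work items for carrier, rx-mode and stats updates.
 */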
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_channels)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int i;

        priv->params.log_sq_size =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.log_rq_size =
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        priv->params.rx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        priv->params.rx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        priv->params.tx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.num_tc = 1;
        priv->params.default_vlan_prio = 0;
        priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
                priv->params.indirection_rqt[i] = i % num_channels;

        priv->params.lro_wqe_sz =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        priv->mdev = mdev;
        priv->netdev = netdev;
        priv->params.num_channels = num_channels;
        priv->default_vlan_prio = priv->params.default_vlan_prio;

        spin_lock_init(&priv->async_events_spinlock);
        mutex_init(&priv->state_lock);

        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
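
/*
 * Query the permanent MAC from the NIC vport context. If firmware
 * reports all zeros and this function is not the vport group manager,
 * fall back to a random MAC so the interface remains usable.
 */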
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
        if (is_zero_ether_addr(netdev->dev_addr) &&
            !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
                eth_hw_addr_random(netdev);
                mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
        }
}
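
/*
 * Wire up the net_device: ops, ethtool ops, watchdog timeout, and the
 * feature flags allowed by the HCA capabilities (LRO is only offered
 * when lro_cap is set, and only enabled if requested in the params).
 */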
static void mlx5e_build_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        if (priv->params.num_tc > 1)
                mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;

        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
                mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
                mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
                mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
                mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
                mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
        }

        netdev->netdev_ops = &mlx5e_netdev_ops;
        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_ethtool_ops;

        netdev->vlan_features |= NETIF_F_SG;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_GRO;
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_RXCSUM;
        netdev->vlan_features |= NETIF_F_RXHASH;

        if (MLX5_CAP_ETH(mdev, lro_cap))
                netdev->vlan_features |= NETIF_F_LRO;

        netdev->hw_features = netdev->vlan_features;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->features = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features &= ~NETIF_F_LRO;

        netdev->features |= NETIF_F_HIGHDMA;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);
}
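
/*
 * Create a physical-address mode memory key covering the whole address
 * space (MLX5_MKEY_LEN64) with local read/write permissions, so data
 * path buffers can be posted by physical address under a single mkey.
 */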
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
                             struct mlx5_core_mr *mr)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        int err;

        in = mlx5_vzalloc(sizeof(*in));
        if (!in)
                return -ENOMEM;

        in->seg.flags = MLX5_PERM_LOCAL_WRITE |
                        MLX5_PERM_LOCAL_READ |
                        MLX5_ACCESS_MODE_PA;
        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

        err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
                                    NULL);

        kvfree(in);

        return err;
}
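
/*
 * Device add callback: build the netdev and create its HW resources
 * strictly in dependency order (UAR, PD, transport domain, mkey,
 * TISes, drop RQ, RQTs, TIRs, flow tables) before registering. The
 * error labels below unwind in exact reverse order.
 */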
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int nch = mlx5e_get_max_num_channels(mdev);
        int err;

        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        mlx5e_build_netdev_priv(mdev, netdev, nch);
        mlx5e_build_netdev(netdev);

        netif_carrier_off(netdev);

        priv = netdev_priv(netdev);

        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }

        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }

        err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
                mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }

        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tises failed, %d\n", err);
                goto err_destroy_mkey;
        }

        err = mlx5e_open_drop_rq(priv);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_tises;
        }

        err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
                goto err_close_drop_rq;
        }

        err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
                goto err_destroy_rqt_indir;
        }

        err = mlx5e_create_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
                goto err_destroy_rqt_single;
        }

        err = mlx5e_create_flow_tables(priv);
        if (err) {
                mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
                goto err_destroy_tirs;
        }

        mlx5e_init_eth_addr(priv);

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_flow_tables;
        }

        mlx5e_enable_async_events(priv);
        schedule_work(&priv->set_rx_mode_work);

        return priv;

err_destroy_flow_tables:
        mlx5e_destroy_flow_tables(priv);

err_destroy_tirs:
        mlx5e_destroy_tirs(priv);

err_destroy_rqt_single:
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_destroy_rqt_indir:
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_drop_rq:
        mlx5e_close_drop_rq(priv);

err_destroy_tises:
        mlx5e_destroy_tises(priv);

err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
        mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}
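
/*
 * Device remove callback: the mirror image of mlx5e_create_netdev().
 * The DESTROYING bit, the final rx-mode work and flush_scheduled_work()
 * ensure no asynchronous work touches the device after the netdev is
 * unregistered.
 */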
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;

        set_bit(MLX5E_STATE_DESTROYING, &priv->state);

        schedule_work(&priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
        unregister_netdev(netdev);
        mlx5e_destroy_flow_tables(priv);
        mlx5e_destroy_tirs(priv);
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        free_netdev(netdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}
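
/*
 * Register with mlx5_core as the Ethernet protocol consumer; the core
 * invokes .add/.remove per probed device and delivers asynchronous
 * events through .event.
 */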
static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_create_netdev,
        .remove    = mlx5e_destroy_netdev,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}