/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/flow_table.h>
#include "en.h"

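/*
 * Creation parameters for the per-channel hardware objects: a prototype
 * firmware context (rqc/sqc/cqc) plus the work-queue parameters used
 * when the RQ, SQs and CQs of a channel are instantiated.
 */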
struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        u16                        max_inline;
};

struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
        u16                        eq_ix;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
};

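/*
 * Mirror the firmware's view of the vNIC port state into the netdev
 * carrier state.
 */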
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                        MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);

        if (port_state == VPORT_STATE_UP)
                netif_carrier_on(priv->netdev);
        else
                netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

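/*
 * Fold the software ring counters first, then issue QUERY_VPORT_COUNTER
 * to refresh the hardware counters, so the two views stay consistent.
 */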
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_vport_stats *s = &priv->stats.vport;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u64 tx_offload_none;
        int i, j;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        /* Collect first the SW counters and then HW for consistency */
        s->tso_packets       = 0;
        s->tso_bytes         = 0;
        s->tx_queue_stopped  = 0;
        s->tx_queue_wake     = 0;
        s->tx_queue_dropped  = 0;
        tx_offload_none      = 0;
        s->lro_packets       = 0;
        s->lro_bytes         = 0;
        s->rx_csum_none      = 0;
        s->rx_wqe_err        = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_wqe_err   += rq_stats->wqe_err;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tso_packets      += sq_stats->tso_packets;
                        s->tso_bytes        += sq_stats->tso_bytes;
                        s->tx_queue_stopped += sq_stats->stopped;
                        s->tx_queue_wake    += sq_stats->wake;
                        s->tx_queue_dropped += sq_stats->dropped;
                        tx_offload_none     += sq_stats->csum_offload_none;
                }
        }

        /* HW counters */
        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        s->rx_error_packets =
                MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes =
                MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets =
                MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes =
                MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets =
                MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes =
                MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
                MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes =
                MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes =
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        s->rx_packets =
                s->rx_unicast_packets +
                s->rx_multicast_packets +
                s->rx_broadcast_packets;
        s->rx_bytes =
                s->rx_unicast_bytes +
                s->rx_multicast_bytes +
                s->rx_broadcast_bytes;
        s->tx_packets =
                s->tx_unicast_packets +
                s->tx_multicast_packets +
                s->tx_broadcast_packets;
        s->tx_bytes =
                s->tx_unicast_bytes +
                s->tx_multicast_bytes +
                s->tx_broadcast_bytes;

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none;

free_out:
        kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
                schedule_delayed_work(dwork,
                                      msecs_to_jiffies(
                                              MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
                                enum mlx5_dev_event event)
{
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                schedule_work(&priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        spin_lock(&priv->async_events_spinlock);
        if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
                __mlx5e_async_event(priv, event);
        spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        spin_unlock_irq(&priv->async_events_spinlock);
}

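/*
 * The port MTU seen by firmware includes the Ethernet header, a VLAN
 * tag and the FCS; the netdev MTU does not. These macros convert
 * between the two views.
 */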
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

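/*
 * RQ setup is split in two: mlx5e_create_rq() allocates the software
 * resources (work queue, skb array), while mlx5e_enable_rq() creates
 * the firmware RQ object, which is then moved RST -> RDY.
 */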
static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                               cpu_to_node(c->cpu));
        if (!rq->skb) {
                err = -ENOMEM;
                goto err_rq_wq_destroy;
        }

        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
                                             MLX5E_SW2HW_MTU(priv->netdev->mtu);
        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

                wqe->data.lkey       = c->mkey_be;
                wqe->data.byte_count =
                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }

        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->channel = c;
        rq->priv    = priv;

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        kfree(rq->skb);
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc,  rqc, cqn,               rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,             MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,      rq->wq_ctrl.buf.page_shift -
                                               MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,          rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;
        int i;

        for (i = 0; i < 1000; i++) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        mlx5e_disable_rq(rq);
        mlx5e_destroy_rq(rq);
}

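/*
 * SQ handling mirrors the RQ flow: software resources first, then the
 * firmware SQ object, which is moved RST -> RDY before the txq starts.
 */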
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int txq_ix;
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar);
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map     = sq->uar.map;
        sq->uar_bf_map  = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->tc        = tc;
        sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq  = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc, sqc, tis_num_0,         priv->tisn[sq->tc]);
        MLX5_SET(sqc, sqc, cqn,               c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc, sqc, state,             MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, tis_lst_sz,        1);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);

        MLX5_SET(wq,   wq, wq_type,      MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,     sq->uar.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
                                         MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,     sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
        netif_tx_disable_queue(sq->txq);

        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
                mlx5e_send_nop(sq, true);

        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}

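/*
 * CQ setup: the completion queue is bound to the channel's completion
 * vector, and its doorbell record supplies the set_ci and arm doorbells.
 */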
static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix            = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi        = &c->napi;

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv    = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         u16 moderation_usecs,
                         u16 moderation_frames)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                             moderation_usecs,
                                             moderation_frames);
        if (err)
                goto err_disable_cq;

        return 0;

err_disable_cq:
        mlx5e_disable_cq(cq);

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

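/* Pin each channel to the first CPU in its IRQ's affinity mask. */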
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation_usec,
                                    priv->params.tx_cq_moderation_pkts);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
                                      int num_channels)
{
        int i;

        for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
                c->tc_to_txq_map[i] = c->ix + i * num_channels;
}

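/*
 * A channel bundles one RQ, one SQ per TC and their CQs under a single
 * NAPI context, allocated on the NUMA node of the channel's CPU.
 */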
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv     = priv;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mr.key);
        c->num_tc   = priv->params.num_tc;

        mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_napi_del:
        netif_napi_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        netif_napi_del(&c->napi);
        kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_channel_param *cparam)
{
        memset(cparam, 0, sizeof(*cparam));

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);
        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);
        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, &cparam);
        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

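/*
 * Reverse the low 'size' bits of 'a'; used to spread the RSS
 * indirection table entries when the XOR8 hash function is in use.
 */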
static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
{
        int i;
        int log_sz;

        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
                log_sz = priv->params.rx_hash_log_tbl_sz;
                for (i = 0; i < (1 << log_sz); i++) {
                        int ix = i;

                        if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                                ix = mlx5e_bits_invert(i, log_sz);

                        ix = ix % priv->params.num_channels;
                        MLX5_SET(rqtc, rqtc, rq_num[i],
                                 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                                 priv->channel[ix]->rq.rqn :
                                 priv->drop_rq.rqn);
                }

                break;

        default: /* MLX5E_SINGLE_RQ_RQT */
                MLX5_SET(rqtc, rqtc, rq_num[0],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[0]->rq.rqn :
                         priv->drop_rq.rqn);

                break;
        }
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;
        int log_sz;

        log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
                  priv->params.rx_hash_log_tbl_sz;
        sz = 1 << log_sz;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

        kvfree(in);

        return err;
}

static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;
        int log_sz;

        log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
                  priv->params.rx_hash_log_tbl_sz;
        sz = 1 << log_sz;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
        mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}

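/*
 * lro_max_ip_payload_size is programmed as
 * (lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8, i.e. in 256-byte units,
 * leaving headroom for the L2/L3 headers within the LRO WQE.
 */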
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
        if (!priv->params.lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (priv->params.lro_wqe_sz -
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
                              lro_timer_supported_periods[3]));
}

static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

        mlx5e_build_tir_ctx_lro(tirc, priv);

        err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
        if (err)
                return err;

        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
                netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
                            __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);

        return 0;
}

int mlx5e_open_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

        err = mlx5e_set_dev_port_mtu(netdev);
        if (err)
                return err;

        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
                           __func__, err);
                return err;
        }

        mlx5e_update_carrier(priv);
        mlx5e_redirect_rqts(priv);

        schedule_delayed_work(&priv->update_stats_work, 0);

        return 0;
}

*netdev
)
1345 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1348 mutex_lock(&priv
->state_lock
);
1349 err
= mlx5e_open_locked(netdev
);
1350 mutex_unlock(&priv
->state_lock
);
int mlx5e_close_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_redirect_rqts(priv);
        netif_carrier_off(priv->netdev);
        mlx5e_close_channels(priv);

        return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
                                struct mlx5e_rq *rq,
                                struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int err;

        param->wq.db_numa_node = param->wq.buf_numa_node;

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->priv = priv;

        return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
                                struct mlx5e_cq *cq,
                                struct mlx5e_cq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

        cq->priv = priv;

        return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
        struct mlx5e_cq_param cq_param;
        struct mlx5e_rq_param rq_param;
        struct mlx5e_rq *rq = &priv->drop_rq;
        struct mlx5e_cq *cq = &priv->drop_rq.cq;
        int err;

        memset(&cq_param, 0, sizeof(cq_param));
        memset(&rq_param, 0, sizeof(rq_param));
        mlx5e_build_rx_cq_param(priv, &cq_param);
        mlx5e_build_rq_param(priv, &rq_param);

        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, &cq_param);
        if (err)
                goto err_destroy_cq;

        err = mlx5e_create_drop_rq(priv, rq, &rq_param);
        if (err)
                goto err_disable_cq;

        err = mlx5e_enable_rq(rq, &rq_param);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
        mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
        mlx5e_destroy_cq(&priv->drop_rq.cq);

        return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
        mlx5e_disable_rq(&priv->drop_rq);
        mlx5e_destroy_rq(&priv->drop_rq);
        mlx5e_disable_cq(&priv->drop_rq.cq);
        mlx5e_destroy_cq(&priv->drop_rq.cq);
}

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        memset(in, 0, sizeof(in));

        MLX5_SET(tisc, tisc, prio, tc);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
        int err;
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_create_tis(priv, tc);
                if (err)
                        goto err_close_tises;
        }

        return 0;

err_close_tises:
        for (tc--; tc >= 0; tc--)
                mlx5e_destroy_tis(priv, tc);

        return err;
}

static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_destroy_tis(priv, tc);
}

static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
                                 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

        mlx5e_build_tir_ctx_lro(tirc, priv);

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

        switch (tt) {
        case MLX5E_TT_ANY:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
                break;
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn,
                         mlx5e_rx_hash_fn(priv->params.rss_hfunc));
                if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
                        void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                                     rx_hash_toeplitz_key);
                        size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                                       rx_hash_toeplitz_key);

                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                        netdev_rss_key_fill(rss_key, len);
                }
                break;
        }

        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;

        case MLX5E_TT_IPV6:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
        }
}

static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        mlx5e_build_tir_ctx(priv, tirc, tt);

        err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_create_tir(priv, i);
                if (err)
                        goto err_destroy_tirs;
        }

        return 0;

err_destroy_tirs:
        for (i--; i >= 0; i--)
                mlx5e_destroy_tir(priv, i);

        return err;
}

static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++)
                mlx5e_destroy_tir(priv, i);
}

static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;

        stats->rx_packets = vstats->rx_packets;
        stats->rx_bytes   = vstats->rx_bytes;
        stats->tx_packets = vstats->tx_packets;
        stats->tx_bytes   = vstats->tx_bytes;
        stats->multicast  = vstats->rx_multicast_packets +
                            vstats->tx_multicast_packets;
        stats->tx_errors  = vstats->tx_error_packets;
        stats->rx_errors  = vstats->rx_error_packets;
        stats->tx_dropped = vstats->tx_queue_dropped;
        stats->rx_crc_errors = 0;
        stats->rx_length_errors = 0;

        return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        schedule_work(&priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        netif_addr_lock_bh(netdev);
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);

        schedule_work(&priv->set_rx_mode_work);

        return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err = 0;
        netdev_features_t changes = features ^ netdev->features;

        mutex_lock(&priv->state_lock);

        if (changes & NETIF_F_LRO) {
                bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

                if (was_opened)
                        mlx5e_close_locked(priv->netdev);

                priv->params.lro_en = !!(features & NETIF_F_LRO);
                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);

                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
        }

        mutex_unlock(&priv->state_lock);

        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        mlx5e_enable_vlan_filter(priv);
                else
                        mlx5e_disable_vlan_filter(priv);
        }

        return err;
}

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
        int max_mtu;
        int err = 0;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

        if (new_mtu > max_mtu) {
                netdev_err(netdev,
                           "%s: Bad MTU (%d) > (%d) Max\n",
                           __func__, new_mtu, max_mtu);
                return -EINVAL;
        }

        mutex_lock(&priv->state_lock);

        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        if (was_opened)
                mlx5e_close_locked(netdev);

        netdev->mtu = new_mtu;

        if (was_opened)
                err = mlx5e_open_locked(netdev);

        mutex_unlock(&priv->state_lock);

        return err;
}

static struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
};

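/*
 * Refuse to create the netdev unless the HCA reports an Ethernet port
 * and the offload capabilities (csum, LSO, VLAN, RSS, flow table depth)
 * this driver depends on.
 */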
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -ENOTSUPP;

        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
            !MLX5_CAP_ETH(mdev, vlan_cap) ||
            !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
            MLX5_CAP_FLOWTABLE(mdev,
                               flow_table_properties_nic_receive.max_ft_level)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
                return -ENOTSUPP;
        }

        return 0;
}

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
        int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

        return bf_buf_size -
               sizeof(struct mlx5e_tx_wqe) +
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}

static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_comp_vectors)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.log_rq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        priv->params.rx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        priv->params.rx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        priv->params.tx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.rx_hash_log_tbl_sz    =
                (order_base_2(num_comp_vectors) >
                 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
                order_base_2(num_comp_vectors) :
                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

        priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->params.num_channels          = num_comp_vectors;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;

        spin_lock_init(&priv->async_events_spinlock);
        mutex_init(&priv->state_lock);

        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}

static void mlx5e_build_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        if (priv->params.num_tc > 1)
                mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;

        netdev->netdev_ops        = &mlx5e_netdev_ops;
        netdev->watchdog_timeo    = 15 * HZ;

        netdev->ethtool_ops       = &mlx5e_ethtool_ops;

        netdev->vlan_features    |= NETIF_F_SG;
        netdev->vlan_features    |= NETIF_F_IP_CSUM;
        netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features    |= NETIF_F_GRO;
        netdev->vlan_features    |= NETIF_F_TSO;
        netdev->vlan_features    |= NETIF_F_TSO6;
        netdev->vlan_features    |= NETIF_F_RXCSUM;
        netdev->vlan_features    |= NETIF_F_RXHASH;

        if (!!MLX5_CAP_ETH(mdev, lro_cap))
                netdev->vlan_features    |= NETIF_F_LRO;

        netdev->hw_features       = netdev->vlan_features;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->features          = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;

        netdev->features         |= NETIF_F_HIGHDMA;

        netdev->priv_flags       |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);
}

static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
                             struct mlx5_core_mr *mr)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        int err;

        in = mlx5_vzalloc(sizeof(*in));
        if (!in)
                return -ENOMEM;

        in->seg.flags = MLX5_PERM_LOCAL_WRITE |
                        MLX5_PERM_LOCAL_READ  |
                        MLX5_ACCESS_MODE_PA;
        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

        err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
                                    NULL);

        kvfree(in);

        return err;
}

static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int ncv = mdev->priv.eq_table.num_comp_vectors;
        int err;

        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        mlx5e_build_netdev_priv(mdev, netdev, ncv);
        mlx5e_build_netdev(netdev);

        netif_carrier_off(netdev);

        priv = netdev_priv(netdev);

        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }

        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }

        err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
                mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }

        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tises failed, %d\n", err);
                goto err_destroy_mkey;
        }

        err = mlx5e_open_drop_rq(priv);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_tises;
        }

        err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
                goto err_close_drop_rq;
        }

        err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
                goto err_destroy_rqt_indir;
        }

        err = mlx5e_create_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
                goto err_destroy_rqt_single;
        }

        err = mlx5e_create_flow_tables(priv);
        if (err) {
                mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
                goto err_destroy_tirs;
        }

        mlx5e_init_eth_addr(priv);

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_flow_tables;
        }

        mlx5e_enable_async_events(priv);
        schedule_work(&priv->set_rx_mode_work);

        return priv;

err_destroy_flow_tables:
        mlx5e_destroy_flow_tables(priv);

err_destroy_tirs:
        mlx5e_destroy_tirs(priv);

err_destroy_rqt_single:
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_destroy_rqt_indir:
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_drop_rq:
        mlx5e_close_drop_rq(priv);

err_destroy_tises:
        mlx5e_destroy_tises(priv);

err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
        mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}

static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;

        set_bit(MLX5E_STATE_DESTROYING, &priv->state);

        schedule_work(&priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
        unregister_netdev(netdev);
        mlx5e_destroy_flow_tables(priv);
        mlx5e_destroy_tirs(priv);
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        free_netdev(netdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}

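/*
 * Register as a protocol interface with the mlx5 core driver; the core
 * calls .add/.remove per device and .event for asynchronous events.
 */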
static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_create_netdev,
        .remove    = mlx5e_destroy_netdev,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}