/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
	bool			am_enabled;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
	u16			max_inline;
	bool			icosq;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};
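
/* Note: mlx5e_channel_param simply bundles the RQ, the regular SQ, the
 * internal control-operations (ICO) SQ and their three CQ configurations,
 * so one prepared template can be reused for every channel being opened.
 */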
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP)
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none		+= sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none -
				 s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
}
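
/* Example of the derived counters above: if 1000 packets were sent, 100 of
 * them with no checksum offload (csum_none) and 50 with inner (tunnel)
 * checksum offload, then tx_csum_partial = 1000 - 100 - 50 = 850, i.e. the
 * packets that used plain outer checksum offload.
 */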
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	struct mlx5_core_dev *mdev = priv->mdev;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;
	u32 *in;

	in = mlx5_vzalloc(sz);
	if (!in)
		goto free_out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}

free_out:
	kvfree(in);
}
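
/* All of the port counter groups above are read through the same PPCNT
 * access register; only the "grp" field (plus "prio_tc" for the
 * per-priority group) selects which counter set the firmware returns
 * into the caller-supplied output buffer.
 */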
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

	if (!priv->q_counter)
		return;

	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
				      &qcnt->rx_out_of_buffer);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_q_counter(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_sw_counters(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
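
/* The conversions above account for the Ethernet header (ETH_HLEN = 14),
 * one VLAN tag (VLAN_HLEN = 4) and the frame checksum (ETH_FCS_LEN = 4),
 * 22 bytes of L2 overhead in total: e.g. a netdev MTU of 1500 corresponds
 * to a 1522-byte MTU on the port.
 */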
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
					    GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;

		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
		rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->wqe_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
				       cpu_to_node(c->cpu));
		if (!rq->skb) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
		rq->alloc_wqe = mlx5e_alloc_rx_wqe;

		rq->wqe_sz = (priv->params.lro_en) ?
				priv->params.lro_wqe_sz :
				MLX5E_SW2HW_MTU(priv->netdev->mtu);
		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
		byte_count = rq->wqe_sz;
		byte_count |= MLX5_HW_START_PADDING;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = priv->params.rx_cq_period_mode;

	rq->wq_type = priv->params.rq_wq_type;
	rq->priv    = c->priv;
	rq->netdev  = c->netdev;
	rq->tstamp  = &priv->tstamp;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mkey_be = c->mkey_be;
	rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
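
/* Sizing sketch for the striding (MPWQE) case above: each receive WQE
 * covers mpwqe_num_strides strides of mpwqe_stride_sz bytes each. For
 * example, with a 64-byte stride (mpwqe_log_stride_sz = 6) and 1024
 * strides (mpwqe_log_num_strides = 10), one multi-packet WQE spans 64 KB;
 * the actual log values are taken from priv->params.
 */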
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kfree(rq->wqe_info);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->skb);
	}

	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	struct mlx5e_sq *sq = &c->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	if (param->am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);

	sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
	sq->ico_wqe_info[pi].num_wqebbs = 1;
	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}
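
/* RQ bring-up order used above: create the SW/HW work queue, create the
 * RQ object in firmware (RST state), move it RST->RDY, and only then post
 * a NOP on the ICO SQ so the NAPI handler starts refilling receive WQEs.
 */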
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

	mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	while (!mlx5_wq_ll_is_empty(&rq->wq))
		msleep(20);

	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
	napi_synchronize(&rq->channel->napi);

	cancel_work_sync(&rq->am.work);

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->wqe_info);
	kfree(sq->dma_fifo);
	kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);
	sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
				    numa);
	if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}
*c
,
582 struct mlx5e_sq_param
*param
,
585 struct mlx5e_priv
*priv
= c
->priv
;
586 struct mlx5_core_dev
*mdev
= priv
->mdev
;
588 void *sqc
= param
->sqc
;
589 void *sqc_wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
592 err
= mlx5_alloc_map_uar(mdev
, &sq
->uar
, !!MLX5_CAP_GEN(mdev
, bf
));
596 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
598 err
= mlx5_wq_cyc_create(mdev
, ¶m
->wq
, sqc_wq
, &sq
->wq
,
601 goto err_unmap_free_uar
;
603 sq
->wq
.db
= &sq
->wq
.db
[MLX5_SND_DBR
];
604 if (sq
->uar
.bf_map
) {
605 set_bit(MLX5E_SQ_STATE_BF_ENABLE
, &sq
->state
);
606 sq
->uar_map
= sq
->uar
.bf_map
;
608 sq
->uar_map
= sq
->uar
.map
;
610 sq
->bf_buf_size
= (1 << MLX5_CAP_GEN(mdev
, log_bf_reg_size
)) / 2;
611 sq
->max_inline
= param
->max_inline
;
613 err
= mlx5e_alloc_sq_db(sq
, cpu_to_node(c
->cpu
));
615 goto err_sq_wq_destroy
;
618 u8 wq_sz
= mlx5_wq_cyc_get_size(&sq
->wq
);
620 sq
->ico_wqe_info
= kzalloc_node(sizeof(*sq
->ico_wqe_info
) *
623 cpu_to_node(c
->cpu
));
624 if (!sq
->ico_wqe_info
) {
631 txq_ix
= c
->ix
+ tc
* priv
->params
.num_channels
;
632 sq
->txq
= netdev_get_tx_queue(priv
->netdev
, txq_ix
);
633 priv
->txq_to_sq_map
[txq_ix
] = sq
;
637 sq
->tstamp
= &priv
->tstamp
;
638 sq
->mkey_be
= c
->mkey_be
;
641 sq
->edge
= (sq
->wq
.sz_m1
+ 1) - MLX5_SEND_WQE_MAX_WQEBBS
;
642 sq
->bf_budget
= MLX5E_SQ_BF_BUDGET
;
647 mlx5e_free_sq_db(sq
);
650 mlx5_wq_destroy(&sq
->wq_ctrl
);
653 mlx5_unmap_free_uar(mdev
, &sq
->uar
);
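
/* BlueFlame note: when the UAR exposes a BF mapping, doorbells through
 * sq->uar_map can carry the WQE inline instead of a plain ring. The half
 * register size assigned to bf_buf_size above matches the scheme of a BF
 * register split into two buffers that are written alternately.
 */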
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	kfree(sq->ico_wqe_info);
	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc,  sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
	MLX5_SET(sqc,  sqc, cqn,		sq->cq.mcq.cqn);
	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, tis_lst_sz, param->icosq ? 0 : 1);
	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
			   int next_state, bool update_rl, int rl_index)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);
	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
			      false, 0);
	if (err)
		goto err_disable_sq;

	if (sq->txq) {
		set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
		netdev_tx_reset_queue(sq->txq);
		netif_tx_start_queue(sq->txq);
	}

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
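
/* Unlike a bare netif_tx_stop_queue(), the helper above takes the TX
 * queue lock, so a concurrent ndo_start_xmit() on another CPU is
 * guaranteed to have finished before the queue is marked stopped.
 */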
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	if (sq->txq) {
		clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
		/* prevent netif_tx_wake_queue */
		napi_synchronize(&sq->channel->napi);
		netif_tx_disable_queue(sq->txq);

		/* ensure hw is notified of all pending wqes */
		if (mlx5e_sq_has_room_for(sq, 1))
			mlx5e_send_nop(sq, true);

		mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR,
				false, 0);
	}

	while (sq->cc != sq->pc) /* wait till sq is empty */
		msleep(20);

	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
	napi_synchronize(&sq->channel->napi);

	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
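
/* The 0xf1 op_own pattern written into each entry above marks every CQE
 * as invalid/hardware-owned, so the driver never mistakes stale buffer
 * contents for a completed CQE before the hardware has written one.
 */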
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 struct mlx5e_cq_moder moderation)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation.usec,
					       moderation.pkts);
	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_sq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
			      MLX5_SQC_STATE_RDY, true, rl_index);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
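
/* Unit conversion example: the stack hands the rate over in Mb/s while
 * the device is programmed in Kb/s, hence the "rate << 10" (x1024) above;
 * e.g. a requested 100 Mb/s becomes 102400 Kb/s in the rate table.
 */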
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_cq_moder rx_cq_profile;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	struct mlx5e_sq *sq;
	int err;
	int i;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = priv->params.num_tc;

	if (priv->params.rx_am_enabled)
		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
	else
		rx_cq_profile = priv->params.rx_cq_moderation;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    rx_cq_profile);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_icosq;

	for (i = 0; i < priv->params.num_tc; i++) {
		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

		if (priv->tx_rates[txq_ix]) {
			sq = priv->txq_to_sq_map[txq_ix];
			mlx5e_set_sq_maxrate(priv->netdev, sq,
					     priv->tx_rates[txq_ix]);
		}
	}

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_sq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_sq(&c->icosq);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	napi_hash_del(&c->napi);

	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (priv->params.rx_cqe_compress) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

	param->icosq = true;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
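
/* A channel therefore consists of one RQ, one SQ per traffic class, one
 * ICO SQ for internal (e.g. NOP/UMR) commands, and a dedicated CQ for
 * each: rx_cq, tx_cq and icosq_cq, all sized from the same priv->params.
 */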
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param *cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);

	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(cparam);

	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
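
/* mlx5e_bits_invert() mirrors the low "size" bits, e.g. for size = 3:
 * a = 0b110 (6) -> 0b011 (3). It is applied to the indirection table
 * index when the XOR8 hash function is selected (see its use below).
 */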
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;
		u32 rqn;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
				priv->channel[ix]->rq.rqn :
				priv->drop_rq.rqn;
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
				      int ix)
{
	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			priv->channel[ix]->rq.rqn :
			priv->drop_rq.rqn;

	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
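
/* While the interface is down, both fill helpers above point every RQT
 * entry at the drop RQ, so any traffic the hardware still steers is
 * discarded rather than delivered to a destroyed receive queue.
 */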
static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);

	kvfree(in);
	return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
{
	mlx5_core_destroy_rqt(priv->mdev, rqtn);
}
static int mlx5e_create_rqts(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_max_num_channels(priv->mdev);
	u32 *rqtn;
	int err;
	int ix;

	/* Indirect RQT */
	rqtn = &priv->indir_rqtn;
	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
	if (err)
		return err;

	/* Direct RQTs */
	for (ix = 0; ix < nch; ix++) {
		rqtn = &priv->direct_tir[ix].rqtn;
		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);

	mlx5e_destroy_rqt(priv, priv->indir_rqtn);

	return err;
}

static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_max_num_channels(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);

	mlx5e_destroy_rqt(priv, priv->indir_rqtn);
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	u32 rqtn;
	int ix;

	rqtn = priv->indir_rqtn;
	mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	for (ix = 0; ix < priv->params.num_channels; ix++) {
		rqtn = priv->direct_tir[ix].rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
	}
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}
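
/* lro_max_ip_payload_size is programmed in units of 256 bytes (hence the
 * ">> 8" above), with ROUGH_MAX_L2_L3_HDR_SZ reserved for L2/L3 headers:
 * e.g. a 64 KB lro_wqe_sz yields (65536 - 256) >> 8 = 255 units.
 */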
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}
static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->params.num_channels;
	int ntc = priv->params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, tc * nch);
}
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		goto err_clear_state_opened_flag;

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);
#ifdef CONFIG_RFS_ACCEL
	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif

	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}
static int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}
static int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}
static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

	cq->priv = priv;

	return 0;
}
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}
static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
		mlx5e_destroy_tis(priv, tc);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
	mlx5e_build_tir_ctx_hash(tirc, priv);

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	default:
		WARN_ONCE(true,
			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
	}
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_max_num_channels(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	u32 *in;
	int err;
	int ix;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	/* indirect tirs */
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	/* direct tirs */
	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tirc,
					   priv->direct_tir[ix].rqtn);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

err_destroy_tirs:
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}
*priv
)
2172 int nch
= mlx5e_get_max_num_channels(priv
->mdev
);
2175 for (i
= 0; i
< nch
; i
++)
2176 mlx5e_destroy_tir(priv
->mdev
, &priv
->direct_tir
[i
]);
2178 for (i
= 0; i
< MLX5E_NUM_INDIR_TIRS
; i
++)
2179 mlx5e_destroy_tir(priv
->mdev
, &priv
->indir_tir
[i
]);
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
	int err = 0;
	int i;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
*netdev
, u8 tc
)
2201 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
2205 if (tc
&& tc
!= MLX5E_MAX_NUM_TC
)
2208 mutex_lock(&priv
->state_lock
);
2210 was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
2212 mlx5e_close_locked(priv
->netdev
);
2214 priv
->params
.num_tc
= tc
? tc
: 1;
2217 err
= mlx5e_open_locked(priv
->netdev
);
2219 mutex_unlock(&priv
->state_lock
);
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx5e_setup_tc(dev, tc->tc);
}
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
	stats->tx_dropped = sstats->tx_queue_dropped;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);

	return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	int err;

	mutex_lock(&priv->state_lock);

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_close_locked(priv->netdev);

	priv->params.lro_en = enable;
	err = mlx5e_modify_tirs_lro(priv);
	if (err) {
		netdev_err(netdev, "lro modify failed, %d\n", err);
		priv->params.lro_en = !enable;
	}

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->params.vlan_strip_disable = !enable;
	err = mlx5e_modify_rqs_vsd(priv, !enable);
	if (err)
		priv->params.vlan_strip_disable = enable;

	mutex_unlock(&priv->state_lock);

	return err;
}
#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}
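/*
 * Typical entry point for the path above is the stack's feature ops,
 * e.g. `ethtool -K <ifname> lro off` ends up in a ndo_set_features call
 * that lands in mlx5e_set_features() below, which funnels each toggled
 * bit through mlx5e_handle_feature() with the matching handler.
 */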
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
#define MLX5_HW_MIN_MTU 64
#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
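/*
 * Both values are wire-side: 64 bytes of minimal frame plus 4 bytes of
 * FCS, i.e. MLX5E_MIN_MTU = 68. mlx5e_change_mtu() below converts them
 * with MLX5E_HW2SW_MTU (defined elsewhere in the driver), which strips
 * the L2 overhead that the hardware counts but the stack's MTU does not.
 */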
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	u16 max_mtu;
	u16 min_mtu;
	int err = 0;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
	min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);

	if (new_mtu > max_mtu || new_mtu < min_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
			   __func__, new_mtu, min_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;

	if (was_opened)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}
static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}
static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}
static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}
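/*
 * The two helpers above translate between the rtnetlink
 * IFLA_VF_LINK_STATE_* values and the e-switch vport admin states;
 * unrecognized values fall back to AUTO in both directions.
 */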
static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}
static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}
static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
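/*
 * Only UDP destination ports previously registered through the VXLAN
 * add/del callbacks above pass the lookup; for any other encapsulated
 * flow the checksum and GSO bits are cleared so the stack falls back to
 * software offloads for that skb.
 */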
static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	int i;

	priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < priv->params.ets.ets_cap; i++) {
		priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		priv->params.ets.prio_tc[i] = i;
	}

	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
	priv->params.ets.prio_tc[0] = 1;
	priv->params.ets.prio_tc[1] = 0;
}
#endif
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
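/*
 * The indirection table is filled round-robin over the channels, with
 * the channel count first clamped to the number of CPUs on the device's
 * NUMA node, so RSS does not spread flows onto remote-node cores by
 * default.
 */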
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
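/*
 * The products above are raw per-lane signaling rates in Mbps (2.5, 5
 * and 8 GT/s) times the link width, so *pci_bw is an upper bound on
 * PCIe bandwidth rather than achievable throughput.
 */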
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}
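/*
 * Heuristic: default CQE compression on only when the PCI link is both
 * below 40Gbps and slower than the network port, i.e. when PCIe
 * bandwidth is the likely bottleneck that compressing completions can
 * relieve.
 */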
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}
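/*
 * In CQE-based moderation the timer restarts from the last completion
 * rather than from the event, hence the separate default usec budget
 * applied above for that mode.
 */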
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_channels)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u32 link_speed = 0;
	u32 pci_bw = 0;
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			    MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_LINKED_LIST;

	/* set CQE compression */
	priv->params.rx_cqe_compress_admin = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_get_max_linkspeed(mdev, &link_speed);
		mlx5e_get_pci_bw(mdev, &pci_bw);
		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
			      link_speed, pci_bw);
		priv->params.rx_cqe_compress_admin =
			cqe_compress_heuristic(link_speed, pci_bw);
	}

	priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
			priv->params.rx_cqe_compress ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE;
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		priv->params.lro_en = true;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}

	mlx5_core_info(mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       priv->params.rx_cqe_compress_admin);

	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
						    BIT(priv->params.log_rq_size));

	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);

	priv->params.tx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	priv->params.num_tc = 1;
	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, num_channels);

	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	/* Initialize pflags */
	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			    priv->params.rx_cq_period_mode ==
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->params.num_channels = num_channels;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_ets_init(priv);
#endif

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}
static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops       = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features      |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features      |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}
static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *mkc;
	int inlen = sizeof(*in);
	u64 npages =
		mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = &in->seg;
	mkc->status = MLX5_MKEY_STATUS_FREE;
	mkc->flags = MLX5_PERM_UMR_EN |
		     MLX5_PERM_LOCAL_READ |
		     MLX5_PERM_LOCAL_WRITE |
		     MLX5_ACCESS_MODE_MTT;

	mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
	mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
	mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
	mkc->log2_page_size = PAGE_SHIFT;

	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
				    NULL, NULL);

	kvfree(in);

	return err;
}
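/*
 * Rough sketch of intent (assuming the striding-RQ design described
 * earlier in this file): this single UMR-enabled mkey is sized to cover
 * the MTT entries of the maximum number of channels, and the RX path
 * remaps page sets through it with UMR WQEs at runtime.
 */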
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int nch = mlx5e_get_max_num_channels(mdev);
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * MLX5E_MAX_NUM_TC,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	mlx5e_build_netdev_priv(mdev, netdev, nch);
	mlx5e_build_netdev(netdev);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_free_netdev;

	err = mlx5e_create_umr_mkey(priv);
	if (err) {
		mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
		goto err_destroy_wq;
	}

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(mdev, "create tises failed, %d\n", err);
		goto err_destroy_umr_mkey;
	}

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_tises;
	}

	err = mlx5e_create_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
		goto err_close_drop_rq;
	}

	err = mlx5e_create_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
		goto err_destroy_rqts;
	}

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_tirs;
	}

	mlx5e_create_q_counter(priv);

	mlx5e_init_l2_addr(priv);

	mlx5e_vxlan_init(priv);

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_dealloc_q_counters;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
#endif

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_tc_cleanup;
	}

	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	mlx5e_enable_async_events(priv);
	queue_work(priv->wq, &priv->set_rx_mode_work);

	return priv;

err_tc_cleanup:
	mlx5e_tc_cleanup(priv);

err_dealloc_q_counters:
	mlx5e_destroy_q_counter(priv);
	mlx5e_destroy_flow_steering(priv);

err_destroy_tirs:
	mlx5e_destroy_tirs(priv);

err_destroy_rqts:
	mlx5e_destroy_rqts(priv);

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_destroy_tises:
	mlx5e_destroy_tises(priv);

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);

err_destroy_wq:
	destroy_workqueue(priv->wq);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	void *ret;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	if (mlx5e_create_mdev_resources(mdev))
		return NULL;

	ret = mlx5e_create_netdev(mdev);
	if (!ret) {
		mlx5e_destroy_mdev_resources(mdev);
		return NULL;
	}

	return ret;
}
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev,
				 struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	queue_work(priv->wq, &priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
	flush_workqueue(priv->wq);
	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
		netif_device_detach(netdev);
		mlx5e_close(netdev);
	} else {
		unregister_netdev(netdev);
	}

	mlx5e_tc_cleanup(priv);
	mlx5e_vxlan_cleanup(priv);
	mlx5e_destroy_q_counter(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_tirs(priv);
	mlx5e_destroy_rqts(priv);
	mlx5e_close_drop_rq(priv);
	mlx5e_destroy_tises(priv);
	mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
	cancel_delayed_work_sync(&priv->update_stats_work);
	destroy_workqueue(priv->wq);

	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
		free_netdev(netdev);
}
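/*
 * On a PCI shutdown the netdev is only detached and closed, not
 * unregistered or freed; the full unregister/free pair runs only on a
 * regular remove, keeping the shutdown path minimal.
 */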
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	mlx5e_destroy_netdev(mdev, priv);
	mlx5e_destroy_mdev_resources(mdev);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};
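/*
 * mlx5e registers as the Ethernet-protocol consumer of mlx5_core: the
 * core invokes .add/.remove per device and delivers firmware events
 * through .event.
 */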
void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}