/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5e_rq_param {
	u32 rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param wq;
	bool am_enabled;
};

struct mlx5e_sq_param {
	u32 sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param wq;
	u16 max_inline;
	u8 min_inline_mode;
	bool icosq;
};

struct mlx5e_cq_param {
	u32 cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param wq;
	u16 eq_ix;
	u8 cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_sq_param icosq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
	struct mlx5e_cq_param icosq_cq;
};
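/* Link state handling: query the vport state from firmware and
 * propagate it to the netdev carrier flag.
 */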
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}
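/* Fold the per-ring RQ/SQ counters of every channel into the single
 * software stats structure exposed through ethtool and ndo_get_stats64.
 */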
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_wqe_err += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
			s->tx_tso_packets += sq_stats->tso_packets;
			s->tx_tso_bytes += sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none += sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
}
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;
	u32 *in;

	in = mlx5_vzalloc(sz);
	if (!in)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}

	kvfree(in);
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

	if (!priv->q_counter)
		return;

	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
				      &qcnt->rx_out_of_buffer);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_q_counter(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_sw_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
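/* RX queue (RQ) setup. Depending on priv->params.rq_wq_type the RQ is
 * either a multi-packet striding RQ (MPWQE) or a plain linked list of
 * single-packet WQEs; mlx5e_create_rq() prepares the software state,
 * mlx5e_enable_rq() creates the matching firmware object.
 */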
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
					    GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->mpwqe_mtt_offset = c->ix *
			MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));

		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
		rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->wqe_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
				       cpu_to_node(c->cpu));
		if (!rq->skb) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

		rq->wqe_sz = (priv->params.lro_en) ?
				priv->params.lro_wqe_sz :
				MLX5E_SW2HW_MTU(priv->netdev->mtu);
		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
		byte_count = rq->wqe_sz;
		byte_count |= MLX5_HW_START_PADDING;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = priv->params.rx_cq_period_mode;

	rq->wq_type = priv->params.rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = c->netdev;
	rq->tstamp = &priv->tstamp;
	rq->channel = c;
	rq->ix = c->ix;
	rq->priv = c->priv;
	rq->mkey_be = c->mkey_be;
	rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kfree(rq->wqe_info);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->skb);
	}

	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
		mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix = be16_to_cpu(wqe_ix_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	struct mlx5e_sq *sq = &c->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	if (param->am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
	sq->ico_wqe_info[pi].num_wqebbs = 1;
	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
	cancel_work_sync(&rq->am.work);

	mlx5e_disable_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_destroy_rq(rq);
}
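/* TX queue (SQ) setup: per-SQ software databases (skb ring, DMA fifo,
 * wqe_info) followed by UAR mapping and cyclic work queue creation.
 */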
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->wqe_info);
	kfree(sq->dma_fifo);
	kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);
	sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
				    numa);
	if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	if (sq->uar.bf_map) {
		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
		sq->uar_map = sq->uar.bf_map;
	} else {
		sq->uar_map = sq->uar.map;
	}
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline = param->max_inline;
	sq->min_inline_mode =
		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
		param->min_inline_mode : 0;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	if (param->icosq) {
		u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

		sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
						wq_sz,
						GFP_KERNEL,
						cpu_to_node(c->cpu));
		if (!sq->ico_wqe_info) {
			err = -ENOMEM;
			goto err_free_sq_db;
		}
	} else {
		txq_ix = c->ix + tc * priv->params.num_channels;
		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
		priv->txq_to_sq_map[txq_ix] = sq;
	}

	sq->pdev = c->pdev;
	sq->tstamp = &priv->tstamp;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc = tc;
	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;

	return 0;

err_free_sq_db:
	mlx5e_free_sq_db(sq);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	kfree(sq->ico_wqe_info);
	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
			   int next_state, bool update_rl, int rl_index)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);
	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
			      false, 0);
	if (err)
		goto err_disable_sq;

	if (sq->txq) {
		netdev_tx_reset_queue(sq->txq);
		netif_tx_start_queue(sq->txq);
	}

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&sq->channel->napi);

	if (sq->txq) {
		netif_tx_disable_queue(sq->txq);

		/* last doorbell out, godspeed .. */
		if (mlx5e_sq_has_room_for(sq, 1))
			mlx5e_send_nop(sq, true);
	}

	mlx5e_disable_sq(sq);
	mlx5e_free_tx_descs(sq);
	mlx5e_destroy_sq(sq);
}
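/* Completion queue (CQ) setup: mlx5e_create_cq() builds the software CQ
 * over an mlx5 CQ work queue, mlx5e_enable_cq() creates the firmware CQ
 * and points it at the channel's EQ.
 */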
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &mdev->mlx5e_res.cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					   MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 struct mlx5e_cq_moder moderation)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation.usec,
					       moderation.pkts);
	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < priv->profile->max_tc; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_sq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
			      MLX5_SQC_STATE_RDY, true, rl_index);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
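/* A channel bundles everything that serves one IRQ/CPU: an RQ, one SQ
 * per TC, an internal control SQ (ICOSQ), their CQs and a NAPI context.
 * CQs are opened first, queues last; teardown runs in reverse order.
 */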
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_cq_moder rx_cq_profile;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	struct mlx5e_sq *sq;
	int err;
	int i;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv = priv;
	c->ix = ix;
	c->cpu = cpu;
	c->pdev = &priv->mdev->pdev->dev;
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc = priv->params.num_tc;

	if (priv->params.rx_am_enabled)
		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
	else
		rx_cq_profile = priv->params.rx_cq_moderation;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    rx_cq_profile);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_icosq;

	for (i = 0; i < priv->params.num_tc; i++) {
		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

		if (priv->tx_rates[txq_ix]) {
			sq = priv->txq_to_sq_map[txq_ix];
			mlx5e_set_sq_maxrate(priv->netdev, sq,
					     priv->tx_rates[txq_ix]);
		}
	}

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_sq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);

	return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_sq(&c->icosq);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	napi_hash_del(&c->napi);

	kfree(c);
}
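/* The mlx5e_build_*_param() helpers fill the firmware command layouts
 * (rqc/sqc/cqc) once; the same parameter set is then used for every
 * channel opened by mlx5e_open_channels().
 */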
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (priv->params.rx_cqe_compress) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

	param->icosq = true;
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param *cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);

	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	/* FIXME: This is a W/A for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_start_all_queues(priv->netdev);

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(cparam);

	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
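/* RQ tables (RQTs): the indirect RQT spreads traffic across channels
 * for RSS, while each direct RQT targets a single channel's RQ. While
 * the device is closed, entries point at the drop RQ.
 */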
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;
		u32 rqn;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		      priv->channel[ix]->rq.rqn :
		      priv->drop_rq.rqn;
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
				      int ix)
{
	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		  priv->channel[ix]->rq.rqn :
		  priv->drop_rq.rqn;

	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
			    int ix, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;

	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}

	for (ix = 0; ix < priv->params.num_channels; ix++) {
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;
		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
	}
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}

void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->params.num_channels;
	int ntc = priv->params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);
#ifdef CONFIG_RFS_ACCEL
	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			goto err_close_channels;
	}
	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_remove_sqs_fwd_rules(priv);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &mdev->mlx5e_res.cq_uar;

	cq->priv = priv;

	return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}
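/* Indirect TIR context: select the L3/L4 header fields that feed the RX
 * hash for each traffic type, so e.g. TCP/UDP flows hash on the 4-tuple
 * and IPSec flows hash on IP addresses plus SPI.
 */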
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_tir_ctx_hash(tirc, priv);

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true,
			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
	}
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tirc,
					   priv->direct_tir[ix].rqt.rqtn);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
	int err = 0;
	int i;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params.num_tc = tc ? tc : 1;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx5e_setup_tc(dev, tc->tc);
}
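/* ndo_get_stats64: software counters provide the packet/byte totals,
 * the Q counter provides out-of-buffer drops, and the 802.3/RFC 2863
 * port counters provide the error breakdown.
 */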
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes = sstats->tx_bytes;

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
	stats->tx_dropped = sstats->tx_queue_dropped;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);

	return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	int err;

	mutex_lock(&priv->state_lock);

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_close_locked(priv->netdev);

	priv->params.lro_en = enable;
	err = mlx5e_modify_tirs_lro(priv);
	if (err) {
		netdev_err(netdev, "lro modify failed, %d\n", err);
		priv->params.lro_en = !enable;
	}

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->params.vlan_strip_disable = !enable;
	err = mlx5e_modify_rqs_vsd(priv, !enable);
	if (err)
		priv->params.vlan_strip_disable = enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
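/* mlx5e_handle_feature() applies a single bit from ndo_set_features: it
 * skips bits that did not change, runs the handler for the requested
 * direction, and mirrors the result into netdev->features on success.
 */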
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
#define MLX5_HW_MIN_MTU 64
#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
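/* An MTU change needs a full channel restart only in linked-list RQ mode
 * with LRO off, since in that mode the RX buffer size follows the MTU.
 */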
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	u16 max_mtu;
	u16 min_mtu;
	int err = 0;
	bool reset;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
	min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);

	if (new_mtu > max_mtu || new_mtu < min_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
			   __func__, new_mtu, min_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	reset = !priv->params.lro_en &&
		(priv->params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened && reset)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;
	mlx5e_set_dev_port_mtu(netdev);

	if (was_opened && reset)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
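/* For encapsulated skbs the HW can only offload checksum and GSO when the
 * outer UDP destination port was registered as a VXLAN port, so unknown
 * tunnels are pushed back to a software fallback.
 */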
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}
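/* TX timeout handler: flag every stopped SQ for flush and, if the device
 * is up, schedule tx_timeout_work, which recovers by closing and reopening
 * the channels under the state lock.
 */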
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
		struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	int i;

	priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < priv->params.ets.ets_cap; i++) {
		priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		priv->params.ets.prio_tc[i] = i;
	}

	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
	priv->params.ets.prio_tc[0] = 1;
	priv->params.ets.prio_tc[1] = 0;
}
#endif
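/* Spread the default RSS indirection table over the channels, capped by
 * the number of cores on the device's NUMA node so that RX completions
 * stay local.
 */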
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
	       MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
	       MLX5_CAP_ETH(mdev, reg_umr_sq);
}
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
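/* Rough worked example: PCIe Gen3 x4 gives pci_bw = 8000 * 4 = 32000,
 * which is both under 40000 and under a 40000 Mb/s port speed, so CQE
 * compression is enabled; Gen3 x8 (64000) leaves it off.
 */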
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}

static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
				   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5E_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5E_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev,
						min_inline_mode);
		break;
	case MLX5_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
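/* Fill in all software defaults for a new NIC netdev: queue sizes, RQ
 * type, CQE compression, CQ moderation, RSS/LRO parameters and the work
 * items used by the asynchronous paths.
 */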
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u32 link_speed = 0;
	u32 pci_bw = 0;
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			    MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_LINKED_LIST;

	/* set CQE compression */
	priv->params.rx_cqe_compress_admin = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_get_max_linkspeed(mdev, &link_speed);
		mlx5e_get_pci_bw(mdev, &pci_bw);
		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
			      link_speed, pci_bw);
		priv->params.rx_cqe_compress_admin =
			cqe_compress_heuristic(link_speed, pci_bw);
	}

	priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
			priv->params.rx_cqe_compress ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE;
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		priv->params.lro_en = true;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}

	mlx5_core_info(mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       priv->params.rx_cqe_compress_admin);

	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
						    BIT(priv->params.log_rq_size));

	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);

	priv->params.tx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	priv->params.num_tc = 1;
	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));

	priv->params.lro_wqe_sz =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	/* Initialize pflags */
	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			    priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

	priv->mdev                 = mdev;
	priv->netdev               = netdev;
	priv->params.num_channels  = profile->max_nch(mdev);
	priv->profile              = profile;
	priv->ppriv                = ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_ets_init(priv);
#endif

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops       = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features      |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features      |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}
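/* The UMR mkey holds the MTT translations that the striding RQ remaps at
 * runtime via UMR WQEs posted on the internal control (ICO) SQ; it is
 * sized for the maximum channel count and the largest MPW RQ.
 */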
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
					 BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);

	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_vxlan_cleanup(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
}
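/* RX init builds the steering pipeline bottom-up: RQTs, then TIRs, then
 * flow steering, then TC offload; the error labels below unwind the same
 * steps in exact reverse order.
 */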
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;
	int i;

	err = mlx5e_create_indirect_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_direct_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
		goto err_destroy_indirect_rqts;
	}

	err = mlx5e_create_indirect_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
		goto err_destroy_direct_rqts;
	}

	err = mlx5e_create_direct_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
		goto err_destroy_indirect_tirs;
	}

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	for (i = 0; i < priv->profile->max_nch(mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
#endif

	return 0;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	mlx5_lag_add(mdev, netdev);

	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	mlx5e_enable_async_events(priv);
	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = 0;
		rep.priv_data = priv;
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	queue_work(priv->wq, &priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(priv->mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init          = mlx5e_nic_init,
	.cleanup       = mlx5e_nic_cleanup,
	.init_rx       = mlx5e_init_nic_rx,
	.cleanup_rx    = mlx5e_cleanup_nic_rx,
	.init_tx       = mlx5e_init_nic_tx,
	.cleanup_tx    = mlx5e_cleanup_nic_tx,
	.enable        = mlx5e_nic_enable,
	.disable       = mlx5e_nic_disable,
	.update_stats  = mlx5e_update_stats,
	.max_nch       = mlx5e_get_max_num_channels,
	.max_tc        = MLX5E_MAX_NUM_TC,
};
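/* Allocate the netdev with one TX queue per (channel, traffic class) pair
 * and run the profile init hook; device resources are attached separately
 * in mlx5e_attach_netdev().
 */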
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = mlx5e_create_umr_mkey(priv);
	if (err) {
		mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
		return err;
	}

	err = profile->init_tx(priv);
	if (err)
		goto err_destroy_umr_mkey;

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	mlx5e_init_l2_addr(priv);

	mlx5e_set_dev_port_mtu(netdev);

	if (profile->enable)
		profile->enable(priv);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_cleanup_tx:
	profile->cleanup_tx(priv);

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);

	return err;
}
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (profile->disable)
		profile->disable(priv);

	flush_workqueue(priv->wq);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_close(netdev);
	netif_device_detach(netdev);
	rtnl_unlock();

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(priv);
	profile->cleanup_tx(priv);
	mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
	cancel_delayed_work_sync(&priv->update_stats_work);
}
/* The scope of mlx5e_attach and mlx5e_detach should be limited to creating
 * and destroying hardware contexts and connecting them to the current
 * netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(mdev, netdev);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}
static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(mdev, netdev);
	mlx5e_destroy_mdev_resources(mdev);
}
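/* Probe path for the ETH protocol interface: check HCA caps, register the
 * e-switch vport representors, create the netdev, attach HW resources and
 * only then register_netdev(), so a visible netdev is always functional.
 */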
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	void *ppriv = NULL;
	void *priv;
	int vport;
	int err;
	struct net_device *netdev;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

	mlx5e_register_vport_rep(mdev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		ppriv = &esw->offloads.vport_reps[0];

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_unregister_reps;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(mdev, priv);

err_unregister_reps:
	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	return NULL;
}
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	unregister_netdev(netdev);
	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	struct mlx5e_priv *priv = vpriv;
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(mdev, priv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}