/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
41 struct mlx5_flow_rule
*
42 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch
*esw
, int vport
, u32 sqn
)
44 struct mlx5_flow_destination dest
;
45 struct mlx5_flow_rule
*flow_rule
;
46 int match_header
= MLX5_MATCH_MISC_PARAMETERS
;
47 u32
*match_v
, *match_c
;
50 match_v
= kzalloc(MLX5_ST_SZ_BYTES(fte_match_param
), GFP_KERNEL
);
51 match_c
= kzalloc(MLX5_ST_SZ_BYTES(fte_match_param
), GFP_KERNEL
);
52 if (!match_v
|| !match_c
) {
53 esw_warn(esw
->dev
, "FDB: Failed to alloc match parameters\n");
54 flow_rule
= ERR_PTR(-ENOMEM
);
58 misc
= MLX5_ADDR_OF(fte_match_param
, match_v
, misc_parameters
);
59 MLX5_SET(fte_match_set_misc
, misc
, source_sqn
, sqn
);
60 MLX5_SET(fte_match_set_misc
, misc
, source_port
, 0x0); /* source vport is 0 */
62 misc
= MLX5_ADDR_OF(fte_match_param
, match_c
, misc_parameters
);
63 MLX5_SET_TO_ONES(fte_match_set_misc
, misc
, source_sqn
);
64 MLX5_SET_TO_ONES(fte_match_set_misc
, misc
, source_port
);
66 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_VPORT
;
67 dest
.vport_num
= vport
;
69 flow_rule
= mlx5_add_flow_rule(esw
->fdb_table
.fdb
, match_header
, match_c
,
70 match_v
, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
72 if (IS_ERR(flow_rule
))
73 esw_warn(esw
->dev
, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule
));
80 static int esw_add_fdb_miss_rule(struct mlx5_eswitch
*esw
)
82 struct mlx5_flow_destination dest
;
83 struct mlx5_flow_rule
*flow_rule
= NULL
;
84 u32
*match_v
, *match_c
;
87 match_v
= kzalloc(MLX5_ST_SZ_BYTES(fte_match_param
), GFP_KERNEL
);
88 match_c
= kzalloc(MLX5_ST_SZ_BYTES(fte_match_param
), GFP_KERNEL
);
89 if (!match_v
|| !match_c
) {
90 esw_warn(esw
->dev
, "FDB: Failed to alloc match parameters\n");
95 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_VPORT
;
98 flow_rule
= mlx5_add_flow_rule(esw
->fdb_table
.fdb
, 0, match_c
, match_v
,
99 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
, 0, &dest
);
100 if (IS_ERR(flow_rule
)) {
101 err
= PTR_ERR(flow_rule
);
102 esw_warn(esw
->dev
, "FDB: Failed to add miss flow rule err %d\n", err
);
106 esw
->fdb_table
.offloads
.miss_rule
= flow_rule
;
113 #define MAX_PF_SQ 256
115 static int esw_create_offloads_fdb_table(struct mlx5_eswitch
*esw
, int nvports
)
117 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
118 struct mlx5_core_dev
*dev
= esw
->dev
;
119 struct mlx5_flow_namespace
*root_ns
;
120 struct mlx5_flow_table
*fdb
= NULL
;
121 struct mlx5_flow_group
*g
;
123 void *match_criteria
;
124 int table_size
, ix
, err
= 0;
126 flow_group_in
= mlx5_vzalloc(inlen
);
130 root_ns
= mlx5_get_flow_namespace(dev
, MLX5_FLOW_NAMESPACE_FDB
);
132 esw_warn(dev
, "Failed to get FDB flow namespace\n");
136 esw_debug(dev
, "Create offloads FDB table, log_max_size(%d)\n",
137 MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, log_max_ft_size
));
139 table_size
= nvports
+ MAX_PF_SQ
+ 1;
140 fdb
= mlx5_create_flow_table(root_ns
, 0, table_size
, 0);
143 esw_warn(dev
, "Failed to create FDB Table err %d\n", err
);
146 esw
->fdb_table
.fdb
= fdb
;
148 /* create send-to-vport group */
149 memset(flow_group_in
, 0, inlen
);
150 MLX5_SET(create_flow_group_in
, flow_group_in
, match_criteria_enable
,
151 MLX5_MATCH_MISC_PARAMETERS
);
153 match_criteria
= MLX5_ADDR_OF(create_flow_group_in
, flow_group_in
, match_criteria
);
155 MLX5_SET_TO_ONES(fte_match_param
, match_criteria
, misc_parameters
.source_sqn
);
156 MLX5_SET_TO_ONES(fte_match_param
, match_criteria
, misc_parameters
.source_port
);
158 ix
= nvports
+ MAX_PF_SQ
;
159 MLX5_SET(create_flow_group_in
, flow_group_in
, start_flow_index
, 0);
160 MLX5_SET(create_flow_group_in
, flow_group_in
, end_flow_index
, ix
- 1);
162 g
= mlx5_create_flow_group(fdb
, flow_group_in
);
165 esw_warn(dev
, "Failed to create send-to-vport flow group err(%d)\n", err
);
168 esw
->fdb_table
.offloads
.send_to_vport_grp
= g
;
170 /* create miss group */
171 memset(flow_group_in
, 0, inlen
);
172 MLX5_SET(create_flow_group_in
, flow_group_in
, match_criteria_enable
, 0);
174 MLX5_SET(create_flow_group_in
, flow_group_in
, start_flow_index
, ix
);
175 MLX5_SET(create_flow_group_in
, flow_group_in
, end_flow_index
, ix
+ 1);
177 g
= mlx5_create_flow_group(fdb
, flow_group_in
);
180 esw_warn(dev
, "Failed to create miss flow group err(%d)\n", err
);
183 esw
->fdb_table
.offloads
.miss_grp
= g
;
185 err
= esw_add_fdb_miss_rule(esw
);
192 mlx5_destroy_flow_group(esw
->fdb_table
.offloads
.miss_grp
);
194 mlx5_destroy_flow_group(esw
->fdb_table
.offloads
.send_to_vport_grp
);
196 mlx5_destroy_flow_table(fdb
);
199 kvfree(flow_group_in
);
203 static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch
*esw
)
205 if (!esw
->fdb_table
.fdb
)
208 esw_debug(esw
->dev
, "Destroy offloads FDB Table\n");
209 mlx5_del_flow_rule(esw
->fdb_table
.offloads
.miss_rule
);
210 mlx5_destroy_flow_group(esw
->fdb_table
.offloads
.send_to_vport_grp
);
211 mlx5_destroy_flow_group(esw
->fdb_table
.offloads
.miss_grp
);
213 mlx5_destroy_flow_table(esw
->fdb_table
.fdb
);
216 static int esw_create_offloads_table(struct mlx5_eswitch
*esw
)
218 struct mlx5_flow_namespace
*ns
;
219 struct mlx5_flow_table
*ft_offloads
;
220 struct mlx5_core_dev
*dev
= esw
->dev
;
223 ns
= mlx5_get_flow_namespace(dev
, MLX5_FLOW_NAMESPACE_OFFLOADS
);
225 esw_warn(esw
->dev
, "Failed to get offloads flow namespace\n");
229 ft_offloads
= mlx5_create_flow_table(ns
, 0, dev
->priv
.sriov
.num_vfs
+ 2, 0);
230 if (IS_ERR(ft_offloads
)) {
231 err
= PTR_ERR(ft_offloads
);
232 esw_warn(esw
->dev
, "Failed to create offloads table, err %d\n", err
);
236 esw
->offloads
.ft_offloads
= ft_offloads
;
240 static void esw_destroy_offloads_table(struct mlx5_eswitch
*esw
)
242 struct mlx5_esw_offload
*offloads
= &esw
->offloads
;
244 mlx5_destroy_flow_table(offloads
->ft_offloads
);
247 static int esw_create_vport_rx_group(struct mlx5_eswitch
*esw
)
249 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
250 struct mlx5_flow_group
*g
;
251 struct mlx5_priv
*priv
= &esw
->dev
->priv
;
253 void *match_criteria
, *misc
;
255 int nvports
= priv
->sriov
.num_vfs
+ 2;
257 flow_group_in
= mlx5_vzalloc(inlen
);
261 /* create vport rx group */
262 memset(flow_group_in
, 0, inlen
);
263 MLX5_SET(create_flow_group_in
, flow_group_in
, match_criteria_enable
,
264 MLX5_MATCH_MISC_PARAMETERS
);
266 match_criteria
= MLX5_ADDR_OF(create_flow_group_in
, flow_group_in
, match_criteria
);
267 misc
= MLX5_ADDR_OF(fte_match_param
, match_criteria
, misc_parameters
);
268 MLX5_SET_TO_ONES(fte_match_set_misc
, misc
, source_port
);
270 MLX5_SET(create_flow_group_in
, flow_group_in
, start_flow_index
, 0);
271 MLX5_SET(create_flow_group_in
, flow_group_in
, end_flow_index
, nvports
- 1);
273 g
= mlx5_create_flow_group(esw
->offloads
.ft_offloads
, flow_group_in
);
277 mlx5_core_warn(esw
->dev
, "Failed to create vport rx group err %d\n", err
);
281 esw
->offloads
.vport_rx_group
= g
;
283 kfree(flow_group_in
);
287 static void esw_destroy_vport_rx_group(struct mlx5_eswitch
*esw
)
289 mlx5_destroy_flow_group(esw
->offloads
.vport_rx_group
);
292 struct mlx5_flow_rule
*
293 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch
*esw
, int vport
, u32 tirn
)
295 struct mlx5_flow_destination dest
;
296 struct mlx5_flow_rule
*flow_rule
;
297 int match_header
= MLX5_MATCH_MISC_PARAMETERS
;
298 u32
*match_v
, *match_c
;
301 match_v
= kzalloc(MLX5_ST_SZ_BYTES(fte_match_param
), GFP_KERNEL
);
302 match_c
= kzalloc(MLX5_ST_SZ_BYTES(fte_match_param
), GFP_KERNEL
);
303 if (!match_v
|| !match_c
) {
304 esw_warn(esw
->dev
, "Failed to alloc match parameters\n");
305 flow_rule
= ERR_PTR(-ENOMEM
);
309 misc
= MLX5_ADDR_OF(fte_match_param
, match_v
, misc_parameters
);
310 MLX5_SET(fte_match_set_misc
, misc
, source_port
, vport
);
312 misc
= MLX5_ADDR_OF(fte_match_param
, match_c
, misc_parameters
);
313 MLX5_SET_TO_ONES(fte_match_set_misc
, misc
, source_port
);
315 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_TIR
;
318 flow_rule
= mlx5_add_flow_rule(esw
->offloads
.ft_offloads
, match_header
, match_c
,
319 match_v
, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
321 if (IS_ERR(flow_rule
)) {
322 esw_warn(esw
->dev
, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule
));
332 static int esw_offloads_start(struct mlx5_eswitch
*esw
)
334 int err
, num_vfs
= esw
->dev
->priv
.sriov
.num_vfs
;
336 if (esw
->mode
!= SRIOV_LEGACY
) {
337 esw_warn(esw
->dev
, "Can't set offloads mode, SRIOV legacy not enabled\n");
341 mlx5_eswitch_disable_sriov(esw
);
342 err
= mlx5_eswitch_enable_sriov(esw
, num_vfs
, SRIOV_OFFLOADS
);
344 esw_warn(esw
->dev
, "Failed set eswitch to offloads, err %d\n", err
);
/* Initialize all offloads-mode steering objects: FDB table, offloads RX
 * table and its vport RX group.  On any failure the objects created so
 * far are destroyed in reverse order.  Returns 0 or a negative errno.
 */
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
	return err;
}
374 static int esw_offloads_stop(struct mlx5_eswitch
*esw
)
376 int err
, num_vfs
= esw
->dev
->priv
.sriov
.num_vfs
;
378 mlx5_eswitch_disable_sriov(esw
);
379 err
= mlx5_eswitch_enable_sriov(esw
, num_vfs
, SRIOV_LEGACY
);
381 esw_warn(esw
->dev
, "Failed set eswitch legacy mode. err %d\n", err
);
/* Destroy all offloads-mode steering objects, reverse of esw_offloads_init().
 * @nvports is unused here but kept for interface symmetry with init.
 */
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}
393 static int mlx5_esw_mode_from_devlink(u16 mode
, u16
*mlx5_mode
)
396 case DEVLINK_ESWITCH_MODE_LEGACY
:
397 *mlx5_mode
= SRIOV_LEGACY
;
399 case DEVLINK_ESWITCH_MODE_SWITCHDEV
:
400 *mlx5_mode
= SRIOV_OFFLOADS
;
409 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
)
411 struct mlx5_core_dev
*dev
;
412 u16 cur_mlx5_mode
, mlx5_mode
= 0;
414 dev
= devlink_priv(devlink
);
416 if (!MLX5_CAP_GEN(dev
, vport_group_manager
))
419 cur_mlx5_mode
= dev
->priv
.eswitch
->mode
;
421 if (cur_mlx5_mode
== SRIOV_NONE
)
424 if (mlx5_esw_mode_from_devlink(mode
, &mlx5_mode
))
427 if (cur_mlx5_mode
== mlx5_mode
)
430 if (mode
== DEVLINK_ESWITCH_MODE_SWITCHDEV
)
431 return esw_offloads_start(dev
->priv
.eswitch
);
432 else if (mode
== DEVLINK_ESWITCH_MODE_LEGACY
)
433 return esw_offloads_stop(dev
->priv
.eswitch
);
438 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
)
440 struct mlx5_core_dev
*dev
;
442 dev
= devlink_priv(devlink
);
444 if (!MLX5_CAP_GEN(dev
, vport_group_manager
))
447 if (dev
->priv
.eswitch
->mode
== SRIOV_NONE
)
450 *mode
= dev
->priv
.eswitch
->mode
;