drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40
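/* Vport number used to address the uplink (physical port): it is the
 * destination of the "forward to uplink" FDB rules and the source_port
 * value matched by the promiscuous rx rules below.
 */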
41 #define UPLINK_VPORT 0xFFFF
42
43 enum {
44 MLX5_ACTION_NONE = 0,
45 MLX5_ACTION_ADD = 1,
46 MLX5_ACTION_DEL = 2,
47 };
48
49 /* E-Switch UC L2 table hash node */
50 struct esw_uc_addr {
51 struct l2addr_node node;
52 u32 table_index;
53 u32 vport;
54 };
55
56 /* E-Switch MC FDB table hash node */
57 struct esw_mc_addr { /* SRIOV only */
58 struct l2addr_node node;
59 struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
60 u32 refcnt;
61 };
62
63 /* Vport UC/MC hash node */
64 struct vport_addr {
65 struct l2addr_node node;
66 u8 action;
67 u32 vport;
68 struct mlx5_flow_rule *flow_rule; /* SRIOV only */
69 /* A flag indicating that mac was added due to mc promiscuous vport */
70 bool mc_promisc;
71 };
72
73 enum {
74 UC_ADDR_CHANGE = BIT(0),
75 MC_ADDR_CHANGE = BIT(1),
76 PROMISC_CHANGE = BIT(3),
77 };
78
79 /* Vport context events */
80 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
81 MC_ADDR_CHANGE | \
82 PROMISC_CHANGE)
83
84 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
85 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
86
87 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
88 u32 events_mask)
89 {
90 int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
91 int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
92 void *nic_vport_ctx;
93
94 MLX5_SET(modify_nic_vport_context_in, in,
95 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
96 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
97 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
98 if (vport)
99 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
100 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
101 in, nic_vport_context);
102
103 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
104
105 if (events_mask & UC_ADDR_CHANGE)
106 MLX5_SET(nic_vport_context, nic_vport_ctx,
107 event_on_uc_address_change, 1);
108 if (events_mask & MC_ADDR_CHANGE)
109 MLX5_SET(nic_vport_context, nic_vport_ctx,
110 event_on_mc_address_change, 1);
111 if (events_mask & PROMISC_CHANGE)
112 MLX5_SET(nic_vport_context, nic_vport_ctx,
113 event_on_promisc_change, 1);
114
115 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
116 }
117
118 /* E-Switch vport context HW commands */
119 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
120 void *in, int inlen)
121 {
122 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
123
124 MLX5_SET(modify_esw_vport_context_in, in, opcode,
125 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
126 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
127 if (vport)
128 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
129 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
130 }
131
132 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
133 u16 vlan, u8 qos, bool set)
134 {
135 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
136
137 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
138 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
139 return -ENOTSUPP;
140
141 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
142 vport, vlan, qos, set);
143 if (set) {
144 MLX5_SET(modify_esw_vport_context_in, in,
145 esw_vport_context.vport_cvlan_strip, 1);
146 /* insert only if no vlan in packet */
147 MLX5_SET(modify_esw_vport_context_in, in,
148 esw_vport_context.vport_cvlan_insert, 1);
149 MLX5_SET(modify_esw_vport_context_in, in,
150 esw_vport_context.cvlan_pcp, qos);
151 MLX5_SET(modify_esw_vport_context_in, in,
152 esw_vport_context.cvlan_id, vlan);
153 }
154
155 MLX5_SET(modify_esw_vport_context_in, in,
156 field_select.vport_cvlan_strip, 1);
157 MLX5_SET(modify_esw_vport_context_in, in,
158 field_select.vport_cvlan_insert, 1);
159
160 return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
161 }
162
163 /* HW L2 Table (MPFS) management */
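/* The MPFS (Multi-Physical Function Switch) L2 table holds one entry per
 * vport unicast MAC; table indices are handed out by a simple bitmap
 * allocator and programmed/removed with the SET/DELETE_L2_TABLE_ENTRY
 * commands.
 */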
164 static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
165 u8 *mac, u8 vlan_valid, u16 vlan)
166 {
167 u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
168 u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
169 u8 *in_mac_addr;
170
171 MLX5_SET(set_l2_table_entry_in, in, opcode,
172 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
173 MLX5_SET(set_l2_table_entry_in, in, table_index, index);
174 MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
175 MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);
176
177 in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
178 ether_addr_copy(&in_mac_addr[2], mac);
179
180 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
181 }
182
183 static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
184 {
185 u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
186 u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
187
188 MLX5_SET(delete_l2_table_entry_in, in, opcode,
189 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
190 MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
191 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
192 }
193
194 static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
195 {
196 int err = 0;
197
198 *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
199 if (*ix >= l2_table->size)
200 err = -ENOSPC;
201 else
202 __set_bit(*ix, l2_table->bitmap);
203
204 return err;
205 }
206
207 static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
208 {
209 __clear_bit(ix, l2_table->bitmap);
210 }
211
212 static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
213 u8 vlan_valid, u16 vlan,
214 u32 *index)
215 {
216 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
217 int err;
218
219 err = alloc_l2_table_index(l2_table, index);
220 if (err)
221 return err;
222
223 err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
224 if (err)
225 free_l2_table_index(l2_table, *index);
226
227 return err;
228 }
229
230 static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
231 {
232 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
233
234 del_l2_table_entry_cmd(dev, index);
235 free_l2_table_index(l2_table, index);
236 }
237
238 /* E-Switch FDB */
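/* Legacy-mode FDB rules come in three flavors, all built by
 * __esw_fdb_set_vport_rule():
 * - exact DMAC match -> forward to a given vport (UC/MC addresses)
 * - multicast-bit-only match (01:00:00:00:00:00) -> allmulti vport
 * - no DMAC match, source_port == UPLINK_VPORT -> promiscuous vport
 *   (rx_rule == true)
 */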
239 static struct mlx5_flow_rule *
240 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
241 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
242 {
243 int match_header = (is_zero_ether_addr(mac_c) ? 0 :
244 MLX5_MATCH_OUTER_HEADERS);
245 struct mlx5_flow_rule *flow_rule = NULL;
246 struct mlx5_flow_destination dest;
247 struct mlx5_flow_spec *spec;
248 void *mv_misc = NULL;
249 void *mc_misc = NULL;
250 u8 *dmac_v = NULL;
251 u8 *dmac_c = NULL;
252
253 if (rx_rule)
254 match_header |= MLX5_MATCH_MISC_PARAMETERS;
255
256 spec = mlx5_vzalloc(sizeof(*spec));
257 if (!spec) {
258 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
259 return NULL;
260 }
261 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
262 outer_headers.dmac_47_16);
263 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
264 outer_headers.dmac_47_16);
265
266 if (match_header & MLX5_MATCH_OUTER_HEADERS) {
267 ether_addr_copy(dmac_v, mac_v);
268 ether_addr_copy(dmac_c, mac_c);
269 }
270
271 if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
272 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
273 misc_parameters);
274 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
275 misc_parameters);
276 MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
277 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
278 }
279
280 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
281 dest.vport_num = vport;
282
283 esw_debug(esw->dev,
284 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
285 dmac_v, dmac_c, vport);
286 spec->match_criteria_enable = match_header;
287 flow_rule =
288 mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
289 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
290 0, &dest);
291 if (IS_ERR(flow_rule)) {
292 esw_warn(esw->dev,
293 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
294 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
295 flow_rule = NULL;
296 }
297
298 kvfree(spec);
299 return flow_rule;
300 }
301
302 static struct mlx5_flow_rule *
303 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
304 {
305 u8 mac_c[ETH_ALEN];
306
307 eth_broadcast_addr(mac_c);
308 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
309 }
310
311 static struct mlx5_flow_rule *
312 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
313 {
314 u8 mac_c[ETH_ALEN];
315 u8 mac_v[ETH_ALEN];
316
317 eth_zero_addr(mac_c);
318 eth_zero_addr(mac_v);
319 mac_c[0] = 0x01;
320 mac_v[0] = 0x01;
321 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
322 }
323
324 static struct mlx5_flow_rule *
325 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
326 {
327 u8 mac_c[ETH_ALEN];
328 u8 mac_v[ETH_ALEN];
329
330 eth_zero_addr(mac_c);
331 eth_zero_addr(mac_v);
332 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
333 }
334
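/* The legacy FDB is a single flow table sized by log_max_ft_size and
 * split into three flow groups:
 * - entries [0 .. size-3]: full DMAC match (UC/MC addresses)
 * - entry [size-2]:        allmulti (multicast bit only)
 * - entry [size-1]:        promiscuous (source_port match only)
 */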
335 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
336 {
337 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
338 struct mlx5_core_dev *dev = esw->dev;
339 struct mlx5_flow_namespace *root_ns;
340 struct mlx5_flow_table *fdb;
341 struct mlx5_flow_group *g;
342 void *match_criteria;
343 int table_size;
344 u32 *flow_group_in;
345 u8 *dmac;
346 int err = 0;
347
348 esw_debug(dev, "Create FDB log_max_size(%d)\n",
349 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
350
351 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
352 if (!root_ns) {
353 esw_warn(dev, "Failed to get FDB flow namespace\n");
354 return -ENOMEM;
355 }
356
357 flow_group_in = mlx5_vzalloc(inlen);
358 if (!flow_group_in)
359 return -ENOMEM;
360 memset(flow_group_in, 0, inlen);
361
362 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
363 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
364 if (IS_ERR(fdb)) {
365 err = PTR_ERR(fdb);
366 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
367 goto out;
368 }
369 esw->fdb_table.fdb = fdb;
370
371 /* Addresses group : Full match unicast/multicast addresses */
372 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
373 MLX5_MATCH_OUTER_HEADERS);
374 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
375 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
376 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
377 /* Preserve 2 entries for allmulti and promisc rules */
378 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
379 eth_broadcast_addr(dmac);
380 g = mlx5_create_flow_group(fdb, flow_group_in);
381 if (IS_ERR(g)) {
382 err = PTR_ERR(g);
383 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
384 goto out;
385 }
386 esw->fdb_table.legacy.addr_grp = g;
387
388 /* Allmulti group : One rule that forwards any mcast traffic */
389 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
390 MLX5_MATCH_OUTER_HEADERS);
391 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
392 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
393 eth_zero_addr(dmac);
394 dmac[0] = 0x01;
395 g = mlx5_create_flow_group(fdb, flow_group_in);
396 if (IS_ERR(g)) {
397 err = PTR_ERR(g);
398 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
399 goto out;
400 }
401 esw->fdb_table.legacy.allmulti_grp = g;
402
403 /* Promiscuous group :
404 * One rule that forward all unmatched traffic from previous groups
405 */
406 eth_zero_addr(dmac);
407 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
408 MLX5_MATCH_MISC_PARAMETERS);
409 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
410 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
411 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
412 g = mlx5_create_flow_group(fdb, flow_group_in);
413 if (IS_ERR(g)) {
414 err = PTR_ERR(g);
415 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
416 goto out;
417 }
418 esw->fdb_table.legacy.promisc_grp = g;
419
420 out:
421 if (err) {
422 if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
423 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
424 esw->fdb_table.legacy.allmulti_grp = NULL;
425 }
426 if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
427 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
428 esw->fdb_table.legacy.addr_grp = NULL;
429 }
430 if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
431 mlx5_destroy_flow_table(esw->fdb_table.fdb);
432 esw->fdb_table.fdb = NULL;
433 }
434 }
435
436 kvfree(flow_group_in);
437 return err;
438 }
439
440 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
441 {
442 if (!esw->fdb_table.fdb)
443 return;
444
445 esw_debug(esw->dev, "Destroy FDB Table\n");
446 mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
447 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
448 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
449 mlx5_destroy_flow_table(esw->fdb_table.fdb);
450 esw->fdb_table.fdb = NULL;
451 esw->fdb_table.legacy.addr_grp = NULL;
452 esw->fdb_table.legacy.allmulti_grp = NULL;
453 esw->fdb_table.legacy.promisc_grp = NULL;
454 }
455
456 /* E-Switch vport UC/MC lists management */
457 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
458 struct vport_addr *vaddr);
459
460 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
461 {
462 struct hlist_head *hash = esw->l2_table.l2_hash;
463 struct esw_uc_addr *esw_uc;
464 u8 *mac = vaddr->node.addr;
465 u32 vport = vaddr->vport;
466 int err;
467
468 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
469 if (esw_uc) {
470 esw_warn(esw->dev,
471 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
472 mac, vport, esw_uc->vport);
473 return -EEXIST;
474 }
475
476 esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
477 if (!esw_uc)
478 return -ENOMEM;
479 esw_uc->vport = vport;
480
481 err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
482 if (err)
483 goto abort;
484
485 /* SRIOV is enabled: Forward UC MAC to vport */
486 if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
487 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
488
489 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
490 vport, mac, esw_uc->table_index, vaddr->flow_rule);
491 return err;
492 abort:
493 l2addr_hash_del(esw_uc);
494 return err;
495 }
496
497 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
498 {
499 struct hlist_head *hash = esw->l2_table.l2_hash;
500 struct esw_uc_addr *esw_uc;
501 u8 *mac = vaddr->node.addr;
502 u32 vport = vaddr->vport;
503
504 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
505 if (!esw_uc || esw_uc->vport != vport) {
506 esw_debug(esw->dev,
507 "MAC(%pM) doesn't belong to vport (%d)\n",
508 mac, vport);
509 return -EINVAL;
510 }
511 esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
512 vport, mac, esw_uc->table_index, vaddr->flow_rule);
513
514 del_l2_table_entry(esw->dev, esw_uc->table_index);
515
516 if (vaddr->flow_rule)
517 mlx5_del_flow_rule(vaddr->flow_rule);
518 vaddr->flow_rule = NULL;
519
520 l2addr_hash_del(esw_uc);
521 return 0;
522 }
523
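/* Propagate an MC address change to every vport that is currently in
 * allmulti mode: on ADD, install a per-vport FDB rule for the new MAC
 * (marked mc_promisc so it is not refcounted); on DEL, remove it again.
 * The vport that originated the change is skipped.
 */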
524 static void update_allmulti_vports(struct mlx5_eswitch *esw,
525 struct vport_addr *vaddr,
526 struct esw_mc_addr *esw_mc)
527 {
528 u8 *mac = vaddr->node.addr;
529 u32 vport_idx = 0;
530
531 for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
532 struct mlx5_vport *vport = &esw->vports[vport_idx];
533 struct hlist_head *vport_hash = vport->mc_list;
534 struct vport_addr *iter_vaddr =
535 l2addr_hash_find(vport_hash,
536 mac,
537 struct vport_addr);
538 if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
539 vaddr->vport == vport_idx)
540 continue;
541 switch (vaddr->action) {
542 case MLX5_ACTION_ADD:
543 if (iter_vaddr)
544 continue;
545 iter_vaddr = l2addr_hash_add(vport_hash, mac,
546 struct vport_addr,
547 GFP_KERNEL);
548 if (!iter_vaddr) {
549 esw_warn(esw->dev,
550 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
551 mac, vport_idx);
552 continue;
553 }
554 iter_vaddr->vport = vport_idx;
555 iter_vaddr->flow_rule =
556 esw_fdb_set_vport_rule(esw,
557 mac,
558 vport_idx);
559 iter_vaddr->mc_promisc = true;
560 break;
561 case MLX5_ACTION_DEL:
562 if (!iter_vaddr)
563 continue;
564 mlx5_del_flow_rule(iter_vaddr->flow_rule);
565 l2addr_hash_del(iter_vaddr);
566 break;
567 }
568 }
569 }
570
571 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
572 {
573 struct hlist_head *hash = esw->mc_table;
574 struct esw_mc_addr *esw_mc;
575 u8 *mac = vaddr->node.addr;
576 u32 vport = vaddr->vport;
577
578 if (!esw->fdb_table.fdb)
579 return 0;
580
581 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
582 if (esw_mc)
583 goto add;
584
585 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
586 if (!esw_mc)
587 return -ENOMEM;
588
589 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
590 esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
591
592 /* Add this multicast mac to all the mc promiscuous vports */
593 update_allmulti_vports(esw, vaddr, esw_mc);
594
595 add:
596 /* If the multicast mac is added as a result of mc promiscuous vport,
597 * don't increment the multicast ref count
598 */
599 if (!vaddr->mc_promisc)
600 esw_mc->refcnt++;
601
602 /* Forward MC MAC to vport */
603 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
604 esw_debug(esw->dev,
605 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
606 vport, mac, vaddr->flow_rule,
607 esw_mc->refcnt, esw_mc->uplink_rule);
608 return 0;
609 }
610
611 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
612 {
613 struct hlist_head *hash = esw->mc_table;
614 struct esw_mc_addr *esw_mc;
615 u8 *mac = vaddr->node.addr;
616 u32 vport = vaddr->vport;
617
618 if (!esw->fdb_table.fdb)
619 return 0;
620
621 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
622 if (!esw_mc) {
623 esw_warn(esw->dev,
624 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
625 mac, vport);
626 return -EINVAL;
627 }
628 esw_debug(esw->dev,
629 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
630 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
631 esw_mc->uplink_rule);
632
633 if (vaddr->flow_rule)
634 mlx5_del_flow_rule(vaddr->flow_rule);
635 vaddr->flow_rule = NULL;
636
637 /* If the multicast mac is added as a result of mc promiscuous vport,
638 * don't decrement the multicast ref count.
639 */
640 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
641 return 0;
642
643 /* Remove this multicast mac from all the mc promiscuous vports */
644 update_allmulti_vports(esw, vaddr, esw_mc);
645
646 if (esw_mc->uplink_rule)
647 mlx5_del_flow_rule(esw_mc->uplink_rule);
648
649 l2addr_hash_del(esw_mc);
650 return 0;
651 }
652
653 /* Apply vport UC/MC list to HW l2 table and FDB table */
654 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
655 u32 vport_num, int list_type)
656 {
657 struct mlx5_vport *vport = &esw->vports[vport_num];
658 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
659 vport_addr_action vport_addr_add;
660 vport_addr_action vport_addr_del;
661 struct vport_addr *addr;
662 struct l2addr_node *node;
663 struct hlist_head *hash;
664 struct hlist_node *tmp;
665 int hi;
666
667 vport_addr_add = is_uc ? esw_add_uc_addr :
668 esw_add_mc_addr;
669 vport_addr_del = is_uc ? esw_del_uc_addr :
670 esw_del_mc_addr;
671
672 hash = is_uc ? vport->uc_list : vport->mc_list;
673 for_each_l2hash_node(node, tmp, hash, hi) {
674 addr = container_of(node, struct vport_addr, node);
675 switch (addr->action) {
676 case MLX5_ACTION_ADD:
677 vport_addr_add(esw, addr);
678 addr->action = MLX5_ACTION_NONE;
679 break;
680 case MLX5_ACTION_DEL:
681 vport_addr_del(esw, addr);
682 l2addr_hash_del(addr);
683 break;
684 }
685 }
686 }
687
688 /* Sync vport UC/MC list from vport context */
689 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
690 u32 vport_num, int list_type)
691 {
692 struct mlx5_vport *vport = &esw->vports[vport_num];
693 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
694 u8 (*mac_list)[ETH_ALEN];
695 struct l2addr_node *node;
696 struct vport_addr *addr;
697 struct hlist_head *hash;
698 struct hlist_node *tmp;
699 int size;
700 int err;
701 int hi;
702 int i;
703
704 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
705 MLX5_MAX_MC_PER_VPORT(esw->dev);
706
707 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
708 if (!mac_list)
709 return;
710
711 hash = is_uc ? vport->uc_list : vport->mc_list;
712
713 for_each_l2hash_node(node, tmp, hash, hi) {
714 addr = container_of(node, struct vport_addr, node);
715 addr->action = MLX5_ACTION_DEL;
716 }
717
718 if (!vport->enabled)
719 goto out;
720
721 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
722 mac_list, &size);
723 if (err)
724 goto out;
725 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
726 vport_num, is_uc ? "UC" : "MC", size);
727
728 for (i = 0; i < size; i++) {
729 if (is_uc && !is_valid_ether_addr(mac_list[i]))
730 continue;
731
732 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
733 continue;
734
735 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
736 if (addr) {
737 addr->action = MLX5_ACTION_NONE;
738 /* If this mac was previously added because of allmulti
739 * promiscuous rx mode, it is now converted to a regular
740 * vport mac.
741 */
742 if (addr->mc_promisc) {
743 struct esw_mc_addr *esw_mc =
744 l2addr_hash_find(esw->mc_table,
745 mac_list[i],
746 struct esw_mc_addr);
747 if (!esw_mc) {
748 esw_warn(esw->dev,
749 "Failed to MAC(%pM) in mcast DB\n",
750 mac_list[i]);
751 continue;
752 }
753 esw_mc->refcnt++;
754 addr->mc_promisc = false;
755 }
756 continue;
757 }
758
759 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
760 GFP_KERNEL);
761 if (!addr) {
762 esw_warn(esw->dev,
763 "Failed to add MAC(%pM) to vport[%d] DB\n",
764 mac_list[i], vport_num);
765 continue;
766 }
767 addr->vport = vport_num;
768 addr->action = MLX5_ACTION_ADD;
769 }
770 out:
771 kfree(mac_list);
772 }
773
774 /* Sync an mc-promiscuous vport's MC list with all multicast addresses
775 * known to the e-switch. Must be called after esw_update_vport_addr_list.
776 */
777 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
778 {
779 struct mlx5_vport *vport = &esw->vports[vport_num];
780 struct l2addr_node *node;
781 struct vport_addr *addr;
782 struct hlist_head *hash;
783 struct hlist_node *tmp;
784 int hi;
785
786 hash = vport->mc_list;
787
788 for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
789 u8 *mac = node->addr;
790
791 addr = l2addr_hash_find(hash, mac, struct vport_addr);
792 if (addr) {
793 if (addr->action == MLX5_ACTION_DEL)
794 addr->action = MLX5_ACTION_NONE;
795 continue;
796 }
797 addr = l2addr_hash_add(hash, mac, struct vport_addr,
798 GFP_KERNEL);
799 if (!addr) {
800 esw_warn(esw->dev,
801 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
802 mac, vport_num);
803 continue;
804 }
805 addr->vport = vport_num;
806 addr->action = MLX5_ACTION_ADD;
807 addr->mc_promisc = true;
808 }
809 }
810
811 /* Apply vport rx mode to HW FDB table */
812 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
813 bool promisc, bool mc_promisc)
814 {
815 struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
816 struct mlx5_vport *vport = &esw->vports[vport_num];
817
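/* Nothing to do if the installed allmulti rule already matches the
 * requested state; the same early-exit is applied below for the
 * promisc rule.
 */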
818 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
819 goto promisc;
820
821 if (mc_promisc) {
822 vport->allmulti_rule =
823 esw_fdb_set_vport_allmulti_rule(esw, vport_num);
824 if (!allmulti_addr->uplink_rule)
825 allmulti_addr->uplink_rule =
826 esw_fdb_set_vport_allmulti_rule(esw,
827 UPLINK_VPORT);
828 allmulti_addr->refcnt++;
829 } else if (vport->allmulti_rule) {
830 mlx5_del_flow_rule(vport->allmulti_rule);
831 vport->allmulti_rule = NULL;
832
833 if (--allmulti_addr->refcnt > 0)
834 goto promisc;
835
836 if (allmulti_addr->uplink_rule)
837 mlx5_del_flow_rule(allmulti_addr->uplink_rule);
838 allmulti_addr->uplink_rule = NULL;
839 }
840
841 promisc:
842 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
843 return;
844
845 if (promisc) {
846 vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
847 vport_num);
848 } else if (vport->promisc_rule) {
849 mlx5_del_flow_rule(vport->promisc_rule);
850 vport->promisc_rule = NULL;
851 }
852 }
853
854 /* Sync vport rx mode from vport context */
855 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
856 {
857 struct mlx5_vport *vport = &esw->vports[vport_num];
858 int promisc_all = 0;
859 int promisc_uc = 0;
860 int promisc_mc = 0;
861 int err;
862
863 err = mlx5_query_nic_vport_promisc(esw->dev,
864 vport_num,
865 &promisc_uc,
866 &promisc_mc,
867 &promisc_all);
868 if (err)
869 return;
870 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
871 vport_num, promisc_all, promisc_mc);
872
873 if (!vport->info.trusted || !vport->enabled) {
874 promisc_uc = 0;
875 promisc_mc = 0;
876 promisc_all = 0;
877 }
878
879 esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
880 (promisc_all || promisc_mc));
881 }
882
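/* Handle a vport context change event with esw->state_lock held:
 * depending on the enabled events, re-read the vport UC/MC address lists
 * and rx mode from the NIC vport context, apply the deltas to the L2 and
 * FDB tables, then re-arm the change events while the vport is enabled.
 */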
883 static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
884 {
885 struct mlx5_core_dev *dev = vport->dev;
886 struct mlx5_eswitch *esw = dev->priv.eswitch;
887 u8 mac[ETH_ALEN];
888
889 mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
890 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
891 vport->vport, mac);
892
893 if (vport->enabled_events & UC_ADDR_CHANGE) {
894 esw_update_vport_addr_list(esw, vport->vport,
895 MLX5_NVPRT_LIST_TYPE_UC);
896 esw_apply_vport_addr_list(esw, vport->vport,
897 MLX5_NVPRT_LIST_TYPE_UC);
898 }
899
900 if (vport->enabled_events & MC_ADDR_CHANGE) {
901 esw_update_vport_addr_list(esw, vport->vport,
902 MLX5_NVPRT_LIST_TYPE_MC);
903 }
904
905 if (vport->enabled_events & PROMISC_CHANGE) {
906 esw_update_vport_rx_mode(esw, vport->vport);
907 if (!IS_ERR_OR_NULL(vport->allmulti_rule))
908 esw_update_vport_mc_promisc(esw, vport->vport);
909 }
910
911 if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
912 esw_apply_vport_addr_list(esw, vport->vport,
913 MLX5_NVPRT_LIST_TYPE_MC);
914 }
915
916 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
917 if (vport->enabled)
918 arm_vport_context_events_cmd(dev, vport->vport,
919 vport->enabled_events);
920 }
921
922 static void esw_vport_change_handler(struct work_struct *work)
923 {
924 struct mlx5_vport *vport =
925 container_of(work, struct mlx5_vport, vport_change_handler);
926 struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
927
928 mutex_lock(&esw->state_lock);
929 esw_vport_change_handle_locked(vport);
930 mutex_unlock(&esw->state_lock);
931 }
932
933 static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
934 struct mlx5_vport *vport)
935 {
936 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
937 struct mlx5_flow_group *vlan_grp = NULL;
938 struct mlx5_flow_group *drop_grp = NULL;
939 struct mlx5_core_dev *dev = esw->dev;
940 struct mlx5_flow_namespace *root_ns;
941 struct mlx5_flow_table *acl;
942 void *match_criteria;
943 u32 *flow_group_in;
944 /* The egress acl table contains 2 rules:
945 * 1)Allow traffic with vlan_tag=vst_vlan_id
946 * 2)Drop all other traffic.
947 */
948 int table_size = 2;
949 int err = 0;
950
951 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
952 !IS_ERR_OR_NULL(vport->egress.acl))
953 return;
954
955 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
956 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
957
958 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
959 if (!root_ns) {
960 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
961 return;
962 }
963
964 flow_group_in = mlx5_vzalloc(inlen);
965 if (!flow_group_in)
966 return;
967
968 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
969 if (IS_ERR(acl)) {
970 err = PTR_ERR(acl);
971 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
972 vport->vport, err);
973 goto out;
974 }
975
976 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
977 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
978 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
979 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
980 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
981 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
982
983 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
984 if (IS_ERR(vlan_grp)) {
985 err = PTR_ERR(vlan_grp);
986 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
987 vport->vport, err);
988 goto out;
989 }
990
991 memset(flow_group_in, 0, inlen);
992 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
993 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
994 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
995 if (IS_ERR(drop_grp)) {
996 err = PTR_ERR(drop_grp);
997 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
998 vport->vport, err);
999 goto out;
1000 }
1001
1002 vport->egress.acl = acl;
1003 vport->egress.drop_grp = drop_grp;
1004 vport->egress.allowed_vlans_grp = vlan_grp;
1005 out:
1006 kvfree(flow_group_in);
1007 if (err && !IS_ERR_OR_NULL(vlan_grp))
1008 mlx5_destroy_flow_group(vlan_grp);
1009 if (err && !IS_ERR_OR_NULL(acl))
1010 mlx5_destroy_flow_table(acl);
1011 }
1012
1013 static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1014 struct mlx5_vport *vport)
1015 {
1016 if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
1017 mlx5_del_flow_rule(vport->egress.allowed_vlan);
1018
1019 if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
1020 mlx5_del_flow_rule(vport->egress.drop_rule);
1021
1022 vport->egress.allowed_vlan = NULL;
1023 vport->egress.drop_rule = NULL;
1024 }
1025
1026 static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1027 struct mlx5_vport *vport)
1028 {
1029 if (IS_ERR_OR_NULL(vport->egress.acl))
1030 return;
1031
1032 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
1033
1034 esw_vport_cleanup_egress_rules(esw, vport);
1035 mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
1036 mlx5_destroy_flow_group(vport->egress.drop_grp);
1037 mlx5_destroy_flow_table(vport->egress.acl);
1038 vport->egress.allowed_vlans_grp = NULL;
1039 vport->egress.drop_grp = NULL;
1040 vport->egress.acl = NULL;
1041 }
1042
1043 static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1044 struct mlx5_vport *vport)
1045 {
1046 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1047 struct mlx5_core_dev *dev = esw->dev;
1048 struct mlx5_flow_namespace *root_ns;
1049 struct mlx5_flow_table *acl;
1050 struct mlx5_flow_group *g;
1051 void *match_criteria;
1052 u32 *flow_group_in;
1053 /* The ingress acl table contains 4 groups
1054 * (2 active rules at the same time -
1055 * 1 allow rule from one of the first 3 groups.
1056 * 1 drop rule from the last group):
1057 * 1)Allow untagged traffic with smac=original mac.
1058 * 2)Allow untagged traffic.
1059 * 3)Allow traffic with smac=original mac.
1060 * 4)Drop all other traffic.
1061 */
1062 int table_size = 4;
1063 int err = 0;
1064
1065 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
1066 !IS_ERR_OR_NULL(vport->ingress.acl))
1067 return;
1068
1069 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1070 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
1071
1072 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
1073 if (!root_ns) {
1074 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
1075 return;
1076 }
1077
1078 flow_group_in = mlx5_vzalloc(inlen);
1079 if (!flow_group_in)
1080 return;
1081
1082 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1083 if (IS_ERR(acl)) {
1084 err = PTR_ERR(acl);
1085 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
1086 vport->vport, err);
1087 goto out;
1088 }
1089 vport->ingress.acl = acl;
1090
1091 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1092
1093 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1094 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
1095 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1096 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1097 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1098 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1099
1100 g = mlx5_create_flow_group(acl, flow_group_in);
1101 if (IS_ERR(g)) {
1102 err = PTR_ERR(g);
1103 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
1104 vport->vport, err);
1105 goto out;
1106 }
1107 vport->ingress.allow_untagged_spoofchk_grp = g;
1108
1109 memset(flow_group_in, 0, inlen);
1110 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1111 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
1112 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1113 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1114
1115 g = mlx5_create_flow_group(acl, flow_group_in);
1116 if (IS_ERR(g)) {
1117 err = PTR_ERR(g);
1118 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
1119 vport->vport, err);
1120 goto out;
1121 }
1122 vport->ingress.allow_untagged_only_grp = g;
1123
1124 memset(flow_group_in, 0, inlen);
1125 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1126 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1127 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1128 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
1129 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1130
1131 g = mlx5_create_flow_group(acl, flow_group_in);
1132 if (IS_ERR(g)) {
1133 err = PTR_ERR(g);
1134 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
1135 vport->vport, err);
1136 goto out;
1137 }
1138 vport->ingress.allow_spoofchk_only_grp = g;
1139
1140 memset(flow_group_in, 0, inlen);
1141 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
1142 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1143
1144 g = mlx5_create_flow_group(acl, flow_group_in);
1145 if (IS_ERR(g)) {
1146 err = PTR_ERR(g);
1147 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
1148 vport->vport, err);
1149 goto out;
1150 }
1151 vport->ingress.drop_grp = g;
1152
1153 out:
1154 if (err) {
1155 if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
1156 mlx5_destroy_flow_group(
1157 vport->ingress.allow_spoofchk_only_grp);
1158 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
1159 mlx5_destroy_flow_group(
1160 vport->ingress.allow_untagged_only_grp);
1161 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
1162 mlx5_destroy_flow_group(
1163 vport->ingress.allow_untagged_spoofchk_grp);
1164 if (!IS_ERR_OR_NULL(vport->ingress.acl))
1165 mlx5_destroy_flow_table(vport->ingress.acl);
1166 }
1167
1168 kvfree(flow_group_in);
1169 }
1170
1171 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1172 struct mlx5_vport *vport)
1173 {
1174 if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
1175 mlx5_del_flow_rule(vport->ingress.drop_rule);
1176
1177 if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
1178 mlx5_del_flow_rule(vport->ingress.allow_rule);
1179
1180 vport->ingress.drop_rule = NULL;
1181 vport->ingress.allow_rule = NULL;
1182 }
1183
1184 static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1185 struct mlx5_vport *vport)
1186 {
1187 if (IS_ERR_OR_NULL(vport->ingress.acl))
1188 return;
1189
1190 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
1191
1192 esw_vport_cleanup_ingress_rules(esw, vport);
1193 mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
1194 mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
1195 mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
1196 mlx5_destroy_flow_group(vport->ingress.drop_grp);
1197 mlx5_destroy_flow_table(vport->ingress.acl);
1198 vport->ingress.acl = NULL;
1199 vport->ingress.drop_grp = NULL;
1200 vport->ingress.allow_spoofchk_only_grp = NULL;
1201 vport->ingress.allow_untagged_only_grp = NULL;
1202 vport->ingress.allow_untagged_spoofchk_grp = NULL;
1203 }
1204
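/* Rebuild the vport ingress ACL rules from the current VST/spoofchk
 * settings: allow packets that are untagged (when a VLAN/qos is set)
 * and/or carry the vport's own source MAC (when spoofchk is on), then
 * drop everything else. With neither set, the ingress ACL is removed.
 */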
1205 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1206 struct mlx5_vport *vport)
1207 {
1208 struct mlx5_flow_spec *spec;
1209 int err = 0;
1210 u8 *smac_v;
1211
1212 if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
1213 mlx5_core_warn(esw->dev,
1214 "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
1215 vport->vport);
1216 return -EPERM;
1218 }
1219
1220 esw_vport_cleanup_ingress_rules(esw, vport);
1221
1222 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1223 esw_vport_disable_ingress_acl(esw, vport);
1224 return 0;
1225 }
1226
1227 esw_vport_enable_ingress_acl(esw, vport);
1228
1229 esw_debug(esw->dev,
1230 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1231 vport->vport, vport->info.vlan, vport->info.qos);
1232
1233 spec = mlx5_vzalloc(sizeof(*spec));
1234 if (!spec) {
1235 err = -ENOMEM;
1236 esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
1237 vport->vport, err);
1238 goto out;
1239 }
1240
1241 if (vport->info.vlan || vport->info.qos)
1242 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
1243
1244 if (vport->info.spoofchk) {
1245 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1246 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1247 smac_v = MLX5_ADDR_OF(fte_match_param,
1248 spec->match_value,
1249 outer_headers.smac_47_16);
1250 ether_addr_copy(smac_v, vport->info.mac);
1251 }
1252
1253 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1254 vport->ingress.allow_rule =
1255 mlx5_add_flow_rule(vport->ingress.acl, spec,
1256 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1257 0, NULL);
1258 if (IS_ERR(vport->ingress.allow_rule)) {
1259 err = PTR_ERR(vport->ingress.allow_rule);
1260 esw_warn(esw->dev,
1261 "vport[%d] configure ingress allow rule, err(%d)\n",
1262 vport->vport, err);
1263 vport->ingress.allow_rule = NULL;
1264 goto out;
1265 }
1266
1267 memset(spec, 0, sizeof(*spec));
1268 vport->ingress.drop_rule =
1269 mlx5_add_flow_rule(vport->ingress.acl, spec,
1270 MLX5_FLOW_CONTEXT_ACTION_DROP,
1271 0, NULL);
1272 if (IS_ERR(vport->ingress.drop_rule)) {
1273 err = PTR_ERR(vport->ingress.drop_rule);
1274 esw_warn(esw->dev,
1275 "vport[%d] configure ingress drop rule, err(%d)\n",
1276 vport->vport, err);
1277 vport->ingress.drop_rule = NULL;
1278 goto out;
1279 }
1280
1281 out:
1282 if (err)
1283 esw_vport_cleanup_ingress_rules(esw, vport);
1284 kvfree(spec);
1285 return err;
1286 }
1287
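/* Rebuild the vport egress ACL rules for VST: allow frames tagged with
 * the configured VLAN id and drop all other traffic. With no VLAN/qos
 * configured, the egress ACL is removed entirely.
 */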
1288 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1289 struct mlx5_vport *vport)
1290 {
1291 struct mlx5_flow_spec *spec;
1292 int err = 0;
1293
1294 esw_vport_cleanup_egress_rules(esw, vport);
1295
1296 if (!vport->info.vlan && !vport->info.qos) {
1297 esw_vport_disable_egress_acl(esw, vport);
1298 return 0;
1299 }
1300
1301 esw_vport_enable_egress_acl(esw, vport);
1302
1303 esw_debug(esw->dev,
1304 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1305 vport->vport, vport->info.vlan, vport->info.qos);
1306
1307 spec = mlx5_vzalloc(sizeof(*spec));
1308 if (!spec) {
1309 err = -ENOMEM;
1310 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
1311 vport->vport, err);
1312 goto out;
1313 }
1314
1315 /* Allowed vlan rule */
1316 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
1317 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
1318 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1319 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
1320
1321 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1322 vport->egress.allowed_vlan =
1323 mlx5_add_flow_rule(vport->egress.acl, spec,
1324 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1325 0, NULL);
1326 if (IS_ERR(vport->egress.allowed_vlan)) {
1327 err = PTR_ERR(vport->egress.allowed_vlan);
1328 esw_warn(esw->dev,
1329 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1330 vport->vport, err);
1331 vport->egress.allowed_vlan = NULL;
1332 goto out;
1333 }
1334
1335 /* Drop others rule (star rule) */
1336 memset(spec, 0, sizeof(*spec));
1337 vport->egress.drop_rule =
1338 mlx5_add_flow_rule(vport->egress.acl, spec,
1339 MLX5_FLOW_CONTEXT_ACTION_DROP,
1340 0, NULL);
1341 if (IS_ERR(vport->egress.drop_rule)) {
1342 err = PTR_ERR(vport->egress.drop_rule);
1343 esw_warn(esw->dev,
1344 "vport[%d] configure egress drop rule failed, err(%d)\n",
1345 vport->vport, err);
1346 vport->egress.drop_rule = NULL;
1347 }
1348 out:
1349 kvfree(spec);
1350 return err;
1351 }
1352
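/* Derive an IB node GUID from a MAC address using the EUI-64 style
 * ff:fe expansion, e.g. (illustrative MAC) 00:11:22:33:44:55 maps to
 * node GUID 00:11:22:ff:fe:33:44:55.
 */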
1353 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1354 {
1355 ((u8 *)node_guid)[7] = mac[0];
1356 ((u8 *)node_guid)[6] = mac[1];
1357 ((u8 *)node_guid)[5] = mac[2];
1358 ((u8 *)node_guid)[4] = 0xff;
1359 ((u8 *)node_guid)[3] = 0xfe;
1360 ((u8 *)node_guid)[2] = mac[3];
1361 ((u8 *)node_guid)[1] = mac[4];
1362 ((u8 *)node_guid)[0] = mac[5];
1363 }
1364
1365 static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1366 struct mlx5_vport *vport)
1367 {
1368 int vport_num = vport->vport;
1369
1370 if (!vport_num)
1371 return;
1372
1373 mlx5_modify_vport_admin_state(esw->dev,
1374 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1375 vport_num,
1376 vport->info.link_state);
1377 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
1378 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
1379 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
1380 (vport->info.vlan || vport->info.qos));
1381
1382 /* Only legacy mode needs ACLs */
1383 if (esw->mode == SRIOV_LEGACY) {
1384 esw_vport_ingress_config(esw, vport);
1385 esw_vport_egress_config(esw, vport);
1386 }
1387 }
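/* Bring a vport under e-switch control: restore its saved configuration,
 * record which context events it should report, run the change handler
 * once to sync with the current NIC vport context and arm the events.
 */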
1388 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1389 int enable_events)
1390 {
1391 struct mlx5_vport *vport = &esw->vports[vport_num];
1392
1393 mutex_lock(&esw->state_lock);
1394 WARN_ON(vport->enabled);
1395
1396 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1397
1398 /* Restore old vport configuration */
1399 esw_apply_vport_conf(esw, vport);
1400
1401 /* Sync with current vport context */
1402 vport->enabled_events = enable_events;
1403 vport->enabled = true;
1404
1405 /* only PF is trusted by default */
1406 if (!vport_num)
1407 vport->info.trusted = true;
1408
1409 esw_vport_change_handle_locked(vport);
1410
1411 esw->enabled_vports++;
1412 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1413 mutex_unlock(&esw->state_lock);
1414 }
1415
1416 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1417 {
1418 struct mlx5_vport *vport = &esw->vports[vport_num];
1419
1420 if (!vport->enabled)
1421 return;
1422
1423 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1424 /* Mark this vport as disabled to discard new events */
1425 vport->enabled = false;
1426
1427 synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
1428 /* Wait for current already scheduled events to complete */
1429 flush_workqueue(esw->work_queue);
1430 /* Disable events from this vport */
1431 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1432 mutex_lock(&esw->state_lock);
1433 /* We don't assume VFs will cleanup after themselves.
1434 * Calling vport change handler while vport is disabled will cleanup
1435 * the vport resources.
1436 */
1437 esw_vport_change_handle_locked(vport);
1438 vport->enabled_events = 0;
1439
1440 if (vport_num && esw->mode == SRIOV_LEGACY) {
1441 mlx5_modify_vport_admin_state(esw->dev,
1442 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1443 vport_num,
1444 MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
1445 esw_vport_disable_egress_acl(esw, vport);
1446 esw_vport_disable_ingress_acl(esw, vport);
1447 }
1448 esw->enabled_vports--;
1449 mutex_unlock(&esw->state_lock);
1450 }
1451
1452 /* Public E-Switch API */
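/* Enable SRIOV e-switch handling for the PF (vport 0) plus nvfs VFs.
 * SRIOV_LEGACY builds the legacy FDB and listens to UC/MC/promisc
 * changes; SRIOV_OFFLOADS initializes the offloads tables and only
 * tracks UC address changes.
 */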
1453 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1454 {
1455 int err;
1456 int i, enabled_events;
1457
1458 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1459 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1460 return 0;
1461
1462 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1463 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1464 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1465 return -ENOTSUPP;
1466 }
1467
1468 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1469 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1470
1471 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1472 esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
1473
1474 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
1475 esw->mode = mode;
1476 esw_disable_vport(esw, 0);
1477
1478 if (mode == SRIOV_LEGACY)
1479 err = esw_create_legacy_fdb_table(esw, nvfs + 1);
1480 else
1481 err = esw_offloads_init(esw, nvfs + 1);
1482 if (err)
1483 goto abort;
1484
1485 enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
1486 for (i = 0; i <= nvfs; i++)
1487 esw_enable_vport(esw, i, enabled_events);
1488
1489 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1490 esw->enabled_vports);
1491 return 0;
1492
1493 abort:
1494 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1495 return err;
1496 }
1497
1498 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1499 {
1500 struct esw_mc_addr *mc_promisc;
1501 int nvports;
1502 int i;
1503
1504 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1505 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1506 return;
1507
1508 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
1509 esw->enabled_vports, esw->mode);
1510
1511 mc_promisc = esw->mc_promisc;
1512 nvports = esw->enabled_vports;
1513
1514 for (i = 0; i < esw->total_vports; i++)
1515 esw_disable_vport(esw, i);
1516
1517 if (mc_promisc && mc_promisc->uplink_rule)
1518 mlx5_del_flow_rule(mc_promisc->uplink_rule);
1519
1520 if (esw->mode == SRIOV_LEGACY)
1521 esw_destroy_legacy_fdb_table(esw);
1522 else if (esw->mode == SRIOV_OFFLOADS)
1523 esw_offloads_cleanup(esw, nvports);
1524
1525 esw->mode = SRIOV_NONE;
1526 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
1527 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1528 }
1529
1530 void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
1531 {
1532 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1533 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1534 return;
1535
1536 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1537 /* VF Vports will be enabled when SRIOV is enabled */
1538 }
1539
1540 void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
1541 {
1542 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1543 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1544 return;
1545
1546 esw_disable_vport(esw, 0);
1547 }
1548
1549 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1550 {
1551 int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
1552 int total_vports = MLX5_TOTAL_VPORTS(dev);
1553 struct esw_mc_addr *mc_promisc;
1554 struct mlx5_eswitch *esw;
1555 int vport_num;
1556 int err;
1557
1558 if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
1559 MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1560 return 0;
1561
1562 esw_info(dev,
1563 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
1564 total_vports, l2_table_size,
1565 MLX5_MAX_UC_PER_VPORT(dev),
1566 MLX5_MAX_MC_PER_VPORT(dev));
1567
1568 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1569 if (!esw)
1570 return -ENOMEM;
1571
1572 esw->dev = dev;
1573
1574 esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
1575 sizeof(uintptr_t), GFP_KERNEL);
1576 if (!esw->l2_table.bitmap) {
1577 err = -ENOMEM;
1578 goto abort;
1579 }
1580 esw->l2_table.size = l2_table_size;
1581
1582 mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
1583 if (!mc_promisc) {
1584 err = -ENOMEM;
1585 goto abort;
1586 }
1587 esw->mc_promisc = mc_promisc;
1588
1589 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1590 if (!esw->work_queue) {
1591 err = -ENOMEM;
1592 goto abort;
1593 }
1594
1595 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1596 GFP_KERNEL);
1597 if (!esw->vports) {
1598 err = -ENOMEM;
1599 goto abort;
1600 }
1601
1602 esw->offloads.vport_reps =
1603 kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
1604 GFP_KERNEL);
1605 if (!esw->offloads.vport_reps) {
1606 err = -ENOMEM;
1607 goto abort;
1608 }
1609
1610 mutex_init(&esw->state_lock);
1611
1612 for (vport_num = 0; vport_num < total_vports; vport_num++) {
1613 struct mlx5_vport *vport = &esw->vports[vport_num];
1614
1615 vport->vport = vport_num;
1616 vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
1617 vport->dev = dev;
1618 INIT_WORK(&vport->vport_change_handler,
1619 esw_vport_change_handler);
1620 }
1621
1622 esw->total_vports = total_vports;
1623 esw->enabled_vports = 0;
1624 esw->mode = SRIOV_NONE;
1625
1626 dev->priv.eswitch = esw;
1627 return 0;
1628 abort:
1629 if (esw->work_queue)
1630 destroy_workqueue(esw->work_queue);
1631 kfree(esw->l2_table.bitmap);
1632 kfree(esw->vports);
1633 kfree(esw->offloads.vport_reps);
1634 kfree(esw);
1635 return err;
1636 }
1637
1638 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1639 {
1640 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1641 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1642 return;
1643
1644 esw_info(esw->dev, "cleanup\n");
1645
1646 esw->dev->priv.eswitch = NULL;
1647 destroy_workqueue(esw->work_queue);
1648 kfree(esw->l2_table.bitmap);
1649 kfree(esw->mc_promisc);
1650 kfree(esw->offloads.vport_reps);
1651 kfree(esw->vports);
1652 kfree(esw);
1653 }
1654
1655 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1656 {
1657 struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
1658 u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
1659 struct mlx5_vport *vport;
1660
1661 if (!esw) {
1662 pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
1663 vport_num);
1664 return;
1665 }
1666
1667 vport = &esw->vports[vport_num];
1668 if (vport->enabled)
1669 queue_work(esw->work_queue, &vport->vport_change_handler);
1670 }
1671
1672 /* Vport Administration */
1673 #define ESW_ALLOWED(esw) \
1674 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
1675 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
1676
1677 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1678 int vport, u8 mac[ETH_ALEN])
1679 {
1680 struct mlx5_vport *evport;
1681 u64 node_guid;
1682 int err = 0;
1683
1684 if (!ESW_ALLOWED(esw))
1685 return -EPERM;
1686 if (!LEGAL_VPORT(esw, vport))
1687 return -EINVAL;
1688
1689 mutex_lock(&esw->state_lock);
1690 evport = &esw->vports[vport];
1691
1692 if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
1693 mlx5_core_warn(esw->dev,
1694 "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
1695 vport);
1696 err = -EPERM;
1697 goto unlock;
1698 }
1699
1700 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1701 if (err) {
1702 mlx5_core_warn(esw->dev,
1703 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1704 vport, err);
1705 goto unlock;
1706 }
1707
1708 node_guid_gen_from_mac(&node_guid, mac);
1709 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1710 if (err)
1711 mlx5_core_warn(esw->dev,
1712 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1713 vport, err);
1714
1715 ether_addr_copy(evport->info.mac, mac);
1716 evport->info.node_guid = node_guid;
1717 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1718 err = esw_vport_ingress_config(esw, evport);
1719
1720 unlock:
1721 mutex_unlock(&esw->state_lock);
1722 return err;
1723 }
1724
1725 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1726 int vport, int link_state)
1727 {
1728 struct mlx5_vport *evport;
1729 int err = 0;
1730
1731 if (!ESW_ALLOWED(esw))
1732 return -EPERM;
1733 if (!LEGAL_VPORT(esw, vport))
1734 return -EINVAL;
1735
1736 mutex_lock(&esw->state_lock);
1737 evport = &esw->vports[vport];
1738
1739 err = mlx5_modify_vport_admin_state(esw->dev,
1740 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1741 vport, link_state);
1742 if (err) {
1743 mlx5_core_warn(esw->dev,
1744 "Failed to set vport %d link state, err = %d",
1745 vport, err);
1746 goto unlock;
1747 }
1748
1749 evport->info.link_state = link_state;
1750
1751 unlock:
1752 mutex_unlock(&esw->state_lock);
1753 return err;
1754 }
1755
1756 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1757 int vport, struct ifla_vf_info *ivi)
1758 {
1759 struct mlx5_vport *evport;
1760
1761 if (!ESW_ALLOWED(esw))
1762 return -EPERM;
1763 if (!LEGAL_VPORT(esw, vport))
1764 return -EINVAL;
1765
1766 evport = &esw->vports[vport];
1767
1768 memset(ivi, 0, sizeof(*ivi));
1769 ivi->vf = vport - 1;
1770
1771 mutex_lock(&esw->state_lock);
1772 ether_addr_copy(ivi->mac, evport->info.mac);
1773 ivi->linkstate = evport->info.link_state;
1774 ivi->vlan = evport->info.vlan;
1775 ivi->qos = evport->info.qos;
1776 ivi->spoofchk = evport->info.spoofchk;
1777 ivi->trusted = evport->info.trusted;
1778 mutex_unlock(&esw->state_lock);
1779
1780 return 0;
1781 }
1782
1783 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1784 int vport, u16 vlan, u8 qos)
1785 {
1786 struct mlx5_vport *evport;
1787 int err = 0;
1788 int set = 0;
1789
1790 if (!ESW_ALLOWED(esw))
1791 return -EPERM;
1792 if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
1793 return -EINVAL;
1794
1795 if (vlan || qos)
1796 set = 1;
1797
1798 mutex_lock(&esw->state_lock);
1799 evport = &esw->vports[vport];
1800
1801 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
1802 if (err)
1803 goto unlock;
1804
1805 evport->info.vlan = vlan;
1806 evport->info.qos = qos;
1807 if (evport->enabled && esw->mode == SRIOV_LEGACY) {
1808 err = esw_vport_ingress_config(esw, evport);
1809 if (err)
1810 goto unlock;
1811 err = esw_vport_egress_config(esw, evport);
1812 }
1813
1814 unlock:
1815 mutex_unlock(&esw->state_lock);
1816 return err;
1817 }
1818
1819 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1820 int vport, bool spoofchk)
1821 {
1822 struct mlx5_vport *evport;
1823 bool pschk;
1824 int err = 0;
1825
1826 if (!ESW_ALLOWED(esw))
1827 return -EPERM;
1828 if (!LEGAL_VPORT(esw, vport))
1829 return -EINVAL;
1830
1831 mutex_lock(&esw->state_lock);
1832 evport = &esw->vports[vport];
1833 pschk = evport->info.spoofchk;
1834 evport->info.spoofchk = spoofchk;
1835 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1836 err = esw_vport_ingress_config(esw, evport);
1837 if (err)
1838 evport->info.spoofchk = pschk;
1839 mutex_unlock(&esw->state_lock);
1840
1841 return err;
1842 }
1843
1844 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
1845 int vport, bool setting)
1846 {
1847 struct mlx5_vport *evport;
1848
1849 if (!ESW_ALLOWED(esw))
1850 return -EPERM;
1851 if (!LEGAL_VPORT(esw, vport))
1852 return -EINVAL;
1853
1854 mutex_lock(&esw->state_lock);
1855 evport = &esw->vports[vport];
1856 evport->info.trusted = setting;
1857 if (evport->enabled)
1858 esw_vport_change_handle_locked(evport);
1859 mutex_unlock(&esw->state_lock);
1860
1861 return 0;
1862 }
1863
1864 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
1865 int vport,
1866 struct ifla_vf_stats *vf_stats)
1867 {
1868 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1869 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
1870 int err = 0;
1871 u32 *out;
1872
1873 if (!ESW_ALLOWED(esw))
1874 return -EPERM;
1875 if (!LEGAL_VPORT(esw, vport))
1876 return -EINVAL;
1877
1878 out = mlx5_vzalloc(outlen);
1879 if (!out)
1880 return -ENOMEM;
1881
1882 MLX5_SET(query_vport_counter_in, in, opcode,
1883 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1884 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
1885 MLX5_SET(query_vport_counter_in, in, vport_number, vport);
1886 if (vport)
1887 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1888
1889 memset(out, 0, outlen);
1890 err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
1891 if (err)
1892 goto free_out;
1893
1894 #define MLX5_GET_CTR(p, x) \
1895 MLX5_GET64(query_vport_counter_out, p, x)
1896
1897 memset(vf_stats, 0, sizeof(*vf_stats));
1898 vf_stats->rx_packets =
1899 MLX5_GET_CTR(out, received_eth_unicast.packets) +
1900 MLX5_GET_CTR(out, received_eth_multicast.packets) +
1901 MLX5_GET_CTR(out, received_eth_broadcast.packets);
1902
1903 vf_stats->rx_bytes =
1904 MLX5_GET_CTR(out, received_eth_unicast.octets) +
1905 MLX5_GET_CTR(out, received_eth_multicast.octets) +
1906 MLX5_GET_CTR(out, received_eth_broadcast.octets);
1907
1908 vf_stats->tx_packets =
1909 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
1910 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
1911 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
1912
1913 vf_stats->tx_bytes =
1914 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
1915 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
1916 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
1917
1918 vf_stats->multicast =
1919 MLX5_GET_CTR(out, received_eth_multicast.packets);
1920
1921 vf_stats->broadcast =
1922 MLX5_GET_CTR(out, received_eth_broadcast.packets);
1923
1924 free_out:
1925 kvfree(out);
1926 return err;
1927 }