/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

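/* A FID (filtering identifier) is what the device keys FDB lookups on.
 * For a bridged port the FID is simply the VLAN ID, while vPorts
 * (VLAN upper devices) use vFIDs, which mlxsw_sp_vfid_to_fid() maps
 * past the 4K VLAN range.
 */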
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	u16 fid = vid;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

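/* switchdev attributes and objects carry the originating device. When
 * orig_dev is a VLAN device on top of this port, resolve it to the
 * vPort representing that 802.1Q upper and operate on it instead of
 * the physical port.
 */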
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

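/* Flooding is programmed through the SFTR (Switch Flooding Table)
 * register. Regular ports index a FID-offset table by VLAN ID, while
 * vPorts use the plain FID table; vPorts that are not members of a
 * vPort bridge flood to the CPU port instead.
 */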
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;
	else
		goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

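/* Bridge port flags are cached in the port structure. Only BR_FLOOD
 * requires reprogramming the device here; BR_LEARNING and
 * BR_LEARNING_SYNC merely gate the FDB notification handling further
 * down in this file.
 */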
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

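/* FIDs are created and destroyed through the SFMR (Switch FID
 * Management) register. The active_fids bitmap mirrors which FIDs
 * currently exist in the device.
 */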
static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

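/* Adding a range of VLANs to a bridged port is an ordered sequence:
 * create missing FIDs, map them to the port, enable flooding, set
 * VLAN membership, update the PVID and finally re-apply the STP
 * state. Each step is unwound in reverse order on failure.
 */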
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

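/* Unicast FDB records are added and removed through the SFD (Switch
 * Filtering Database) register. Records for LAG ports point to the
 * LAG ID instead of a local port and carry the vPort's VLAN, if any,
 * in lag_vid.
 */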
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

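/* Multicast groups are tracked in software as mlxsw_sp_mid entries:
 * a list keyed by {MAC, VID} and a bitmap of MID indexes allocated in
 * the device. The reference count removes the hardware record once
 * the last port leaves the group.
 */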
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}

	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	if (init)
		goto out;

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	u16 vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = vlan_dev_vlan_id(orig_dev);
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

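/* Learned and aged-out MAC addresses are synced back to the bridge's
 * software FDB via switchdev notifiers, but only for ports configured
 * with BR_LEARNING_SYNC.
 */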
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

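/* FDB notification records are polled from the SFN (Switch FDB
 * Notification) register in delayed work, which re-arms itself every
 * fdb_notify.interval milliseconds.
 */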
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}