/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
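/* As constructed above, every packet injected by the driver carries a
 * control-type Tx header (MLXSW_TXHDR_TYPE_CONTROL on the control TClass)
 * and is steered to an explicit egress port via port_mid; data packets are
 * forwarded by the switch itself and never take this path.
 */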
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
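/* Each port MAC is derived from the switch base MAC by adding the local
 * port number to the last octet. This assumes the base MAC leaves enough
 * room in the last octet that the addition does not wrap around.
 */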
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
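/* The MTU written to the device is the netdev (L3 payload) MTU plus the
 * Ethernet header and the software Tx header, and is validated against the
 * maximum reported by the PMTU register before being programmed.
 */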
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
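/* Tx statistics are per-CPU and protected by a u64_stats sequence counter,
 * so the transmit path stays lock-free; the length is sampled into 'len'
 * before mlxsw_core_skb_transmit(), since the skb must not be dereferenced
 * after it has been handed to the core.
 */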
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
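/* Buffer sizing sketch: a priority group (PG) is sized to two MTUs' worth
 * of cells. For lossless traffic (PAUSE or PFC enabled) the reservation is
 * grown by the delay allowance, with pg_size packed as the threshold, while
 * a lossy PG gets only pg_size.
 */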
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
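/* Ordering matters here: the headroom is grown before the MTU is raised so
 * the port buffers can always absorb a maximum-sized frame, and it is
 * rolled back using the old dev->mtu if programming the new MTU fails.
 */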
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
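/* Virtual port (vPort) mode replaces the global VID to FID mapping with
 * explicit {Port, VID} to FID mappings. The transition installs a mapping
 * for every active VLAN before flipping the port mode, and unwinds the
 * mappings already installed if any step fails.
 */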
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
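/* A vPort is a shadow mlxsw_sp_port representing one {port, VID} pair
 * backed by a vFID: it shares the underlying local port and LAG state of
 * the real port, but keeps its own VID, vFID and STP state.
 */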
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
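/* The ethtool statistics exposed above come from a single PPCNT query of
 * the IEEE 802.3 counter group; each entry pairs the string reported to
 * userspace with the getter for the matching field of the register payload.
 */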
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
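/* This table is the single point of translation between the PTYS register's
 * Ethernet protocol bits and the legacy ethtool SUPPORTED_* and ADVERTISED_*
 * link-mode bits; the helpers below scan it in both directions.
 */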
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
mlxsw_sp_to_ptys_advert_link(u32 advertising
)
1452 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1453 if (advertising
& mlxsw_sp_port_link_mode
[i
].advertised
)
1454 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1459 static u32
mlxsw_sp_to_ptys_speed(u32 speed
)
1464 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1465 if (speed
== mlxsw_sp_port_link_mode
[i
].speed
)
1466 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1471 static u32
mlxsw_sp_to_ptys_upper_speed(u32 upper_speed
)
1476 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1477 if (mlxsw_sp_port_link_mode
[i
].speed
<= upper_speed
)
1478 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
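/* The maximum port speed scales with the number of lanes: the admin mask is
 * set to every link mode whose speed does not exceed
 * MLXSW_SP_PORT_BASE_SPEED * width, e.g. four lanes enable modes up to four
 * times the per-lane base speed.
 */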
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
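/* The resulting default scheduling tree is port -> group -> one subgroup
 * and one TC element per traffic class index, with DWRR disabled, all
 * supported max shapers disabled and every switch priority funneled into
 * TC 0 until DCB (if used) reconfigures it.
 */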
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
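/* Splittable ports live in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive local ports; this helper returns the first port of the
 * cluster, which the split and unsplit paths use as their base port.
 */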
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}
static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
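/* Each entry below binds the generic RX handler above to one trap ID, so
 * control packets (STP, LACP, LLDP, IGMP and friends) are delivered to
 * the CPU instead of being switched in hardware. The traps themselves
 * are armed in mlxsw_sp_traps_init() below.
 */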
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
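/* Trap setup registers two host trap groups (RX and CTRL) and then arms
 * one trap per RX listener. Note the two-stage unwind on failure: the
 * listener whose trap could not be set is unregistered first, and every
 * earlier trap is returned to the forward action before its listener is
 * unregistered.
 */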
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}
static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
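/* Main init flow. The ordering is significant: ports must exist before
 * PUDE link events and RX traps are enabled, and flood tables, shared
 * buffers and LAG hashing must be configured before switchdev starts
 * offloading bridge state. The error path unwinds in reverse order.
 */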
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
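/* FDB flushing via the SFDF register. Four variants are needed because a
 * filtering database entry may be keyed by {port}, {port, FID}, {LAG} or
 * {LAG, FID}, depending on whether the port is a LAG member and whether
 * per-VLAN (vFID) interfaces are in use.
 */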
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!list_empty(&mlxsw_sp_port->vports_list))
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	else
		if (mlxsw_sp_port->lagged)
			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
		else
			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
}

static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport,
							   fid);
}
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}
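/* Only a single VLAN-aware bridge device is supported per switch, so the
 * master bridge is tracked with a plain pointer and a reference count.
 */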
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
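/* LAG join sequence: resolve (or allocate) a LAG ID for the upper device,
 * create the LAG in hardware on first use, then add and enable the port
 * as a collector so it may receive traffic for the aggregate.
 */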
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}
static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}
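/* Netdevice notifier handling: NETDEV_PRECHANGEUPPER is used to veto
 * topologies the hardware cannot offload (a second bridge master, or a
 * LAG whose Tx policy is not hash-based), while NETDEV_CHANGEUPPER
 * performs the actual join or leave.
 */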
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
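/* The vFID space is partitioned: the first MLXSW_SP_VFID_PORT_MAX vFIDs
 * back stand-alone VLAN devices, and the remaining MLXSW_SP_VFID_BR_MAX
 * entries are reserved for VLAN devices enslaved to VLAN-unaware
 * bridges, as reflected by the conversion helpers above.
 */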
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		WARN_ON(!vfid);
		return -EINVAL;
	}

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (!mlxsw_sp_vport) {
				WARN_ON(!mlxsw_sp_vport);
				return -EINVAL;
			}
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return 0;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
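/* A single netdevice notifier covers physical ports, LAG devices and
 * VLAN devices; handler return codes are translated to notifier verdicts
 * with notifier_from_errno() above.
 */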
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);