Merge tag 'tpmdd-next-20160902' into next
[deliverable/linux.git] / drivers / net / dsa / bcm_sf2.c
1 /*
2 * Broadcom Starfighter 2 DSA switch driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/of.h>
18 #include <linux/phy.h>
19 #include <linux/phy_fixed.h>
20 #include <linux/mii.h>
21 #include <linux/of.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <linux/of_net.h>
25 #include <linux/of_mdio.h>
26 #include <net/dsa.h>
27 #include <linux/ethtool.h>
28 #include <linux/if_bridge.h>
29 #include <linux/brcmphy.h>
30 #include <linux/etherdevice.h>
31 #include <net/switchdev.h>
32
33 #include "bcm_sf2.h"
34 #include "bcm_sf2_regs.h"
35
/* String, offset, and register size in bytes if different from 4 bytes.
 * Entries with a size of 8 are latched 64-bit counters and must be read
 * with core_readq(); all others default to 32-bit reads.
 * Offsets are relative to the per-port MIB window (CORE_P_MIB_OFFSET).
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of entries in the MIB table above */
#define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)
95
96 static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
97 int port, uint8_t *data)
98 {
99 unsigned int i;
100
101 for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
102 memcpy(data + i * ETH_GSTRING_LEN,
103 bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
104 }
105
/* ethtool -S: read every MIB counter for @port into @data, in the same
 * order as bcm_sf2_mib (and thus bcm_sf2_sw_get_strings).
 */
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
					 int port, uint64_t *data)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	const struct bcm_sf2_hw_stats *s;
	unsigned int i;
	u64 val = 0;
	u32 offset;

	/* Serialize MIB register accesses against concurrent readers */
	mutex_lock(&priv->stats_mutex);

	/* Now fetch the per-port counters */
	for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
		s = &bcm_sf2_mib[i];

		/* Do a latched 64-bit read if needed */
		offset = s->reg + CORE_P_MIB_OFFSET(port);
		if (s->sizeof_stat == 8)
			val = core_readq(priv, offset);
		else
			val = core_readl(priv, offset);

		data[i] = (u64)val;
	}

	mutex_unlock(&priv->stats_mutex);
}
133
134 static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
135 {
136 return BCM_SF2_STATS_SIZE;
137 }
138
139 static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
140 struct device *host_dev, int sw_addr,
141 void **_priv)
142 {
143 struct bcm_sf2_priv *priv;
144
145 priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
146 if (!priv)
147 return NULL;
148 *_priv = priv;
149
150 return "Broadcom Starfighter 2";
151 }
152
/* Add the IMP (CPU) port to each enabled user port's port-based VLAN
 * membership so traffic can flow between Port i and the CPU.
 */
static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Skip ports that are not enabled in the platform config */
		if (!((1 << i) & ds->enabled_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}
172
/* Bring up the IMP (CPU-facing) port: power its memories, enable
 * forwarding to/from it, and turn on Broadcom tags in both directions.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag; only ports 8, 7
	 * and 5 can act as IMP ports, anything else gets no tag enable.
	 */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
237
238 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
239 {
240 struct bcm_sf2_priv *priv = ds_to_priv(ds);
241 u32 reg;
242
243 reg = core_readl(priv, CORE_EEE_EN_CTRL);
244 if (enable)
245 reg |= 1 << port;
246 else
247 reg &= ~(1 << port);
248 core_writel(priv, reg, CORE_EEE_EN_CTRL);
249 }
250
/* Power the integrated GPHY up or down.
 *
 * Enable path: pulse PHY_RESET while removing the power-down bits, wait
 * 21us, then release reset. Disable path: assert power-down + reset,
 * wait 1ms, then additionally gate the 25MHz clock. In both cases the
 * final reg_writel() below commits the last computed value (reset
 * release when enabling, CK25_DIS when disabling).
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
279
/* Unmask the interrupts for @port. Port 0 lives in the first INTRL2
 * bank; all other ports live in the second bank at P_IRQ_OFF(port)
 * (port 7 has a dedicated offset).
 */
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
300
/* Mask and acknowledge (clear) the interrupts for @port; mirror image
 * of bcm_sf2_port_intr_enable() with the same bank layout.
 */
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
323
/* DSA port_enable callback: power up @port, restore its PHY, VLAN
 * membership and EEE state. Always returns 0.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	/* Re-pair each enabled port with the CPU port */
	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
379
/* DSA port_disable callback: quiesce @port and power down its memory.
 * Ports armed for Wake-on-LAN are left untouched so they can still
 * receive the wake pattern.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	/* Power down the internal GPHY when this was its only user */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The IMP port uses a dedicated control register */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
409
/* Try to negotiate and enable EEE for @port.
 *
 * Returns 1 if EEE was successfully negotiated and enabled in hardware,
 * 0 otherwise (phy_init_eee() failure is treated as "EEE unavailable").
 */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	int ret;

	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

	/* Second argument 0: do not clock-stop the PHY's xMII */
	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}
429
430 static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
431 struct ethtool_eee *e)
432 {
433 struct bcm_sf2_priv *priv = ds_to_priv(ds);
434 struct ethtool_eee *p = &priv->port_sts[port].eee;
435 u32 reg;
436
437 reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
438 e->eee_enabled = p->eee_enabled;
439 e->eee_active = !!(reg & (1 << port));
440
441 return 0;
442 }
443
444 static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
445 struct phy_device *phydev,
446 struct ethtool_eee *e)
447 {
448 struct bcm_sf2_priv *priv = ds_to_priv(ds);
449 struct ethtool_eee *p = &priv->port_sts[port].eee;
450
451 p->eee_enabled = e->eee_enabled;
452
453 if (!p->eee_enabled) {
454 bcm_sf2_eee_enable_set(ds, port, false);
455 } else {
456 p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
457 if (!p->eee_enabled)
458 return -EOPNOTSUPP;
459 }
460
461 return 0;
462 }
463
464 static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
465 {
466 unsigned int timeout = 1000;
467 u32 reg;
468
469 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
470 reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
471 core_writel(priv, reg, CORE_FAST_AGE_CTRL);
472
473 do {
474 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
475 if (!(reg & FAST_AGE_STR_DONE))
476 break;
477
478 cpu_relax();
479 } while (timeout--);
480
481 if (!timeout)
482 return -ETIMEDOUT;
483
484 core_writel(priv, 0, CORE_FAST_AGE_CTRL);
485
486 return 0;
487 }
488
489 /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
490 * flush for that port.
491 */
492 static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
493 {
494 struct bcm_sf2_priv *priv = ds_to_priv(ds);
495
496 core_writel(priv, port, CORE_FAST_AGE_PORT);
497
498 return bcm_sf2_fast_age_op(priv);
499 }
500
501 static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
502 {
503 core_writel(priv, vid, CORE_FAST_AGE_VID);
504
505 return bcm_sf2_fast_age_op(priv);
506 }
507
508 static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
509 {
510 unsigned int timeout = 10;
511 u32 reg;
512
513 do {
514 reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
515 if (!(reg & ARLA_VTBL_STDN))
516 return 0;
517
518 usleep_range(1000, 2000);
519 } while (timeout--);
520
521 return -ETIMEDOUT;
522 }
523
524 static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
525 {
526 core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
527
528 return bcm_sf2_vlan_op_wait(priv);
529 }
530
/* Write the VLAN table entry for @vid: member map in the low bits,
 * untagged map shifted to UNTAG_MAP_SHIFT. Failures are only logged
 * since the callers have no recovery path.
 */
static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
				   struct bcm_sf2_vlan *vlan)
{
	int ret;

	core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
	core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
		    CORE_ARLA_VTBL_ENTRY);

	ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
	if (ret)
		pr_err("failed to write VLAN entry\n");
}
544
/* Read the VLAN table entry for @vid into @vlan (members + untag maps).
 * Returns 0 on success or a negative errno from the table operation.
 */
static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
				  struct bcm_sf2_vlan *vlan)
{
	u32 entry;
	int ret;

	core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);

	ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
	if (ret)
		return ret;

	/* Unpack the raw entry into member and untagged port maps */
	entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
	vlan->members = entry & FWD_MAP_MASK;
	vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;

	return 0;
}
563
/* Bridge join: take @port out of the "join all VLANs" mode and add it
 * to the port-based VLAN membership of every other port already in
 * @bridge (and vice versa). Always returns 0.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg &= ~BIT(port);
	/* Also drop the CPU port from join-all if it is still set */
	if ((reg & BIT(cpu_port)) == BIT(cpu_port))
		reg &= ~BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

	priv->port_sts[port].bridge_dev = bridge;
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
607
608 static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
609 {
610 struct bcm_sf2_priv *priv = ds_to_priv(ds);
611 struct net_device *bridge = priv->port_sts[port].bridge_dev;
612 s8 cpu_port = ds->dst->cpu_port;
613 unsigned int i;
614 u32 reg, p_ctl;
615
616 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
617
618 for (i = 0; i < priv->hw_params.num_ports; i++) {
619 /* Don't touch the remaining ports */
620 if (priv->port_sts[i].bridge_dev != bridge)
621 continue;
622
623 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
624 reg &= ~(1 << port);
625 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
626 priv->port_sts[port].vlan_ctl_mask = reg;
627
628 /* Prevent self removal to preserve isolation */
629 if (port != i)
630 p_ctl &= ~(1 << i);
631 }
632
633 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
634 priv->port_sts[port].vlan_ctl_mask = p_ctl;
635 priv->port_sts[port].bridge_dev = NULL;
636
637 /* Make this port join all VLANs without VLAN entries */
638 reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
639 reg |= BIT(port);
640 if (!(reg & BIT(cpu_port)))
641 reg |= BIT(cpu_port);
642 core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
643 }
644
/* Translate a bridge STP state into the hardware G_MISTP state and
 * program it, fast-ageing the port's ARL entries when transitioning
 * out of a learning/forwarding state.
 */
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
					u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			if (bcm_sf2_sw_fast_age_port(ds, port)) {
				pr_err("%s: fast-ageing failed\n", __func__);
				/* Leave the old state in place on failure */
				return;
			}
		}
	}

	/* Commit the new spanning-tree state */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
695
696 /* Address Resolution Logic routines */
697 static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
698 {
699 unsigned int timeout = 10;
700 u32 reg;
701
702 do {
703 reg = core_readl(priv, CORE_ARLA_RWCTL);
704 if (!(reg & ARL_STRTDN))
705 return 0;
706
707 usleep_range(1000, 2000);
708 } while (timeout--);
709
710 return -ETIMEDOUT;
711 }
712
/* Start an ARL read (@op = 1) or write (@op = 0 .. ARL_RW) operation
 * and wait for it to complete. Returns -EINVAL for out-of-range ops,
 * otherwise the result of bcm_sf2_arl_op_wait().
 */
static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	u32 cmd;

	if (op > ARL_RW)
		return -EINVAL;

	cmd = core_readl(priv, CORE_ARLA_RWCTL);
	/* Use shared (SVL) rather than independent VLAN learning */
	cmd &= ~IVL_SVL_SELECT;
	cmd |= ARL_STRTDN;
	if (op)
		cmd |= ARL_RW;
	else
		cmd &= ~ARL_RW;
	core_writel(priv, cmd, CORE_ARLA_RWCTL);

	return bcm_sf2_arl_op_wait(priv);
}
731
/* Scan the 4 ARL result bins for @mac/@vid after a read operation.
 *
 * When @is_valid, look for a valid entry and return its bin in *idx.
 * When !@is_valid, confirm that the entry we just deleted is gone.
 * Returns 0 on a match, -ENOENT otherwise, or the wait error.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): the bitwise AND matches any entry sharing
		 * a set bit with @mac, not an exact MAC match — looks like
		 * an equality test was intended; confirm before changing.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
764
/* Perform an ARL operation for @addr/@vid on @port.
 *
 * @op: non-zero = read-only lookup, 0 = write (insert when @is_valid,
 *      delete otherwise). Writes are verified with a read-back.
 * Returns 0 on success or a negative errno.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Build the entry to write into bin @idx; entries added through
	 * this path are always static (not subject to ageing).
	 */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
815
/* switchdev prepare phase for FDB entries: nothing to reserve here,
 * so the commit phase is always allowed to proceed.
 */
static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	return 0;
}
823
824 static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
825 const struct switchdev_obj_port_fdb *fdb,
826 struct switchdev_trans *trans)
827 {
828 struct bcm_sf2_priv *priv = ds_to_priv(ds);
829
830 if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
831 pr_err("%s: failed to add MAC address\n", __func__);
832 }
833
834 static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
835 const struct switchdev_obj_port_fdb *fdb)
836 {
837 struct bcm_sf2_priv *priv = ds_to_priv(ds);
838
839 return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
840 }
841
/* Wait for the next ARL search result: returns 0 when either the
 * search has finished (STDN cleared) or a valid result is latched
 * (VLID set); -ETIMEDOUT if neither happens in time.
 */
static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
{
	unsigned timeout = 1000;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
		if (!(reg & ARLA_SRCH_STDN))
			return 0;

		if (reg & ARLA_SRCH_VLID)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}
860
861 static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
862 struct bcm_sf2_arl_entry *ent)
863 {
864 u64 mac_vid;
865 u32 fwd_entry;
866
867 mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
868 fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
869 bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
870 }
871
872 static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
873 const struct bcm_sf2_arl_entry *ent,
874 struct switchdev_obj_port_fdb *fdb,
875 int (*cb)(struct switchdev_obj *obj))
876 {
877 if (!ent->is_valid)
878 return 0;
879
880 if (port != ent->port)
881 return 0;
882
883 ether_addr_copy(fdb->addr, ent->mac);
884 fdb->vid = ent->vid;
885 fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
886
887 return cb(&fdb->obj);
888 }
889
/* Walk the hardware ARL via its search engine and report every entry
 * belonging to @port through @cb. The search yields results two at a
 * time; two consecutive invalid results mark the end of the table.
 */
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port].netdev;
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		/* Both bins empty: search exhausted */
		if (!results[0].is_valid && !results[1].is_valid)
			break;

	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
926
/* Indirect pseudo-PHY access through the switch core registers.
 *
 * @op: non-zero = read (returns the 16-bit register value),
 *      0 = write @val (returns 0).
 * MDIO_MASTER_SEL is temporarily set so the switch owns the MDIO bus.
 * Core register addresses are built as (page << 8 | offset) << 2.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	/* NOTE(review): 0x70 here is the raw offset used to latch the PHY
	 * address — presumably page 0, offset 0x70; verify against the
	 * register map.
	 */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the MDIO bus back to the regular master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
957
958 static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
959 {
960 struct bcm_sf2_priv *priv = bus->priv;
961
962 /* Intercept reads from Broadcom pseudo-PHY address, else, send
963 * them to our master MDIO bus controller
964 */
965 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
966 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
967 else
968 return mdiobus_read(priv->master_mii_bus, addr, regnum);
969 }
970
971 static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
972 u16 val)
973 {
974 struct bcm_sf2_priv *priv = bus->priv;
975
976 /* Intercept writes to the Broadcom pseudo-PHY address, else,
977 * send them to our master MDIO bus controller
978 */
979 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
980 bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
981 else
982 mdiobus_write(priv->master_mii_bus, addr, regnum, val);
983
984 return 0;
985 }
986
987 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
988 {
989 struct bcm_sf2_priv *priv = dev_id;
990
991 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
992 ~priv->irq0_mask;
993 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
994
995 return IRQ_HANDLED;
996 }
997
/* Second INTRL2 bank ISR: ack the unmasked sources and track the link
 * state of port 7 (presumably the MoCA port — see priv->moca_port)
 * from its link up/down interrupts.
 */
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}
1013
1014 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
1015 {
1016 unsigned int timeout = 1000;
1017 u32 reg;
1018
1019 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
1020 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
1021 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
1022
1023 do {
1024 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
1025 if (!(reg & SOFTWARE_RESET))
1026 break;
1027
1028 usleep_range(1000, 2000);
1029 } while (timeout-- > 0);
1030
1031 if (timeout == 0)
1032 return -ETIMEDOUT;
1033
1034 return 0;
1035 }
1036
/* Mask and acknowledge every interrupt on both INTRL2 banks */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
1046
/* Walk the device tree port sub-nodes to build the internal-PHY port
 * mask (ports whose phy-mode is "internal") and locate the MoCA port
 * (phy-mode "moca"). priv->moca_port is -1 when no MoCA port exists.
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		/* Ports without a "reg" property cannot be identified */
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			/* "internal" is not a standard phy-mode, so parse
			 * the raw string when of_get_phy_mode() fails
			 */
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;
	}
}
1082
/* Locate the integrated UniMAC MDIO controller and register our own
 * slave MDIO bus in front of it so pseudo-PHY accesses can be diverted.
 *
 * Returns 0 on success, -EPROBE_DEFER when the master bus is not yet
 * available, -ENOMEM on allocation failure, or the registration error.
 */
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	/* Pin the master bus device while we hold a reference to it */
	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	/* 'index' is static so every registered instance gets a unique id */
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	/* Drop the node reference on failure; on success it is released
	 * later by bcm_sf2_mdio_unregister()
	 */
	if (err)
		of_node_put(dn);

	return err;
}
1141
1142 static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
1143 {
1144 mdiobus_unregister(priv->slave_mii_bus);
1145 if (priv->master_mii_dn)
1146 of_node_put(priv->master_mii_dn);
1147 }
1148
/* The switch does not filter on the station MAC address, so there is
 * nothing to program here; return success to satisfy the DSA contract.
 */
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}
1153
/* Report the integrated GPHY revision as the PHY device flags.
 *
 * The BCM7xxx PHY driver expects to find the integrated PHY revision
 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
 * the REG_PHY_REVISION register layout is.
 */
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	return priv->hw_params.gphy_rev;
}
1165
/* React to a PHY link change on an external port: program the RGMII/MII
 * pad block for the negotiated interface mode and pause settings, then
 * force speed/duplex/link into the per-port override register.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - plain RGMII shares the EXT_GPHY port mode */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	/* Pause frames: RX pause whenever pause was negotiated, TX pause
	 * additionally when asymmetric pause was negotiated.
	 */
	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
1245
1246 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
1247 struct fixed_phy_status *status)
1248 {
1249 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1250 u32 duplex, pause;
1251 u32 reg;
1252
1253 duplex = core_readl(priv, CORE_DUPSTS);
1254 pause = core_readl(priv, CORE_PAUSESTS);
1255
1256 status->link = 0;
1257
1258 /* MoCA port is special as we do not get link status from CORE_LNKSTS,
1259 * which means that we need to force the link at the port override
1260 * level to get the data to flow. We do use what the interrupt handler
1261 * did determine before.
1262 *
1263 * For the other ports, we just force the link status, since this is
1264 * a fixed PHY device.
1265 */
1266 if (port == priv->moca_port) {
1267 status->link = priv->port_sts[port].link;
1268 /* For MoCA interfaces, also force a link down notification
1269 * since some version of the user-space daemon (mocad) use
1270 * cmd->autoneg to force the link, which messes up the PHY
1271 * state machine and make it go in PHY_FORCING state instead.
1272 */
1273 if (!status->link)
1274 netif_carrier_off(ds->ports[port].netdev);
1275 status->duplex = 1;
1276 } else {
1277 status->link = 1;
1278 status->duplex = !!(duplex & (1 << port));
1279 }
1280
1281 reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1282 reg |= SW_OVERRIDE;
1283 if (status->link)
1284 reg |= LINK_STS;
1285 else
1286 reg &= ~LINK_STS;
1287 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1288
1289 if ((pause & (1 << port)) &&
1290 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
1291 status->asym_pause = 1;
1292 status->pause = 1;
1293 }
1294
1295 if (pause & (1 << port))
1296 status->pause = 1;
1297 }
1298
1299 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
1300 {
1301 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1302 unsigned int port;
1303
1304 bcm_sf2_intr_disable(priv);
1305
1306 /* Disable all ports physically present including the IMP
1307 * port, the other ones have already been disabled during
1308 * bcm_sf2_sw_setup
1309 */
1310 for (port = 0; port < DSA_MAX_PORTS; port++) {
1311 if ((1 << port) & ds->enabled_port_mask ||
1312 dsa_is_cpu_port(ds, port))
1313 bcm_sf2_port_disable(ds, port, NULL);
1314 }
1315
1316 return 0;
1317 }
1318
1319 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
1320 {
1321 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1322 unsigned int port;
1323 int ret;
1324
1325 ret = bcm_sf2_sw_rst(priv);
1326 if (ret) {
1327 pr_err("%s: failed to software reset switch\n", __func__);
1328 return ret;
1329 }
1330
1331 if (priv->hw_params.num_gphy == 1)
1332 bcm_sf2_gphy_enable_set(ds, true);
1333
1334 for (port = 0; port < DSA_MAX_PORTS; port++) {
1335 if ((1 << port) & ds->enabled_port_mask)
1336 bcm_sf2_port_setup(ds, port, NULL);
1337 else if (dsa_is_cpu_port(ds, port))
1338 bcm_sf2_imp_setup(ds, port);
1339 }
1340
1341 return 0;
1342 }
1343
1344 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
1345 struct ethtool_wolinfo *wol)
1346 {
1347 struct net_device *p = ds->dst[ds->index].master_netdev;
1348 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1349 struct ethtool_wolinfo pwol;
1350
1351 /* Get the parent device WoL settings */
1352 p->ethtool_ops->get_wol(p, &pwol);
1353
1354 /* Advertise the parent device supported settings */
1355 wol->supported = pwol.supported;
1356 memset(&wol->sopass, 0, sizeof(wol->sopass));
1357
1358 if (pwol.wolopts & WAKE_MAGICSECURE)
1359 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
1360
1361 if (priv->wol_ports_mask & (1 << port))
1362 wol->wolopts = pwol.wolopts;
1363 else
1364 wol->wolopts = 0;
1365 }
1366
1367 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1368 struct ethtool_wolinfo *wol)
1369 {
1370 struct net_device *p = ds->dst[ds->index].master_netdev;
1371 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1372 s8 cpu_port = ds->dst[ds->index].cpu_port;
1373 struct ethtool_wolinfo pwol;
1374
1375 p->ethtool_ops->get_wol(p, &pwol);
1376 if (wol->wolopts & ~pwol.supported)
1377 return -EINVAL;
1378
1379 if (wol->wolopts)
1380 priv->wol_ports_mask |= (1 << port);
1381 else
1382 priv->wol_ports_mask &= ~(1 << port);
1383
1384 /* If we have at least one port enabled, make sure the CPU port
1385 * is also enabled. If the CPU port is the last one enabled, we disable
1386 * it since this configuration does not make sense.
1387 */
1388 if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
1389 priv->wol_ports_mask |= (1 << cpu_port);
1390 else
1391 priv->wol_ports_mask &= ~(1 << cpu_port);
1392
1393 return p->ethtool_ops->set_wol(p, wol);
1394 }
1395
1396 static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
1397 {
1398 u32 mgmt, vc0, vc1, vc4, vc5;
1399
1400 mgmt = core_readl(priv, CORE_SWMODE);
1401 vc0 = core_readl(priv, CORE_VLAN_CTRL0);
1402 vc1 = core_readl(priv, CORE_VLAN_CTRL1);
1403 vc4 = core_readl(priv, CORE_VLAN_CTRL4);
1404 vc5 = core_readl(priv, CORE_VLAN_CTRL5);
1405
1406 mgmt &= ~SW_FWDG_MODE;
1407
1408 if (enable) {
1409 vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
1410 vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
1411 vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
1412 vc4 |= INGR_VID_CHK_DROP;
1413 vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
1414 } else {
1415 vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
1416 vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
1417 vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
1418 vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
1419 vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
1420 }
1421
1422 core_writel(priv, vc0, CORE_VLAN_CTRL0);
1423 core_writel(priv, vc1, CORE_VLAN_CTRL1);
1424 core_writel(priv, 0, CORE_VLAN_CTRL3);
1425 core_writel(priv, vc4, CORE_VLAN_CTRL4);
1426 core_writel(priv, vc5, CORE_VLAN_CTRL5);
1427 core_writel(priv, mgmt, CORE_SWMODE);
1428 }
1429
1430 static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
1431 {
1432 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1433 unsigned int port;
1434
1435 /* Clear all VLANs */
1436 bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
1437
1438 for (port = 0; port < priv->hw_params.num_ports; port++) {
1439 if (!((1 << port) & ds->enabled_port_mask))
1440 continue;
1441
1442 core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
1443 }
1444 }
1445
/* Per-port VLAN filtering toggle: nothing to do here since 802.1Q mode
 * is enabled globally from bcm_sf2_enable_vlan().
 * NOTE(review): presumably intentional no-op - confirm the bridge's
 * vlan_filtering semantics are honoured by the global configuration.
 */
static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	return 0;
}
1451
1452 static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
1453 const struct switchdev_obj_port_vlan *vlan,
1454 struct switchdev_trans *trans)
1455 {
1456 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1457
1458 bcm_sf2_enable_vlan(priv, true);
1459
1460 return 0;
1461 }
1462
1463 static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
1464 const struct switchdev_obj_port_vlan *vlan,
1465 struct switchdev_trans *trans)
1466 {
1467 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1468 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1469 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1470 s8 cpu_port = ds->dst->cpu_port;
1471 struct bcm_sf2_vlan *vl;
1472 u16 vid;
1473
1474 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1475 vl = &priv->vlans[vid];
1476
1477 bcm_sf2_get_vlan_entry(priv, vid, vl);
1478
1479 vl->members |= BIT(port) | BIT(cpu_port);
1480 if (untagged)
1481 vl->untag |= BIT(port) | BIT(cpu_port);
1482 else
1483 vl->untag &= ~(BIT(port) | BIT(cpu_port));
1484
1485 bcm_sf2_set_vlan_entry(priv, vid, vl);
1486 bcm_sf2_sw_fast_age_vlan(priv, vid);
1487 }
1488
1489 if (pvid) {
1490 core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
1491 core_writel(priv, vlan->vid_end,
1492 CORE_DEFAULT_1Q_TAG_P(cpu_port));
1493 bcm_sf2_sw_fast_age_vlan(priv, vid);
1494 }
1495 }
1496
1497 static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
1498 const struct switchdev_obj_port_vlan *vlan)
1499 {
1500 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1501 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1502 s8 cpu_port = ds->dst->cpu_port;
1503 struct bcm_sf2_vlan *vl;
1504 u16 vid, pvid;
1505 int ret;
1506
1507 pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
1508
1509 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1510 vl = &priv->vlans[vid];
1511
1512 ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
1513 if (ret)
1514 return ret;
1515
1516 vl->members &= ~BIT(port);
1517 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
1518 vl->members = 0;
1519 if (pvid == vid)
1520 pvid = 0;
1521 if (untagged) {
1522 vl->untag &= ~BIT(port);
1523 if ((vl->untag & BIT(port)) == BIT(cpu_port))
1524 vl->untag = 0;
1525 }
1526
1527 bcm_sf2_set_vlan_entry(priv, vid, vl);
1528 bcm_sf2_sw_fast_age_vlan(priv, vid);
1529 }
1530
1531 core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
1532 core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
1533 bcm_sf2_sw_fast_age_vlan(priv, vid);
1534
1535 return 0;
1536 }
1537
1538 static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
1539 struct switchdev_obj_port_vlan *vlan,
1540 int (*cb)(struct switchdev_obj *obj))
1541 {
1542 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1543 struct bcm_sf2_port_status *p = &priv->port_sts[port];
1544 struct bcm_sf2_vlan *vl;
1545 u16 vid, pvid;
1546 int err = 0;
1547
1548 pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
1549
1550 for (vid = 0; vid < VLAN_N_VID; vid++) {
1551 vl = &priv->vlans[vid];
1552
1553 if (!(vl->members & BIT(port)))
1554 continue;
1555
1556 vlan->vid_begin = vlan->vid_end = vid;
1557 vlan->flags = 0;
1558
1559 if (vl->untag & BIT(port))
1560 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1561 if (p->pvid == vid)
1562 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1563
1564 err = cb(&vlan->obj);
1565 if (err)
1566 break;
1567 }
1568
1569 return err;
1570 }
1571
/* One-time switch setup: map the register resources, software-reset the
 * core, register the MDIO buses, hook up the two switch interrupts,
 * reset MIB counters, discover the port count and bring every port into
 * its initial enabled/disabled state.
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * released in reverse order via the goto ladder on failure.
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->cd->of_node->parent;
	bcm_sf2_identify_ports(priv, ds->cd->of_node);

	/* NOTE(review): these IRQ mappings are not disposed on the error
	 * paths below - confirm whether irq_dispose_mapping() is needed.
	 */
	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map every register resource; priv->core is treated as the first
	 * of a contiguous run of __iomem pointers inside bcm_sf2_priv,
	 * walked here with the base pointer.
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters (pulse RST_MIB_CNT high then low) */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	bcm_sf2_sw_configure_vlan(ds);

	/* Decode the top-level and core revisions for the banner below */
	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
				  SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_mdio:
	bcm_sf2_mdio_unregister(priv);
out_unmap:
	/* Unmap only the resources that were successfully mapped */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}
1693
/* DSA switch operations for the Starfighter 2; frames on the CPU port
 * carry Broadcom tags.
 */
static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol		= DSA_TAG_PROTO_BRCM,
	.probe			= bcm_sf2_sw_drv_probe,
	.setup			= bcm_sf2_sw_setup,
	.set_addr		= bcm_sf2_sw_set_addr,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_eee		= bcm_sf2_sw_get_eee,
	.set_eee		= bcm_sf2_sw_set_eee,
	.port_bridge_join	= bcm_sf2_sw_br_join,
	.port_bridge_leave	= bcm_sf2_sw_br_leave,
	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
	.port_fdb_add		= bcm_sf2_sw_fdb_add,
	.port_fdb_del		= bcm_sf2_sw_fdb_del,
	.port_fdb_dump		= bcm_sf2_sw_fdb_dump,
	.port_vlan_filtering	= bcm_sf2_sw_vlan_filtering,
	.port_vlan_prepare	= bcm_sf2_sw_vlan_prepare,
	.port_vlan_add		= bcm_sf2_sw_vlan_add,
	.port_vlan_del		= bcm_sf2_sw_vlan_del,
	.port_vlan_dump		= bcm_sf2_sw_vlan_dump,
};
1726
/* Register the driver with the DSA core at module load time */
static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);
1734
/* Unregister from the DSA core at module unload time */
static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);
1740
/* Module metadata */
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");