1 /*
2 * Broadcom Starfighter 2 DSA switch driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/of.h>
18 #include <linux/phy.h>
19 #include <linux/phy_fixed.h>
20 #include <linux/mii.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <net/dsa.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_bridge.h>
27 #include <linux/brcmphy.h>
28
29 #include "bcm_sf2.h"
30 #include "bcm_sf2_regs.h"
31
32 /* String, offset, and register size in bytes if different from 4 bytes */
33 static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
34 { "TxOctets", 0x000, 8 },
35 { "TxDropPkts", 0x020 },
36 { "TxQPKTQ0", 0x030 },
37 { "TxBroadcastPkts", 0x040 },
38 { "TxMulticastPkts", 0x050 },
39 { "TxUnicastPKts", 0x060 },
40 { "TxCollisions", 0x070 },
41 { "TxSingleCollision", 0x080 },
42 { "TxMultipleCollision", 0x090 },
43 { "TxDeferredCollision", 0x0a0 },
44 { "TxLateCollision", 0x0b0 },
45 { "TxExcessiveCollision", 0x0c0 },
46 { "TxFrameInDisc", 0x0d0 },
47 { "TxPausePkts", 0x0e0 },
48 { "TxQPKTQ1", 0x0f0 },
49 { "TxQPKTQ2", 0x100 },
50 { "TxQPKTQ3", 0x110 },
51 { "TxQPKTQ4", 0x120 },
52 { "TxQPKTQ5", 0x130 },
53 { "RxOctets", 0x140, 8 },
54 { "RxUndersizePkts", 0x160 },
55 { "RxPausePkts", 0x170 },
56 { "RxPkts64Octets", 0x180 },
57 { "RxPkts65to127Octets", 0x190 },
58 { "RxPkts128to255Octets", 0x1a0 },
59 { "RxPkts256to511Octets", 0x1b0 },
60 { "RxPkts512to1023Octets", 0x1c0 },
61 { "RxPkts1024toMaxPktsOctets", 0x1d0 },
62 { "RxOversizePkts", 0x1e0 },
63 { "RxJabbers", 0x1f0 },
64 { "RxAlignmentErrors", 0x200 },
65 { "RxFCSErrors", 0x210 },
66 { "RxGoodOctets", 0x220, 8 },
67 { "RxDropPkts", 0x240 },
68 { "RxUnicastPkts", 0x250 },
69 { "RxMulticastPkts", 0x260 },
70 { "RxBroadcastPkts", 0x270 },
71 { "RxSAChanges", 0x280 },
72 { "RxFragments", 0x290 },
73 { "RxJumboPkt", 0x2a0 },
74 { "RxSymblErr", 0x2b0 },
75 { "InRangeErrCount", 0x2c0 },
76 { "OutRangeErrCount", 0x2d0 },
77 { "EEELpiEvent", 0x2e0 },
78 { "EEELpiDuration", 0x2f0 },
79 { "RxDiscard", 0x300, 8 },
80 { "TxQPKTQ6", 0x320 },
81 { "TxQPKTQ7", 0x330 },
82 { "TxPkts64Octets", 0x340 },
83 { "TxPkts65to127Octets", 0x350 },
84 { "TxPkts128to255Octets", 0x360 },
85 { "TxPkts256to511Ocets", 0x370 },
86 { "TxPkts512to1023Ocets", 0x380 },
87 { "TxPkts1024toMaxPktOcets", 0x390 },
88 };
89
90 #define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
91
92 static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
93 int port, uint8_t *data)
94 {
95 unsigned int i;
96
97 for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
98 memcpy(data + i * ETH_GSTRING_LEN,
99 bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
100 }
101
102 static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
103 int port, uint64_t *data)
104 {
105 struct bcm_sf2_priv *priv = ds_to_priv(ds);
106 const struct bcm_sf2_hw_stats *s;
107 unsigned int i;
108 u64 val = 0;
109 u32 offset;
110
111 mutex_lock(&priv->stats_mutex);
112
113 /* Now fetch the per-port counters */
114 for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
115 s = &bcm_sf2_mib[i];
116
117 /* Do a latched 64-bit read if needed */
118 offset = s->reg + CORE_P_MIB_OFFSET(port);
119 if (s->sizeof_stat == 8)
120 val = core_readq(priv, offset);
121 else
122 val = core_readl(priv, offset);
123
124 data[i] = (u64)val;
125 }
126
127 mutex_unlock(&priv->stats_mutex);
128 }
129
130 static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
131 {
132 return BCM_SF2_STATS_SIZE;
133 }
134
135 static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
136 {
137 return "Broadcom Starfighter 2";
138 }
139
140 static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
141 {
142 struct bcm_sf2_priv *priv = ds_to_priv(ds);
143 unsigned int i;
144 u32 reg;
145
146 /* Allow the IMP port to be in the same VLAN as each enabled port,
147 * on a per-port basis, so that only Port i and the IMP port end up
148 * in the same VLAN.
149 */
150 for (i = 0; i < priv->hw_params.num_ports; i++) {
151 if (!((1 << i) & ds->phys_port_mask))
152 continue;
153
154 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
155 reg |= (1 << cpu_port);
156 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
157 }
158 }
159
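/* Bring up the IMP (CPU-facing) port: power up its queue memories, enable
 * forwarding and Broadcom tags, and force the link state at the override
 * level.
 */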
160 static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
161 {
162 struct bcm_sf2_priv *priv = ds_to_priv(ds);
163 u32 reg, val;
164
165 /* Enable the port memories */
166 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
167 reg &= ~P_TXQ_PSM_VDD(port);
168 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
169
170 /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
171 reg = core_readl(priv, CORE_IMP_CTL);
172 reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
173 reg &= ~(RX_DIS | TX_DIS);
174 core_writel(priv, reg, CORE_IMP_CTL);
175
176 /* Enable forwarding */
177 core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
178
179 /* Enable IMP port in dumb mode */
180 reg = core_readl(priv, CORE_SWITCH_CTRL);
181 reg |= MII_DUMB_FWDG_EN;
182 core_writel(priv, reg, CORE_SWITCH_CTRL);
183
184 /* Resolve which bit controls the Broadcom tag */
185 switch (port) {
186 case 8:
187 val = BRCM_HDR_EN_P8;
188 break;
189 case 7:
190 val = BRCM_HDR_EN_P7;
191 break;
192 case 5:
193 val = BRCM_HDR_EN_P5;
194 break;
195 default:
196 val = 0;
197 break;
198 }
199
200 /* Enable Broadcom tags for IMP port */
201 reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
202 reg |= val;
203 core_writel(priv, reg, CORE_BRCM_HDR_CTRL);
204
205 /* Enable reception of Broadcom tags for CPU TX (switch RX) to
206 * allow us to tag outgoing frames
207 */
208 reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
209 reg &= ~(1 << port);
210 core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);
211
212 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
213 * allow delivering frames to the per-port net_devices
214 */
215 reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
216 reg &= ~(1 << port);
217 core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
218
219 /* Force link status for IMP port */
220 reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
221 reg |= (MII_SW_OR | LINK_STS);
222 core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
223 }
224
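/* Enable or disable EEE for @port at the switch core level */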
225 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
226 {
227 struct bcm_sf2_priv *priv = ds_to_priv(ds);
228 u32 reg;
229
230 reg = core_readl(priv, CORE_EEE_EN_CTRL);
231 if (enable)
232 reg |= 1 << port;
233 else
234 reg &= ~(1 << port);
235 core_writel(priv, reg, CORE_EEE_EN_CTRL);
236 }
237
238 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
239 {
240 struct bcm_sf2_priv *priv = ds_to_priv(ds);
241 u32 reg;
242
243 reg = reg_readl(priv, REG_SPHY_CNTRL);
244 if (enable) {
245 reg |= PHY_RESET;
246 reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
247 reg_writel(priv, reg, REG_SPHY_CNTRL);
248 udelay(21);
249 reg = reg_readl(priv, REG_SPHY_CNTRL);
250 reg &= ~PHY_RESET;
251 } else {
252 reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
253 reg_writel(priv, reg, REG_SPHY_CNTRL);
254 mdelay(1);
255 reg |= CK25_DIS;
256 }
257 reg_writel(priv, reg, REG_SPHY_CNTRL);
258
259 /* Use PHY-driven LED signaling */
260 if (!enable) {
261 reg = reg_readl(priv, REG_LED_CNTRL(0));
262 reg |= SPDLNK_SRC_SEL;
263 reg_writel(priv, reg, REG_LED_CNTRL(0));
264 }
265 }
266
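/* Per-port enable path (also used at setup time): power up the port,
 * re-apply GPHY workarounds if needed, restore VLAN/bridge membership
 * and re-enable EEE if it was previously enabled.
 */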
267 static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
268 struct phy_device *phy)
269 {
270 struct bcm_sf2_priv *priv = ds_to_priv(ds);
271 s8 cpu_port = ds->dst[ds->index].cpu_port;
272 u32 reg;
273
274 /* Clear the memory power down */
275 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
276 reg &= ~P_TXQ_PSM_VDD(port);
277 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
278
279 /* Clear the Rx and Tx disable bits and set to no spanning tree */
280 core_writel(priv, 0, CORE_G_PCTL_PORT(port));
281
282 /* Re-enable the GPHY and re-apply workarounds */
283 if (port == 0 && priv->hw_params.num_gphy == 1) {
284 bcm_sf2_gphy_enable_set(ds, true);
285 if (phy) {
286 /* If phy_stop() has been called before, the PHY
287 * will be in the halted state and phy_start()
288 * will call resume.
289 *
290 * The resume path does not restore the autoneg
291 * settings, and since we hard reset the PHY
292 * manually here, we also need to reset its
293 * state machine.
294 */
295 phy->state = PHY_READY;
296 phy_init_hw(phy);
297 }
298 }
299
300 /* Enable port 7 interrupts so that we get notified of link changes */
301 if (port == 7)
302 intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
303
304 /* Set this port, and only this one, to be in the default VLAN;
305 * if it was a member of a bridge, restore the membership it had
306 * prior to bringing down this port.
307 */
308 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
309 reg &= ~PORT_VLAN_CTRL_MASK;
310 reg |= (1 << port);
311 reg |= priv->port_sts[port].vlan_ctl_mask;
312 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
313
314 bcm_sf2_imp_vlan_setup(ds, cpu_port);
315
316 /* If EEE was enabled, restore it */
317 if (priv->port_sts[port].eee.eee_enabled)
318 bcm_sf2_eee_enable_set(ds, port, true);
319
320 return 0;
321 }
322
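/* Per-port disable path: skip ports armed for Wake-on-LAN, otherwise mask
 * port interrupts, disable RX/TX and power down the port memory.
 */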
323 static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
324 struct phy_device *phy)
325 {
326 struct bcm_sf2_priv *priv = ds_to_priv(ds);
327 u32 off, reg;
328
329 if (priv->wol_ports_mask & (1 << port))
330 return;
331
332 if (port == 7) {
333 intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
334 intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
335 }
336
337 if (port == 0 && priv->hw_params.num_gphy == 1)
338 bcm_sf2_gphy_enable_set(ds, false);
339
340 if (dsa_is_cpu_port(ds, port))
341 off = CORE_IMP_CTL;
342 else
343 off = CORE_G_PCTL_PORT(port);
344
345 reg = core_readl(priv, off);
346 reg |= RX_DIS | TX_DIS;
347 core_writel(priv, reg, off);
348
349 /* Power down the port memory */
350 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
351 reg |= P_TXQ_PSM_VDD(port);
352 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
353 }
354
355 /* Returns 0 if EEE was not enabled, or 1 otherwise
356 */
357 static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
358 struct phy_device *phy)
359 {
360 struct bcm_sf2_priv *priv = ds_to_priv(ds);
361 struct ethtool_eee *p = &priv->port_sts[port].eee;
362 int ret;
363
364 p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
365
366 ret = phy_init_eee(phy, 0);
367 if (ret)
368 return 0;
369
370 bcm_sf2_eee_enable_set(ds, port, true);
371
372 return 1;
373 }
374
375 static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
376 struct ethtool_eee *e)
377 {
378 struct bcm_sf2_priv *priv = ds_to_priv(ds);
379 struct ethtool_eee *p = &priv->port_sts[port].eee;
380 u32 reg;
381
382 reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
383 e->eee_enabled = p->eee_enabled;
384 e->eee_active = !!(reg & (1 << port));
385
386 return 0;
387 }
388
389 static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
390 struct phy_device *phydev,
391 struct ethtool_eee *e)
392 {
393 struct bcm_sf2_priv *priv = ds_to_priv(ds);
394 struct ethtool_eee *p = &priv->port_sts[port].eee;
395
396 p->eee_enabled = e->eee_enabled;
397
398 if (!p->eee_enabled) {
399 bcm_sf2_eee_enable_set(ds, port, false);
400 } else {
401 p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
402 if (!p->eee_enabled)
403 return -EOPNOTSUPP;
404 }
405
406 return 0;
407 }
408
409 /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
410 * flush for that port.
411 */
412 static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
413 {
414 struct bcm_sf2_priv *priv = ds_to_priv(ds);
415 unsigned int timeout = 1000;
416 u32 reg;
417
418 core_writel(priv, port, CORE_FAST_AGE_PORT);
419
420 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
421 reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
422 core_writel(priv, reg, CORE_FAST_AGE_CTRL);
423
424 do {
425 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
426 if (!(reg & FAST_AGE_STR_DONE))
427 break;
428
429 cpu_relax();
430 } while (timeout--);
431
432 if (!timeout)
433 return -ETIMEDOUT;
434
435 core_writel(priv, 0, CORE_FAST_AGE_CTRL);
436
437 return 0;
438 }
439
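/* Bridge join: add this port to the VLAN control membership of every port
 * already in the bridge, and include those ports in this port's membership.
 */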
440 static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
441 u32 br_port_mask)
442 {
443 struct bcm_sf2_priv *priv = ds_to_priv(ds);
444 unsigned int i;
445 u32 reg, p_ctl;
446
447 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
448
449 for (i = 0; i < priv->hw_params.num_ports; i++) {
450 if (!((1 << i) & br_port_mask))
451 continue;
452
453 /* Add this local port to the remote port VLAN control
454 * membership and update the remote port bitmask
455 */
456 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
457 reg |= 1 << port;
458 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
459 priv->port_sts[i].vlan_ctl_mask = reg;
460
461 p_ctl |= 1 << i;
462 }
463
464 /* Configure the local port VLAN control membership to include
465 * remote ports and update the local port bitmask
466 */
467 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
468 priv->port_sts[port].vlan_ctl_mask = p_ctl;
469
470 return 0;
471 }
472
473 static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
474 u32 br_port_mask)
475 {
476 struct bcm_sf2_priv *priv = ds_to_priv(ds);
477 unsigned int i;
478 u32 reg, p_ctl;
479
480 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
481
482 for (i = 0; i < priv->hw_params.num_ports; i++) {
483 /* Don't touch ports that are not part of this bridge */
484 if (!((1 << i) & br_port_mask))
485 continue;
486
487 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
488 reg &= ~(1 << port);
489 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
490 priv->port_sts[i].vlan_ctl_mask = reg;
491
492 /* Prevent self removal to preserve isolation */
493 if (port != i)
494 p_ctl &= ~(1 << i);
495 }
496
497 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
498 priv->port_sts[port].vlan_ctl_mask = p_ctl;
499
500 return 0;
501 }
502
503 static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
504 u8 state)
505 {
506 struct bcm_sf2_priv *priv = ds_to_priv(ds);
507 u8 hw_state, cur_hw_state;
508 int ret = 0;
509 u32 reg;
510
511 reg = core_readl(priv, CORE_G_PCTL_PORT(port));
512 cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
513
514 switch (state) {
515 case BR_STATE_DISABLED:
516 hw_state = G_MISTP_DIS_STATE;
517 break;
518 case BR_STATE_LISTENING:
519 hw_state = G_MISTP_LISTEN_STATE;
520 break;
521 case BR_STATE_LEARNING:
522 hw_state = G_MISTP_LEARN_STATE;
523 break;
524 case BR_STATE_FORWARDING:
525 hw_state = G_MISTP_FWD_STATE;
526 break;
527 case BR_STATE_BLOCKING:
528 hw_state = G_MISTP_BLOCK_STATE;
529 break;
530 default:
531 pr_err("%s: invalid STP state: %d\n", __func__, state);
532 return -EINVAL;
533 }
534
535 /* Fast-age ARL entries if we are moving a port from Learning or
536 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
537 * state (hw_state)
538 */
539 if (cur_hw_state != hw_state) {
540 if (cur_hw_state >= G_MISTP_LEARN_STATE &&
541 hw_state <= G_MISTP_LISTEN_STATE) {
542 ret = bcm_sf2_sw_fast_age_port(ds, port);
543 if (ret) {
544 pr_err("%s: fast-ageing failed\n", __func__);
545 return ret;
546 }
547 }
548 }
549
550 reg = core_readl(priv, CORE_G_PCTL_PORT(port));
551 reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
552 reg |= hw_state;
553 core_writel(priv, reg, CORE_G_PCTL_PORT(port));
554
555 return 0;
556 }
557
558 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
559 {
560 struct bcm_sf2_priv *priv = dev_id;
561
562 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
563 ~priv->irq0_mask;
564 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
565
566 return IRQ_HANDLED;
567 }
568
569 static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
570 {
571 struct bcm_sf2_priv *priv = dev_id;
572
573 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
574 ~priv->irq1_mask;
575 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
576
577 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
578 priv->port_sts[7].link = 1;
579 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
580 priv->port_sts[7].link = 0;
581
582 return IRQ_HANDLED;
583 }
584
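/* Issue a software reset of the switch core and poll until it completes */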
585 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
586 {
587 unsigned int timeout = 1000;
588 u32 reg;
589
590 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
591 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
592 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
593
594 do {
595 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
596 if (!(reg & SOFTWARE_RESET))
597 break;
598
599 usleep_range(1000, 2000);
600 } while (timeout-- > 0);
601
602 if (timeout == 0)
603 return -ETIMEDOUT;
604
605 return 0;
606 }
607
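/* Mask and acknowledge all interrupts on both INTRL2 controllers */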
608 static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
609 {
610 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
611 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
612 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
613 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
614 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
615 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
616 }
617
618 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
619 {
620 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
621 struct bcm_sf2_priv *priv = ds_to_priv(ds);
622 struct device_node *dn;
623 void __iomem **base;
624 unsigned int port;
625 unsigned int i;
626 u32 reg, rev;
627 int ret;
628
629 spin_lock_init(&priv->indir_lock);
630 mutex_init(&priv->stats_mutex);
631
632 /* All the interesting properties are at the parent device_node
633 * level
634 */
635 dn = ds->pd->of_node->parent;
636
637 priv->irq0 = irq_of_parse_and_map(dn, 0);
638 priv->irq1 = irq_of_parse_and_map(dn, 1);
639
640 base = &priv->core;
641 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
642 *base = of_iomap(dn, i);
643 if (*base == NULL) {
644 pr_err("unable to find register: %s\n", reg_names[i]);
645 ret = -ENOMEM;
646 goto out_unmap;
647 }
648 base++;
649 }
650
651 ret = bcm_sf2_sw_rst(priv);
652 if (ret) {
653 pr_err("unable to software reset switch: %d\n", ret);
654 goto out_unmap;
655 }
656
657 /* Disable all interrupts and request them */
658 bcm_sf2_intr_disable(priv);
659
660 ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
661 "switch_0", priv);
662 if (ret < 0) {
663 pr_err("failed to request switch_0 IRQ\n");
664 goto out_unmap;
665 }
666
667 ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
668 "switch_1", priv);
669 if (ret < 0) {
670 pr_err("failed to request switch_1 IRQ\n");
671 goto out_free_irq0;
672 }
673
674 /* Reset the MIB counters */
675 reg = core_readl(priv, CORE_GMNCFGCFG);
676 reg |= RST_MIB_CNT;
677 core_writel(priv, reg, CORE_GMNCFGCFG);
678 reg &= ~RST_MIB_CNT;
679 core_writel(priv, reg, CORE_GMNCFGCFG);
680
681 /* Get the maximum number of ports for this switch */
682 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
683 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
684 priv->hw_params.num_ports = DSA_MAX_PORTS;
685
686 /* Assume a single GPHY setup if we can't read that property */
687 if (of_property_read_u32(dn, "brcm,num-gphy",
688 &priv->hw_params.num_gphy))
689 priv->hw_params.num_gphy = 1;
690
691 /* Enable all valid ports and disable those unused */
692 for (port = 0; port < priv->hw_params.num_ports; port++) {
693 /* IMP port receives special treatment */
694 if ((1 << port) & ds->phys_port_mask)
695 bcm_sf2_port_setup(ds, port, NULL);
696 else if (dsa_is_cpu_port(ds, port))
697 bcm_sf2_imp_setup(ds, port);
698 else
699 bcm_sf2_port_disable(ds, port, NULL);
700 }
701
702 /* Include the pseudo-PHY address and the broadcast PHY address to
703 * divert reads towards our workaround. This is only required for
704 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
705 * that we can use the regular SWITCH_MDIO master controller instead.
706 *
707 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
708 * to have a 1:1 mapping between Port address and PHY address in order
709 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
710 * not what we want here, so we initialize phys_mii_mask to 0 to always
711 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
712 */
713 if (of_machine_is_compatible("brcm,bcm7445d0"))
714 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
715 else
716 ds->phys_mii_mask = 0;
717
718 rev = reg_readl(priv, REG_SWITCH_REVISION);
719 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
720 SWITCH_TOP_REV_MASK;
721 priv->hw_params.core_rev = (rev & SF2_REV_MASK);
722
723 rev = reg_readl(priv, REG_PHY_REVISION);
724 priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
725
726 pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
727 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
728 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
729 priv->core, priv->irq0, priv->irq1);
730
731 return 0;
732
733 out_free_irq0:
734 free_irq(priv->irq0, priv);
735 out_unmap:
736 base = &priv->core;
737 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
738 if (*base)
739 iounmap(*base);
740 base++;
741 }
742 return ret;
743 }
744
745 static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
746 {
747 return 0;
748 }
749
750 static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
751 {
752 struct bcm_sf2_priv *priv = ds_to_priv(ds);
753
754 /* The BCM7xxx PHY driver expects to find the integrated PHY revision
755 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
756 * the REG_PHY_REVISION register layout is.
757 */
758
759 return priv->hw_params.gphy_rev;
760 }
761
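/* Indirect access to PHY registers through the switch core: temporarily
 * select the core as MDIO master, perform the pseudo-PHY read or write,
 * then release the MDIO master selection.
 */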
762 static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
763 int regnum, u16 val)
764 {
765 struct bcm_sf2_priv *priv = ds_to_priv(ds);
766 int ret = 0;
767 u32 reg;
768
769 reg = reg_readl(priv, REG_SWITCH_CNTRL);
770 reg |= MDIO_MASTER_SEL;
771 reg_writel(priv, reg, REG_SWITCH_CNTRL);
772
773 /* Page << 8 | offset */
774 reg = 0x70;
775 reg <<= 2;
776 core_writel(priv, addr, reg);
777
778 /* Page << 8 | offset */
779 reg = 0x80 << 8 | regnum << 1;
780 reg <<= 2;
781
782 if (op)
783 ret = core_readl(priv, reg);
784 else
785 core_writel(priv, val, reg);
786
787 reg = reg_readl(priv, REG_SWITCH_CNTRL);
788 reg &= ~MDIO_MASTER_SEL;
789 reg_writel(priv, reg, REG_SWITCH_CNTRL);
790
791 return ret & 0xffff;
792 }
793
794 static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
795 {
796 /* Intercept reads from the MDIO broadcast address or Broadcom
797 * pseudo-PHY address
798 */
799 switch (addr) {
800 case 0:
801 case BRCM_PSEUDO_PHY_ADDR:
802 return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
803 default:
804 return 0xffff;
805 }
806 }
807
808 static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
809 u16 val)
810 {
811 /* Intercept writes to the MDIO broadcast address or Broadcom
812 * pseudo-PHY address
813 */
814 switch (addr) {
815 case 0:
816 case BRCM_PSEUDO_PHY_ADDR:
817 bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
818 break;
819 }
820
821 return 0;
822 }
823
824 static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
825 struct phy_device *phydev)
826 {
827 struct bcm_sf2_priv *priv = ds_to_priv(ds);
828 u32 id_mode_dis = 0, port_mode;
829 const char *str = NULL;
830 u32 reg;
831
832 switch (phydev->interface) {
833 case PHY_INTERFACE_MODE_RGMII:
834 str = "RGMII (no delay)";
835 id_mode_dis = 1;
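		/* fall through - RGMII with no delay uses the same port mode */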
836 case PHY_INTERFACE_MODE_RGMII_TXID:
837 if (!str)
838 str = "RGMII (TX delay)";
839 port_mode = EXT_GPHY;
840 break;
841 case PHY_INTERFACE_MODE_MII:
842 str = "MII";
843 port_mode = EXT_EPHY;
844 break;
845 case PHY_INTERFACE_MODE_REVMII:
846 str = "Reverse MII";
847 port_mode = EXT_REVMII;
848 break;
849 default:
850 /* All other PHYs: internal and MoCA */
851 goto force_link;
852 }
853
854 /* If the link is down, just disable the interface to conserve power */
855 if (!phydev->link) {
856 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
857 reg &= ~RGMII_MODE_EN;
858 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
859 goto force_link;
860 }
861
862 /* Clear id_mode_dis bit, and the existing port mode, but
863 * make sure we enable the RGMII block for data to pass
864 */
865 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
866 reg &= ~ID_MODE_DIS;
867 reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
868 reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
869
870 reg |= port_mode | RGMII_MODE_EN;
871 if (id_mode_dis)
872 reg |= ID_MODE_DIS;
873
874 if (phydev->pause) {
875 if (phydev->asym_pause)
876 reg |= TX_PAUSE_EN;
877 reg |= RX_PAUSE_EN;
878 }
879
880 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
881
882 pr_info("Port %d configured for %s\n", port, str);
883
884 force_link:
885 /* Force link settings detected from the PHY */
886 reg = SW_OVERRIDE;
887 switch (phydev->speed) {
888 case SPEED_1000:
889 reg |= SPDSTS_1000 << SPEED_SHIFT;
890 break;
891 case SPEED_100:
892 reg |= SPDSTS_100 << SPEED_SHIFT;
893 break;
894 }
895
896 if (phydev->link)
897 reg |= LINK_STS;
898 if (phydev->duplex == DUPLEX_FULL)
899 reg |= DUPLX_MODE;
900
901 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
902 }
903
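/* Provide link parameters for ports backed by a fixed (emulated) PHY and
 * mirror the result into the port override register so traffic can flow.
 */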
904 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
905 struct fixed_phy_status *status)
906 {
907 struct bcm_sf2_priv *priv = ds_to_priv(ds);
908 u32 duplex, pause;
909 u32 reg;
910
911 duplex = core_readl(priv, CORE_DUPSTS);
912 pause = core_readl(priv, CORE_PAUSESTS);
913
914 status->link = 0;
915
916 /* Port 7 is special as we do not get link status from CORE_LNKSTS,
917 * which means that we need to force the link at the port override
918 * level to get the data to flow. We use what the interrupt
919 * handler determined before.
920 *
921 * For the other ports, we just force the link status, since this is
922 * a fixed PHY device.
923 */
924 if (port == 7) {
925 status->link = priv->port_sts[port].link;
926 /* For MoCA interfaces, also force a link down notification
927 * since some versions of the user-space daemon (mocad) use
928 * cmd->autoneg to force the link, which messes up the PHY
929 * state machine and makes it go into the PHY_FORCING state instead.
930 */
931 if (!status->link)
932 netif_carrier_off(ds->ports[port]);
933 status->duplex = 1;
934 } else {
935 status->link = 1;
936 status->duplex = !!(duplex & (1 << port));
937 }
938
939 reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
940 reg |= SW_OVERRIDE;
941 if (status->link)
942 reg |= LINK_STS;
943 else
944 reg &= ~LINK_STS;
945 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
946
947 if ((pause & (1 << port)) &&
948 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
949 status->asym_pause = 1;
950 status->pause = 1;
951 }
952
953 if (pause & (1 << port))
954 status->pause = 1;
955 }
956
957 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
958 {
959 struct bcm_sf2_priv *priv = ds_to_priv(ds);
960 unsigned int port;
961
962 bcm_sf2_intr_disable(priv);
963
964 /* Disable all physically present ports, including the IMP
965 * port; the other ones have already been disabled during
966 * bcm_sf2_sw_setup().
967 */
968 for (port = 0; port < DSA_MAX_PORTS; port++) {
969 if ((1 << port) & ds->phys_port_mask ||
970 dsa_is_cpu_port(ds, port))
971 bcm_sf2_port_disable(ds, port, NULL);
972 }
973
974 return 0;
975 }
976
977 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
978 {
979 struct bcm_sf2_priv *priv = ds_to_priv(ds);
980 unsigned int port;
981 int ret;
982
983 ret = bcm_sf2_sw_rst(priv);
984 if (ret) {
985 pr_err("%s: failed to software reset switch\n", __func__);
986 return ret;
987 }
988
989 if (priv->hw_params.num_gphy == 1)
990 bcm_sf2_gphy_enable_set(ds, true);
991
992 for (port = 0; port < DSA_MAX_PORTS; port++) {
993 if ((1 << port) & ds->phys_port_mask)
994 bcm_sf2_port_setup(ds, port, NULL);
995 else if (dsa_is_cpu_port(ds, port))
996 bcm_sf2_imp_setup(ds, port);
997 }
998
999 return 0;
1000 }
1001
1002 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
1003 struct ethtool_wolinfo *wol)
1004 {
1005 struct net_device *p = ds->dst[ds->index].master_netdev;
1006 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1007 struct ethtool_wolinfo pwol;
1008
1009 /* Get the parent device WoL settings */
1010 p->ethtool_ops->get_wol(p, &pwol);
1011
1012 /* Advertise the parent device supported settings */
1013 wol->supported = pwol.supported;
1014 memset(&wol->sopass, 0, sizeof(wol->sopass));
1015
1016 if (pwol.wolopts & WAKE_MAGICSECURE)
1017 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
1018
1019 if (priv->wol_ports_mask & (1 << port))
1020 wol->wolopts = pwol.wolopts;
1021 else
1022 wol->wolopts = 0;
1023 }
1024
1025 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1026 struct ethtool_wolinfo *wol)
1027 {
1028 struct net_device *p = ds->dst[ds->index].master_netdev;
1029 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1030 s8 cpu_port = ds->dst[ds->index].cpu_port;
1031 struct ethtool_wolinfo pwol;
1032
1033 p->ethtool_ops->get_wol(p, &pwol);
1034 if (wol->wolopts & ~pwol.supported)
1035 return -EINVAL;
1036
1037 if (wol->wolopts)
1038 priv->wol_ports_mask |= (1 << port);
1039 else
1040 priv->wol_ports_mask &= ~(1 << port);
1041
1042 /* If we have at least one port enabled, make sure the CPU port
1043 * is also enabled. If the CPU port is the only one enabled, we
1044 * disable it since this configuration does not make sense.
1045 */
1046 if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
1047 priv->wol_ports_mask |= (1 << cpu_port);
1048 else
1049 priv->wol_ports_mask &= ~(1 << cpu_port);
1050
1051 return p->ethtool_ops->set_wol(p, wol);
1052 }
1053
1054 static struct dsa_switch_driver bcm_sf2_switch_driver = {
1055 .tag_protocol = DSA_TAG_PROTO_BRCM,
1056 .priv_size = sizeof(struct bcm_sf2_priv),
1057 .probe = bcm_sf2_sw_probe,
1058 .setup = bcm_sf2_sw_setup,
1059 .set_addr = bcm_sf2_sw_set_addr,
1060 .get_phy_flags = bcm_sf2_sw_get_phy_flags,
1061 .phy_read = bcm_sf2_sw_phy_read,
1062 .phy_write = bcm_sf2_sw_phy_write,
1063 .get_strings = bcm_sf2_sw_get_strings,
1064 .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
1065 .get_sset_count = bcm_sf2_sw_get_sset_count,
1066 .adjust_link = bcm_sf2_sw_adjust_link,
1067 .fixed_link_update = bcm_sf2_sw_fixed_link_update,
1068 .suspend = bcm_sf2_sw_suspend,
1069 .resume = bcm_sf2_sw_resume,
1070 .get_wol = bcm_sf2_sw_get_wol,
1071 .set_wol = bcm_sf2_sw_set_wol,
1072 .port_enable = bcm_sf2_port_setup,
1073 .port_disable = bcm_sf2_port_disable,
1074 .get_eee = bcm_sf2_sw_get_eee,
1075 .set_eee = bcm_sf2_sw_set_eee,
1076 .port_join_bridge = bcm_sf2_sw_br_join,
1077 .port_leave_bridge = bcm_sf2_sw_br_leave,
1078 .port_stp_update = bcm_sf2_sw_br_set_stp_state,
1079 };
1080
1081 static int __init bcm_sf2_init(void)
1082 {
1083 register_switch_driver(&bcm_sf2_switch_driver);
1084
1085 return 0;
1086 }
1087 module_init(bcm_sf2_init);
1088
1089 static void __exit bcm_sf2_exit(void)
1090 {
1091 unregister_switch_driver(&bcm_sf2_switch_driver);
1092 }
1093 module_exit(bcm_sf2_exit);
1094
1095 MODULE_AUTHOR("Broadcom Corporation");
1096 MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1097 MODULE_LICENSE("GPL");
1098 MODULE_ALIAS("platform:brcm-sf2");