/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <net/dsa.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include "dsa_priv.h"

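/* Global list of all switch trees known to the kernel. Additions and
 * removals only happen under dsa2_mutex, which is taken by
 * dsa_register_switch() and dsa_unregister_switch().
 */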
static LIST_HEAD(dsa_switch_trees);
static DEFINE_MUTEX(dsa2_mutex);

static struct dsa_switch_tree *dsa_get_dst(u32 tree)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_switch_trees, list)
		if (dst->tree == tree)
			return dst;
	return NULL;
}

static void dsa_free_dst(struct kref *ref)
{
	struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
						   refcount);

	list_del(&dst->list);
	kfree(dst);
}

static void dsa_put_dst(struct dsa_switch_tree *dst)
{
	kref_put(&dst->refcount, dsa_free_dst);
}

static struct dsa_switch_tree *dsa_add_dst(u32 tree)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;
	dst->tree = tree;
	dst->cpu_switch = -1;
	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_switch_trees);
	kref_init(&dst->refcount);

	return dst;
}

static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
			   struct dsa_switch *ds, u32 index)
{
	kref_get(&dst->refcount);
	dst->ds[index] = ds;
}

static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
			   struct dsa_switch *ds, u32 index)
{
	dst->ds[index] = NULL;
	kref_put(&dst->refcount, dsa_free_dst);
}

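/* Ports are classified by their "label" property in the device tree:
 * "cpu" marks the port wired to the host Ethernet controller, "dsa"
 * marks a port cascaded to another switch, and any other label makes
 * the port a user port whose slave network device takes that name.
 */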
static bool dsa_port_is_dsa(struct device_node *port)
{
	const char *name;

	name = of_get_property(port, "label", NULL);
	if (!name)
		return false;

	if (!strcmp(name, "dsa"))
		return true;

	return false;
}

static bool dsa_port_is_cpu(struct device_node *port)
{
	const char *name;

	name = of_get_property(port, "label", NULL);
	if (!name)
		return false;

	if (!strcmp(name, "cpu"))
		return true;

	return false;
}

static bool dsa_ds_find_port(struct dsa_switch *ds,
			     struct device_node *port)
{
	u32 index;

	for (index = 0; index < DSA_MAX_PORTS; index++)
		if (ds->ports[index].dn == port)
			return true;
	return false;
}

static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
					    struct device_node *port)
{
	struct dsa_switch *ds;
	u32 index;

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		if (dsa_ds_find_port(ds, port))
			return ds;
	}

	return NULL;
}

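/* A "dsa" port carries one "link" phandle per switch it can reach.
 * dsa_port_complete() resolves each phandle to the switch owning that
 * port and records, in the source switch's routing table, which local
 * port must be used to reach the destination switch.
 */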
static int dsa_port_complete(struct dsa_switch_tree *dst,
			     struct dsa_switch *src_ds,
			     struct device_node *port,
			     u32 src_port)
{
	struct device_node *link;
	int index;
	struct dsa_switch *dst_ds;

	for (index = 0;; index++) {
		link = of_parse_phandle(port, "link", index);
		if (!link)
			break;

		dst_ds = dsa_dst_find_port(dst, link);
		of_node_put(link);

		if (!dst_ds)
			return 1;

		src_ds->rtable[dst_ds->index] = src_port;
	}

	return 0;
}

/* A switch is complete if all of its DSA ports' link phandles point to
 * ports known in the tree. A return value of 1 means the tree is not
 * complete. This is not an error condition. A value of 0 is
 * success.
 */
static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
	struct device_node *port;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_PORTS; index++) {
		port = ds->ports[index].dn;
		if (!port)
			continue;

		if (!dsa_port_is_dsa(port))
			continue;

		err = dsa_port_complete(dst, ds, port, index);
		if (err != 0)
			return err;

		ds->dsa_port_mask |= BIT(index);
	}

	return 0;
}

/* A tree is complete if all of its switches' DSA ports' link phandles
 * point to ports known in the tree. A return value of 1 means the tree
 * is not complete. This is not an error condition. A value of 0 is
 * success.
 */
static int dsa_dst_complete(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		err = dsa_ds_complete(dst, ds);
		if (err != 0)
			return err;
	}

	return 0;
}

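/* Per-port setup and teardown, one pair of helpers per port flavour:
 * inter-switch (dsa) ports, the CPU port, and user ports. The dsa and
 * CPU variants share dsa_cpu_dsa_setup(); user ports get a slave
 * network device instead.
 */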
static int dsa_dsa_port_apply(struct device_node *port, u32 index,
			      struct dsa_switch *ds)
{
	int err;

	err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
	if (err) {
		dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
			 index, err);
		return err;
	}

	return 0;
}

static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
				 struct dsa_switch *ds)
{
	dsa_cpu_dsa_destroy(port);
}

static int dsa_cpu_port_apply(struct device_node *port, u32 index,
			      struct dsa_switch *ds)
{
	int err;

	err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
	if (err) {
		dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
			 index, err);
		return err;
	}

	ds->cpu_port_mask |= BIT(index);

	return 0;
}

static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
				 struct dsa_switch *ds)
{
	dsa_cpu_dsa_destroy(port);
	ds->cpu_port_mask &= ~BIT(index);
}

static int dsa_user_port_apply(struct device_node *port, u32 index,
			       struct dsa_switch *ds)
{
	const char *name;
	int err;

	name = of_get_property(port, "label", NULL);

	err = dsa_slave_create(ds, ds->dev, index, name);
	if (err) {
		dev_warn(ds->dev, "Failed to create slave %d: %d\n",
			 index, err);
		return err;
	}

	return 0;
}

static void dsa_user_port_unapply(struct device_node *port, u32 index,
				  struct dsa_switch *ds)
{
	if (ds->ports[index].netdev) {
		dsa_slave_destroy(ds->ports[index].netdev);
		ds->ports[index].netdev = NULL;
		ds->enabled_port_mask &= ~(1 << index);
	}
}

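/* Bring up a single switch: run the driver's setup hook, program the
 * master device's MAC address through set_addr, register a slave MII
 * bus if the driver reads PHYs itself, and then apply every port
 * according to its flavour.
 */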
static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
	struct device_node *port;
	u32 index;
	int err;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask = ds->enabled_port_mask;

	err = ds->ops->setup(ds);
	if (err < 0)
		return err;

	err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
	if (err < 0)
		return err;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
		if (!ds->slave_mii_bus)
			return -ENOMEM;

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			return err;
	}

	for (index = 0; index < DSA_MAX_PORTS; index++) {
		port = ds->ports[index].dn;
		if (!port)
			continue;

		if (dsa_port_is_dsa(port)) {
			err = dsa_dsa_port_apply(port, index, ds);
			if (err)
				return err;
			continue;
		}

		if (dsa_port_is_cpu(port)) {
			err = dsa_cpu_port_apply(port, index, ds);
			if (err)
				return err;
			continue;
		}

		err = dsa_user_port_apply(port, index, ds);
		if (err)
			continue;
	}

	return 0;
}

static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
	struct device_node *port;
	u32 index;

	for (index = 0; index < DSA_MAX_PORTS; index++) {
		port = ds->ports[index].dn;
		if (!port)
			continue;

		if (dsa_port_is_dsa(port)) {
			dsa_dsa_port_unapply(port, index, ds);
			continue;
		}

		if (dsa_port_is_cpu(port)) {
			dsa_cpu_port_unapply(port, index, ds);
			continue;
		}

		dsa_user_port_unapply(port, index, ds);
	}

	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_unregister(ds->slave_mii_bus);
}

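/* Applying a tree sets up every member switch and the ethtool ops of
 * the CPU port, then publishes the tree through the master device's
 * dsa_ptr so the receive path starts handing tagged frames to
 * dst->rcv. Unapplying undoes this, clearing dsa_ptr first.
 */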
static int dsa_dst_apply(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		err = dsa_ds_apply(dst, ds);
		if (err)
			return err;
	}

	err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
	if (err)
		return err;

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();
	dst->master_netdev->dsa_ptr = (void *)dst;
	dst->applied = true;

	return 0;
}

static void dsa_dst_unapply(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;

	if (!dst->applied)
		return;

	dst->master_netdev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		dsa_ds_unapply(dst, ds);
	}

	dsa_cpu_port_ethtool_restore(dst->ds[0]);

	pr_info("DSA: tree %d unapplied\n", dst->tree);
	dst->applied = false;
}

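/* A CPU port carries an "ethernet" phandle pointing at the host
 * network device it is attached to. The first CPU port seen becomes
 * the tree's default CPU port, and the switch driver's tagging
 * protocol is resolved into dst->tag_ops / dst->rcv here.
 */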
static int dsa_cpu_parse(struct device_node *port, u32 index,
			 struct dsa_switch_tree *dst,
			 struct dsa_switch *ds)
{
	enum dsa_tag_protocol tag_protocol;
	struct net_device *ethernet_dev;
	struct device_node *ethernet;

	ethernet = of_parse_phandle(port, "ethernet", 0);
	if (!ethernet)
		return -EINVAL;

	ethernet_dev = of_find_net_device_by_node(ethernet);
	if (!ethernet_dev)
		return -EPROBE_DEFER;

	if (!ds->master_netdev)
		ds->master_netdev = ethernet_dev;

	if (!dst->master_netdev)
		dst->master_netdev = ethernet_dev;

	if (dst->cpu_switch == -1) {
		dst->cpu_switch = ds->index;
		dst->cpu_port = index;
	}

	tag_protocol = ds->ops->get_tag_protocol(ds);
	dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
	if (IS_ERR(dst->tag_ops)) {
		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(dst->tag_ops);
	}

	dst->rcv = dst->tag_ops->rcv;

	return 0;
}

static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
	struct device_node *port;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_PORTS; index++) {
		port = ds->ports[index].dn;
		if (!port)
			continue;

		if (dsa_port_is_cpu(port)) {
			err = dsa_cpu_parse(port, index, dst, ds);
			if (err)
				return err;
		}
	}

	pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);

	return 0;
}

static int dsa_dst_parse(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		err = dsa_ds_parse(dst, ds);
		if (err)
			return err;
	}

	if (!dst->master_netdev) {
		pr_warn("Tree has no master device\n");
		return -EINVAL;
	}

	pr_info("DSA: tree %d parsed\n", dst->tree);

	return 0;
}

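/* The properties consumed below come from the switch's "ports" child
 * node. An illustrative (hypothetical) fragment, with node names,
 * labels and phandles chosen only for the example:
 *
 *	switch0: switch@0 {
 *		dsa,member = <0 0>;
 *		ports {
 *			port@0 {
 *				reg = <0>;
 *				label = "lan1";
 *			};
 *			port@5 {
 *				reg = <5>;
 *				label = "cpu";
 *				ethernet = <&eth0>;
 *			};
 *			port@6 {
 *				reg = <6>;
 *				label = "dsa";
 *				link = <&switch1port6>;
 *			};
 *		};
 *	};
 */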
static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
{
	struct device_node *port;
	int err;
	u32 reg;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err)
			return err;

		if (reg >= DSA_MAX_PORTS)
			return -EINVAL;

		ds->ports[reg].dn = port;

		/* Initialize enabled_port_mask now for ops->setup()
		 * to have access to a correct value, just like what
		 * net/dsa/dsa.c::dsa_switch_setup_one does.
		 */
		if (!dsa_port_is_cpu(port))
			ds->enabled_port_mask |= 1 << reg;
	}

	return 0;
}

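/* "dsa,member" is a pair of cells, <tree index>: the tree this switch
 * belongs to and its position within that tree. The property is
 * optional; a switch without it ends up as switch 0 of tree 0.
 */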
static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
{
	int err;

	*tree = *index = 0;

	err = of_property_read_u32_index(np, "dsa,member", 0, tree);
	if (err) {
		/* Does not exist, but it is optional */
		if (err == -EINVAL)
			return 0;
		return err;
	}

	err = of_property_read_u32_index(np, "dsa,member", 1, index);
	if (err)
		return err;

	if (*index >= DSA_MAX_SWITCHES)
		return -EINVAL;

	return 0;
}

static struct device_node *dsa_get_ports(struct dsa_switch *ds,
					 struct device_node *np)
{
	struct device_node *ports;

	ports = of_get_child_by_name(np, "ports");
	if (!ports) {
		dev_err(ds->dev, "no ports child node found\n");
		return ERR_PTR(-EINVAL);
	}

	return ports;
}

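/* Registration proceeds in three steps: parse the switch's member and
 * ports nodes, attach the switch to its (possibly new) tree, and once
 * every expected switch has shown up (dsa_dst_complete() returns 0),
 * parse the CPU ports and apply the whole tree in one go.
 */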
static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
{
	struct device_node *ports = dsa_get_ports(ds, np);
	struct dsa_switch_tree *dst;
	u32 tree, index;
	int i, err;

	err = dsa_parse_member(np, &tree, &index);
	if (err)
		return err;

	if (IS_ERR(ports))
		return PTR_ERR(ports);

	err = dsa_parse_ports_dn(ports, ds);
	if (err)
		return err;

	dst = dsa_get_dst(tree);
	if (!dst) {
		dst = dsa_add_dst(tree);
		if (!dst)
			return -ENOMEM;
	}

	if (dst->ds[index]) {
		err = -EBUSY;
		goto out;
	}

	ds->dst = dst;
	ds->index = index;

	/* Initialize the routing table */
	for (i = 0; i < DSA_MAX_SWITCHES; ++i)
		ds->rtable[i] = DSA_RTABLE_NONE;

	dsa_dst_add_ds(dst, ds, index);

	err = dsa_dst_complete(dst);
	if (err < 0)
		goto out_del_dst;

	if (err == 1) {
		/* Not all switches registered yet */
		err = 0;
		goto out;
	}

	if (dst->applied) {
		pr_info("DSA: Disjoint trees?\n");
		return -EINVAL;
	}

	err = dsa_dst_parse(dst);
	if (err)
		goto out_del_dst;

	err = dsa_dst_apply(dst);
	if (err) {
		dsa_dst_unapply(dst);
		goto out_del_dst;
	}

	dsa_put_dst(dst);
	return 0;

out_del_dst:
	dsa_dst_del_ds(dst, ds, ds->index);
out:
	dsa_put_dst(dst);

	return err;
}

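/* A rough sketch of how a switch driver is expected to call into this
 * API from its probe path; the foo_* names are made up for the
 * example:
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *	ds->dev = dev;
 *	ds->ops = &foo_switch_ops;
 *	return dsa_register_switch(ds, dev->of_node);
 */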
int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = _dsa_register_switch(ds, np);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

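/* Unregistering any member switch tears the whole tree down; the tree
 * structure itself is freed once its reference count drops to zero.
 */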
static void _dsa_unregister_switch(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_dst_unapply(dst);

	dsa_dst_del_ds(dst, ds, ds->index);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	_dsa_unregister_switch(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);