switchdev: introduce switchdev add/del obj ops
[deliverable/linux.git] / net / switchdev / switchdev.c
CommitLineData
007f790c
JP
1/*
2 * net/switchdev/switchdev.c - Switch device API
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
f8f21471 4 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
007f790c
JP
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/init.h>
03bf0c28
JP
15#include <linux/mutex.h>
16#include <linux/notifier.h>
007f790c 17#include <linux/netdevice.h>
5e8d9049 18#include <net/ip_fib.h>
007f790c
JP
19#include <net/switchdev.h>
20
3094333d
SF
21/**
22 * switchdev_port_attr_get - Get port attribute
23 *
24 * @dev: port device
25 * @attr: attribute to get
26 */
27int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
28{
29 const struct switchdev_ops *ops = dev->switchdev_ops;
30 struct net_device *lower_dev;
31 struct list_head *iter;
32 struct switchdev_attr first = {
33 .id = SWITCHDEV_ATTR_UNDEFINED
34 };
35 int err = -EOPNOTSUPP;
36
37 if (ops && ops->switchdev_port_attr_get)
38 return ops->switchdev_port_attr_get(dev, attr);
39
40 if (attr->flags & SWITCHDEV_F_NO_RECURSE)
41 return err;
42
43 /* Switch device port(s) may be stacked under
44 * bond/team/vlan dev, so recurse down to get attr on
45 * each port. Return -ENODATA if attr values don't
46 * compare across ports.
47 */
48
49 netdev_for_each_lower_dev(dev, lower_dev, iter) {
50 err = switchdev_port_attr_get(lower_dev, attr);
51 if (err)
52 break;
53 if (first.id == SWITCHDEV_ATTR_UNDEFINED)
54 first = *attr;
55 else if (memcmp(&first, attr, sizeof(*attr)))
56 return -ENODATA;
57 }
58
59 return err;
60}
61EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
62
63static int __switchdev_port_attr_set(struct net_device *dev,
64 struct switchdev_attr *attr)
65{
66 const struct switchdev_ops *ops = dev->switchdev_ops;
67 struct net_device *lower_dev;
68 struct list_head *iter;
69 int err = -EOPNOTSUPP;
70
71 if (ops && ops->switchdev_port_attr_set)
72 return ops->switchdev_port_attr_set(dev, attr);
73
74 if (attr->flags & SWITCHDEV_F_NO_RECURSE)
75 return err;
76
77 /* Switch device port(s) may be stacked under
78 * bond/team/vlan dev, so recurse down to set attr on
79 * each port.
80 */
81
82 netdev_for_each_lower_dev(dev, lower_dev, iter) {
83 err = __switchdev_port_attr_set(lower_dev, attr);
84 if (err)
85 break;
86 }
87
88 return err;
89}
90
/* Context for an attr set deferred to a worker; used when the caller
 * does not hold rtnl_lock (see switchdev_port_attr_set).
 */
struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;		/* reference held via dev_hold() until work runs */
	struct switchdev_attr attr;	/* copy of the attr to set under rtnl_lock */
};
96
97static void switchdev_port_attr_set_work(struct work_struct *work)
98{
99 struct switchdev_attr_set_work *asw =
100 container_of(work, struct switchdev_attr_set_work, work);
101 int err;
102
103 rtnl_lock();
104 err = switchdev_port_attr_set(asw->dev, &asw->attr);
105 BUG_ON(err);
106 rtnl_unlock();
107
108 dev_put(asw->dev);
109 kfree(work);
110}
111
112static int switchdev_port_attr_set_defer(struct net_device *dev,
113 struct switchdev_attr *attr)
114{
115 struct switchdev_attr_set_work *asw;
116
117 asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
118 if (!asw)
119 return -ENOMEM;
120
121 INIT_WORK(&asw->work, switchdev_port_attr_set_work);
122
123 dev_hold(dev);
124 asw->dev = dev;
125 memcpy(&asw->attr, attr, sizeof(asw->attr));
126
127 schedule_work(&asw->work);
128
129 return 0;
130}
131
132/**
133 * switchdev_port_attr_set - Set port attribute
134 *
135 * @dev: port device
136 * @attr: attribute to set
137 *
138 * Use a 2-phase prepare-commit transaction model to ensure
139 * system is not left in a partially updated state due to
140 * failure from driver/device.
141 */
142int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
143{
144 int err;
145
146 if (!rtnl_is_locked()) {
147 /* Running prepare-commit transaction across stacked
148 * devices requires nothing moves, so if rtnl_lock is
149 * not held, schedule a worker thread to hold rtnl_lock
150 * while setting attr.
151 */
152
153 return switchdev_port_attr_set_defer(dev, attr);
154 }
155
156 /* Phase I: prepare for attr set. Driver/device should fail
157 * here if there are going to be issues in the commit phase,
158 * such as lack of resources or support. The driver/device
159 * should reserve resources needed for the commit phase here,
160 * but should not commit the attr.
161 */
162
163 attr->trans = SWITCHDEV_TRANS_PREPARE;
164 err = __switchdev_port_attr_set(dev, attr);
165 if (err) {
166 /* Prepare phase failed: abort the transaction. Any
167 * resources reserved in the prepare phase are
168 * released.
169 */
170
171 attr->trans = SWITCHDEV_TRANS_ABORT;
172 __switchdev_port_attr_set(dev, attr);
173
174 return err;
175 }
176
177 /* Phase II: commit attr set. This cannot fail as a fault
178 * of driver/device. If it does, it's a bug in the driver/device
179 * because the driver said everythings was OK in phase I.
180 */
181
182 attr->trans = SWITCHDEV_TRANS_COMMIT;
183 err = __switchdev_port_attr_set(dev, attr);
184 BUG_ON(err);
185
186 return err;
187}
188EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
189
491d0f15
SF
190int __switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
191{
192 const struct switchdev_ops *ops = dev->switchdev_ops;
193 struct net_device *lower_dev;
194 struct list_head *iter;
195 int err = -EOPNOTSUPP;
196
197 if (ops && ops->switchdev_port_obj_add)
198 return ops->switchdev_port_obj_add(dev, obj);
199
200 /* Switch device port(s) may be stacked under
201 * bond/team/vlan dev, so recurse down to add object on
202 * each port.
203 */
204
205 netdev_for_each_lower_dev(dev, lower_dev, iter) {
206 err = __switchdev_port_obj_add(lower_dev, obj);
207 if (err)
208 break;
209 }
210
211 return err;
212}
213
214/**
215 * switchdev_port_obj_add - Add port object
216 *
217 * @dev: port device
218 * @obj: object to add
219 *
220 * Use a 2-phase prepare-commit transaction model to ensure
221 * system is not left in a partially updated state due to
222 * failure from driver/device.
223 *
224 * rtnl_lock must be held.
225 */
226int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
227{
228 int err;
229
230 ASSERT_RTNL();
231
232 /* Phase I: prepare for obj add. Driver/device should fail
233 * here if there are going to be issues in the commit phase,
234 * such as lack of resources or support. The driver/device
235 * should reserve resources needed for the commit phase here,
236 * but should not commit the obj.
237 */
238
239 obj->trans = SWITCHDEV_TRANS_PREPARE;
240 err = __switchdev_port_obj_add(dev, obj);
241 if (err) {
242 /* Prepare phase failed: abort the transaction. Any
243 * resources reserved in the prepare phase are
244 * released.
245 */
246
247 obj->trans = SWITCHDEV_TRANS_ABORT;
248 __switchdev_port_obj_add(dev, obj);
249
250 return err;
251 }
252
253 /* Phase II: commit obj add. This cannot fail as a fault
254 * of driver/device. If it does, it's a bug in the driver/device
255 * because the driver said everythings was OK in phase I.
256 */
257
258 obj->trans = SWITCHDEV_TRANS_COMMIT;
259 err = __switchdev_port_obj_add(dev, obj);
260 WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
261
262 return err;
263}
264EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
265
266/**
267 * switchdev_port_obj_del - Delete port object
268 *
269 * @dev: port device
270 * @obj: object to delete
271 */
272int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
273{
274 const struct switchdev_ops *ops = dev->switchdev_ops;
275 struct net_device *lower_dev;
276 struct list_head *iter;
277 int err = -EOPNOTSUPP;
278
279 if (ops && ops->switchdev_port_obj_del)
280 return ops->switchdev_port_obj_del(dev, obj);
281
282 /* Switch device port(s) may be stacked under
283 * bond/team/vlan dev, so recurse down to delete object on
284 * each port.
285 */
286
287 netdev_for_each_lower_dev(dev, lower_dev, iter) {
288 err = switchdev_port_obj_del(lower_dev, obj);
289 if (err)
290 break;
291 }
292
293 return err;
294}
295EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
296
ebb9a03a
JP
/* Serializes register/unregister/call on the switchdev notifier chain.
 * A raw (unlocked) chain is used so that locking stays under our control.
 */
static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
03bf0c28
JP
299
300/**
ebb9a03a 301 * register_switchdev_notifier - Register notifier
03bf0c28
JP
302 * @nb: notifier_block
303 *
304 * Register switch device notifier. This should be used by code
305 * which needs to monitor events happening in particular device.
306 * Return values are same as for atomic_notifier_chain_register().
307 */
ebb9a03a 308int register_switchdev_notifier(struct notifier_block *nb)
03bf0c28
JP
309{
310 int err;
311
ebb9a03a
JP
312 mutex_lock(&switchdev_mutex);
313 err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
314 mutex_unlock(&switchdev_mutex);
03bf0c28
JP
315 return err;
316}
ebb9a03a 317EXPORT_SYMBOL_GPL(register_switchdev_notifier);
03bf0c28
JP
318
319/**
ebb9a03a 320 * unregister_switchdev_notifier - Unregister notifier
03bf0c28
JP
321 * @nb: notifier_block
322 *
323 * Unregister switch device notifier.
324 * Return values are same as for atomic_notifier_chain_unregister().
325 */
ebb9a03a 326int unregister_switchdev_notifier(struct notifier_block *nb)
03bf0c28
JP
327{
328 int err;
329
ebb9a03a
JP
330 mutex_lock(&switchdev_mutex);
331 err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
332 mutex_unlock(&switchdev_mutex);
03bf0c28
JP
333 return err;
334}
ebb9a03a 335EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
03bf0c28
JP
336
337/**
ebb9a03a 338 * call_switchdev_notifiers - Call notifiers
03bf0c28
JP
339 * @val: value passed unmodified to notifier function
340 * @dev: port device
341 * @info: notifier information data
342 *
343 * Call all network notifier blocks. This should be called by driver
344 * when it needs to propagate hardware event.
345 * Return values are same as for atomic_notifier_call_chain().
346 */
ebb9a03a
JP
347int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
348 struct switchdev_notifier_info *info)
03bf0c28
JP
349{
350 int err;
351
352 info->dev = dev;
ebb9a03a
JP
353 mutex_lock(&switchdev_mutex);
354 err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
355 mutex_unlock(&switchdev_mutex);
03bf0c28
JP
356 return err;
357}
ebb9a03a 358EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
8a44dbb2
RP
359
360/**
ebb9a03a 361 * switchdev_port_bridge_setlink - Notify switch device port of bridge
8a44dbb2
RP
362 * port attributes
363 *
364 * @dev: port device
365 * @nlh: netlink msg with bridge port attributes
366 * @flags: bridge setlink flags
367 *
368 * Notify switch device port of bridge port attributes
369 */
ebb9a03a
JP
370int switchdev_port_bridge_setlink(struct net_device *dev,
371 struct nlmsghdr *nlh, u16 flags)
8a44dbb2
RP
372{
373 const struct net_device_ops *ops = dev->netdev_ops;
374
375 if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
376 return 0;
377
378 if (!ops->ndo_bridge_setlink)
379 return -EOPNOTSUPP;
380
381 return ops->ndo_bridge_setlink(dev, nlh, flags);
382}
ebb9a03a 383EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
8a44dbb2
RP
384
385/**
ebb9a03a 386 * switchdev_port_bridge_dellink - Notify switch device port of bridge
8a44dbb2
RP
387 * port attribute delete
388 *
389 * @dev: port device
390 * @nlh: netlink msg with bridge port attributes
391 * @flags: bridge setlink flags
392 *
393 * Notify switch device port of bridge port attribute delete
394 */
ebb9a03a
JP
395int switchdev_port_bridge_dellink(struct net_device *dev,
396 struct nlmsghdr *nlh, u16 flags)
8a44dbb2
RP
397{
398 const struct net_device_ops *ops = dev->netdev_ops;
399
400 if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
401 return 0;
402
403 if (!ops->ndo_bridge_dellink)
404 return -EOPNOTSUPP;
405
406 return ops->ndo_bridge_dellink(dev, nlh, flags);
407}
ebb9a03a 408EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
8a44dbb2
RP
409
410/**
ebb9a03a
JP
411 * ndo_dflt_switchdev_port_bridge_setlink - default ndo bridge setlink
412 * op for master devices
8a44dbb2
RP
413 *
414 * @dev: port device
415 * @nlh: netlink msg with bridge port attributes
416 * @flags: bridge setlink flags
417 *
418 * Notify master device slaves of bridge port attributes
419 */
ebb9a03a
JP
420int ndo_dflt_switchdev_port_bridge_setlink(struct net_device *dev,
421 struct nlmsghdr *nlh, u16 flags)
8a44dbb2
RP
422{
423 struct net_device *lower_dev;
424 struct list_head *iter;
425 int ret = 0, err = 0;
426
427 if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
428 return ret;
429
430 netdev_for_each_lower_dev(dev, lower_dev, iter) {
ebb9a03a 431 err = switchdev_port_bridge_setlink(lower_dev, nlh, flags);
8a44dbb2
RP
432 if (err && err != -EOPNOTSUPP)
433 ret = err;
434 }
435
436 return ret;
437}
ebb9a03a 438EXPORT_SYMBOL_GPL(ndo_dflt_switchdev_port_bridge_setlink);
8a44dbb2
RP
439
440/**
ebb9a03a
JP
441 * ndo_dflt_switchdev_port_bridge_dellink - default ndo bridge dellink
442 * op for master devices
8a44dbb2
RP
443 *
444 * @dev: port device
445 * @nlh: netlink msg with bridge port attributes
446 * @flags: bridge dellink flags
447 *
448 * Notify master device slaves of bridge port attribute deletes
449 */
ebb9a03a
JP
450int ndo_dflt_switchdev_port_bridge_dellink(struct net_device *dev,
451 struct nlmsghdr *nlh, u16 flags)
8a44dbb2
RP
452{
453 struct net_device *lower_dev;
454 struct list_head *iter;
455 int ret = 0, err = 0;
456
457 if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
458 return ret;
459
460 netdev_for_each_lower_dev(dev, lower_dev, iter) {
ebb9a03a 461 err = switchdev_port_bridge_dellink(lower_dev, nlh, flags);
8a44dbb2
RP
462 if (err && err != -EOPNOTSUPP)
463 ret = err;
464 }
465
466 return ret;
467}
ebb9a03a 468EXPORT_SYMBOL_GPL(ndo_dflt_switchdev_port_bridge_dellink);
5e8d9049 469
ebb9a03a 470static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
b5d6fbde 471{
9d47c0a2 472 const struct switchdev_ops *ops = dev->switchdev_ops;
b5d6fbde
SF
473 struct net_device *lower_dev;
474 struct net_device *port_dev;
475 struct list_head *iter;
476
477 /* Recusively search down until we find a sw port dev.
f8e20a9f 478 * (A sw port dev supports switchdev_port_attr_get).
b5d6fbde
SF
479 */
480
f8e20a9f 481 if (ops && ops->switchdev_port_attr_get)
b5d6fbde
SF
482 return dev;
483
484 netdev_for_each_lower_dev(dev, lower_dev, iter) {
ebb9a03a 485 port_dev = switchdev_get_lowest_dev(lower_dev);
b5d6fbde
SF
486 if (port_dev)
487 return port_dev;
488 }
489
490 return NULL;
491}
492
/* Find the switch port device underlying all of @fi's nexthops.
 * Returns NULL if any nexthop lacks a device, no switchdev port is found
 * beneath it, its parent ID can't be read, or the nexthops do not all
 * share the same parent switch ID.
 */
static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		/* prev_attr is only read when nhsel > 0, at which point
		 * it was assigned by the previous iteration.
		 */
		if (nhsel > 0) {
			if (prev_attr.ppid.id_len != attr.ppid.id_len)
				return NULL;
			if (memcmp(prev_attr.ppid.id, attr.ppid.id,
				   attr.ppid.id_len))
				return NULL;
		}

		prev_attr = attr;
	}

	return dev;
}
530
5e8d9049 531/**
ebb9a03a 532 * switchdev_fib_ipv4_add - Add IPv4 route entry to switch
5e8d9049
SF
533 *
534 * @dst: route's IPv4 destination address
535 * @dst_len: destination address length (prefix length)
536 * @fi: route FIB info structure
537 * @tos: route TOS
538 * @type: route type
f8f21471 539 * @nlflags: netlink flags passed in (NLM_F_*)
5e8d9049
SF
540 * @tb_id: route table ID
541 *
542 * Add IPv4 route entry to switch device.
543 */
ebb9a03a
JP
544int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
545 u8 tos, u8 type, u32 nlflags, u32 tb_id)
5e8d9049 546{
b5d6fbde 547 struct net_device *dev;
9d47c0a2 548 const struct switchdev_ops *ops;
b5d6fbde
SF
549 int err = 0;
550
8e05fd71
SF
551 /* Don't offload route if using custom ip rules or if
552 * IPv4 FIB offloading has been disabled completely.
553 */
554
e1315db1
SF
555#ifdef CONFIG_IP_MULTIPLE_TABLES
556 if (fi->fib_net->ipv4.fib_has_custom_rules)
557 return 0;
558#endif
559
560 if (fi->fib_net->ipv4.fib_offload_disabled)
104616e7
SF
561 return 0;
562
ebb9a03a 563 dev = switchdev_get_dev_by_nhs(fi);
b5d6fbde
SF
564 if (!dev)
565 return 0;
9d47c0a2 566 ops = dev->switchdev_ops;
b5d6fbde 567
9d47c0a2
JP
568 if (ops->switchdev_fib_ipv4_add) {
569 err = ops->switchdev_fib_ipv4_add(dev, htonl(dst), dst_len,
570 fi, tos, type, nlflags,
571 tb_id);
b5d6fbde
SF
572 if (!err)
573 fi->fib_flags |= RTNH_F_EXTERNAL;
574 }
575
576 return err;
5e8d9049 577}
ebb9a03a 578EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
5e8d9049
SF
579
580/**
ebb9a03a 581 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
5e8d9049
SF
582 *
583 * @dst: route's IPv4 destination address
584 * @dst_len: destination address length (prefix length)
585 * @fi: route FIB info structure
586 * @tos: route TOS
587 * @type: route type
588 * @tb_id: route table ID
589 *
590 * Delete IPv4 route entry from switch device.
591 */
ebb9a03a
JP
592int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
593 u8 tos, u8 type, u32 tb_id)
5e8d9049 594{
b5d6fbde 595 struct net_device *dev;
9d47c0a2 596 const struct switchdev_ops *ops;
b5d6fbde
SF
597 int err = 0;
598
599 if (!(fi->fib_flags & RTNH_F_EXTERNAL))
600 return 0;
601
ebb9a03a 602 dev = switchdev_get_dev_by_nhs(fi);
b5d6fbde
SF
603 if (!dev)
604 return 0;
9d47c0a2 605 ops = dev->switchdev_ops;
b5d6fbde 606
9d47c0a2
JP
607 if (ops->switchdev_fib_ipv4_del) {
608 err = ops->switchdev_fib_ipv4_del(dev, htonl(dst), dst_len,
609 fi, tos, type, tb_id);
b5d6fbde
SF
610 if (!err)
611 fi->fib_flags &= ~RTNH_F_EXTERNAL;
612 }
613
614 return err;
5e8d9049 615}
ebb9a03a 616EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
8e05fd71
SF
617
618/**
ebb9a03a 619 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
8e05fd71
SF
620 *
621 * @fi: route FIB info structure
622 */
ebb9a03a 623void switchdev_fib_ipv4_abort(struct fib_info *fi)
8e05fd71
SF
624{
625 /* There was a problem installing this route to the offload
626 * device. For now, until we come up with more refined
627 * policy handling, abruptly end IPv4 fib offloading for
628 * for entire net by flushing offload device(s) of all
629 * IPv4 routes, and mark IPv4 fib offloading broken from
630 * this point forward.
631 */
632
633 fib_flush_external(fi->fib_net);
634 fi->fib_net->ipv4.fib_offload_disabled = true;
635}
ebb9a03a 636EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
This page took 0.078992 seconds and 5 git commands to generate.