/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/fib_rules.h>
static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
static struct fib_rules_ops *lookup_rules_ops(int family)
	struct fib_rules_ops *ops;

	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
static void rules_ops_put(struct fib_rules_ops *ops)
	module_put(ops->owner);

static void flush_route_cache(struct fib_rules_ops *ops)
int fib_rules_register(struct fib_rules_ops *ops)
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;
	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);

	spin_unlock(&rules_mod_lock);

EXPORT_SYMBOL_GPL(fib_rules_register);
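/*
 * Illustrative sketch, hypothetical names: an address family would typically
 * fill in a fib_rules_ops along these lines and hand it to
 * fib_rules_register().  Only fields that this file itself dereferences are
 * shown; the dummy_* callbacks, dummy_policy and dummy_rules list are
 * placeholders, not part of this file.
 *
 *	static LIST_HEAD(dummy_rules);
 *
 *	static struct fib_rules_ops dummy_rules_ops = {
 *		.family		= AF_INET,	// family the rules apply to
 *		.rule_size	= sizeof(struct fib_rule),	// or a larger family specific struct
 *		.addr_size	= 4,		// address size in bytes
 *		.action		= dummy_action,
 *		.match		= dummy_match,
 *		.configure	= dummy_configure,
 *		.compare	= dummy_compare,
 *		.fill		= dummy_fill,
 *		.default_pref	= dummy_default_pref,
 *		.nlgroup	= RTNLGRP_IPV4_RULE,	// example notification group
 *		.policy		= dummy_policy,
 *		.rules_list	= &dummy_rules,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = fib_rules_register(&dummy_rules_ops);
 */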
static void cleanup_ops(struct fib_rules_ops *ops)
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, ops->rules_list, list) {
		list_del_rcu(&rule->list);
int fib_rules_unregister(struct fib_rules_ops *ops)
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);

	spin_unlock(&rules_mod_lock);

EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
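/*
 * Reading note on fib_rule_match() above: a rule matches only if the
 * optional input interface selector (rule->ifindex vs. fl->iif) and the
 * fwmark selector pass and the family specific ->match() callback agrees.
 * Only mark bits covered by rule->mark_mask are compared, and a rule
 * carrying FIB_RULE_INVERT finally negates the combined result.
 */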
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
	struct fib_rule *rule;
	int err;

	list_for_each_entry_rcu(rule, ops->rules_list, list) {
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {

		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {

EXPORT_SYMBOL_GPL(fib_rules_lookup);
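/*
 * Usage sketch, hypothetical caller (names continue the dummy_rules_ops
 * sketch near fib_rules_register() above): a family resolves a flow through
 * its registered rules by calling fib_rules_lookup() with a flow key and a
 * fib_lookup_arg carrying a family specific result pointer, e.g.
 *
 *	struct fib_lookup_arg arg = { .result = &res };
 *
 *	err = fib_rules_lookup(&dummy_rules_ops, flp, 0, &arg);
 *
 * The per-rule ->action() callback returns -EAGAIN to fall through to the
 * next rule (see the err != -EAGAIN test above); any other return value ends
 * the walk and becomes the result of the lookup.
 */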
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
	if (tb[FRA_SRC] == NULL ||
	    frh->src_len > (ops->addr_size * 8) ||
	    nla_len(tb[FRA_SRC]) != ops->addr_size)
		goto errout;

	if (tb[FRA_DST] == NULL ||
	    frh->dst_len > (ops->addr_size * 8) ||
	    nla_len(tb[FRA_DST]) != ops->addr_size)
		goto errout;
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);

	err = validate_rulemsg(frh, tb, ops);

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;
	err = ops->configure(rule, skb, nlh, frh, tb);

	list_for_each_entry(r, ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}
	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
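/*
 * Worked example of the goto handling above (illustrative): adding a rule
 * with pref 10 and action FR_ACT_GOTO targeting pref 20 while no pref 20
 * rule exists yet leaves rule->ctarget NULL, marks the rule unresolved and
 * increments ops->unresolved_rules.  Once a rule with pref 20 is added, the
 * resolution loop above matches r->target == rule->pref, publishes the
 * target via rcu_assign_pointer(r->ctarget, rule) and drops the unresolved
 * count.  Backward jumps (target <= pref) are rejected up front so rule
 * evaluation cannot loop.
 */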
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);

	err = validate_rulemsg(frh, tb, ops);
	list_for_each_entry(rule, ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;
		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target to any of them. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		flush_route_cache(ops);
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);
	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
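/*
 * Note on the NLA_PUT_*() macros used above: in this kernel generation they
 * expand to nla_put() style calls that jump to the local nla_put_failure
 * label when the attribute does not fit into the skb, which is why the
 * function ends by discarding the partially built message via
 * nlmsg_cancel() and returning -EMSGSIZE.
 */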
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);

	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
	if (err < 0)
		rtnl_set_sk_err(ops->nlgroup, err);
static void attach_rules(struct list_head *rules, struct net_device *dev)
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
static void detach_rules(struct list_head *rules, struct net_device *dev)
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(ops->rules_list, dev);
		break;
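/*
 * Rationale for the notifier above: a rule may be configured with an
 * interface name before that device exists, in which case rule->ifindex is
 * -1 and the rule is reported with FIB_RULE_DEV_DETACHED.  attach_rules()
 * binds such rules when a matching device registers, and detach_rules()
 * returns them to the detached state when the device goes away.
 */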
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __init fib_rules_init(void)
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	return register_netdevice_notifier(&fib_rules_notifier);

subsys_initcall(fib_rules_init);