/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header.
 *					Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
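
/* A minimal sketch of the reader-side pattern this scheme buys us (drawn
 * from the lookup/forward paths further below, not a verbatim quote of any
 * one of them):
 *
 *	rcu_read_lock();
 *	cache = ipmr_cache_find(mrt, saddr, daddr);	resolved cache: RCU only
 *	read_lock(&mrt_lock);				vif table: weak rwlock
 *	...transmit via mrt->vif_table[]...
 *	read_unlock(&mrt_lock);
 *	rcu_read_unlock();
 */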
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}
static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ipmr_free_table(net->ipv4.mrt);
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			neigh_parms_data_state_setall(in_dev->arp_parms);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
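
/* The MTU above reserves room for what a register vif prepends on the way
 * up to the daemon: one outer IP header plus an 8-byte PIM register header
 * (8 here matches sizeof(struct pimreghdr) as consumed by pim_rcv() below;
 * the correspondence is an observation, not spelled out in the original).
 */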
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
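
/* The [minvif, maxvif) window maintained above is what lets the forwarding
 * loop in ip_mr_forward() below scan only the populated tail of the ttl
 * vector:
 *
 *	for (ct = cache->mfc_un.res.maxvif - 1;
 *	     ct >= cache->mfc_un.res.minvif; ct--)
 */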
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && __in_dev_get_rtnl(dev) == NULL) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
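
/* Lifetime note: a vif added while mrtsock is 0 (i.e. not by the active
 * mroute socket) is flagged VIFF_STATIC above, and mroute_clean_tables()
 * below skips such entries, so they survive daemon shutdown.
 */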
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}
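
/* Lookup precedence used by the input path below: an exact (S,G) entry via
 * ipmr_cache_find() first, then the (*,G) scan above, and finally the (*,*)
 * fallback through ipmr_cache_find_any_parent().
 */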
/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but
 *	mrouted expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sock *mroute_sk;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/* Copy the IP header */

	skb_set_network_header(skb, skb->len);
	skb_put(skb, ihl);
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb_dst_set(skb, dst_clone(skb_dst(pkt)));

	/* Add our header */

	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	= 0;
	ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
	skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (mroute_sk == NULL) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
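
/* What the daemon sees on the other end is the skb built above, read from
 * its raw IGMP socket. A rough sketch of that read loop (an assumption
 * about typical mrouted/pimd code, not anything in this file; the field
 * names come from struct igmpmsg in <linux/mroute.h>):
 *
 *	n = read(igmp_sock, buf, sizeof(buf));
 *	msg = (struct igmpmsg *)buf;
 *	if (msg->im_mbz == 0)			(protocol field zeroed above)
 *		handle(msg->im_msgtype);	(e.g. IGMPMSG_NOCACHE)
 */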
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */

		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */

		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
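
/* Two caps bound the unresolved state created above: at most ten pending
 * entries per table (the cache_resolve_queue_len test) and at most four
 * queued skbs per entry (the qlen > 3 test).
 */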
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
	rtnl_unlock();
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
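
/* For orientation, the daemon's side of this API looks roughly like the
 * sketch below (an assumption about typical mrouted usage, not code from
 * this file; only the option names come from <linux/mroute.h>):
 *
 *	int one = 1;
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	struct vifctl vc = { .vifc_vifi = 0, ... };
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	struct mfcctl mc = { .mfcc_parent = 0, ... };
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */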
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int))
			return -EINVAL;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mfcc_parent;
		rtnl_lock();
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	case MRT_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 1000000000)
			return -EINVAL;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			if (!ipmr_new_table(net, v))
				ret = -ENOMEM;
			else
				raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
#endif
	else
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
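
/* The wire result of ip_encap() is plain IPIP: an outer IP header carrying
 * IPPROTO_IPIP (protocol 4) followed by the untouched inner multicast
 * packet, which the peer's dvmrp%d/tunl0 tunnel device decapsulates.
 */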
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow to send ICMP, so that packets will disappear
		 * to blackhole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear that if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program has joined on.
	 * If we do not make it, the program will have to join on all
	 * interfaces. On the other hand, a multihoming host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ipmr_find_vif(mrt, skb->dev);

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons will be
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) which have a default multicast route pointing
			 * to the wrong oif. In any case, it is not a good
			 * idea to use multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
/*
 *	Multicast packets for forwarding arrive here
 *	Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations, such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (cache == NULL) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	 * Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#endif
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
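
/* The double checksum test above is the relaxation recorded in the header
 * changelog: a conforming PIMv2 register checksums only the 8-byte PIM
 * header (first term), while older peers checksum the whole packet (second
 * term); a packet is dropped only if both checks fail.
 */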
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (cache == NULL && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = 0;
			s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing :
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};
static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
*seq
, loff_t
*pos
)
2437 __acquires(mrt_lock
)
2439 struct ipmr_vif_iter
*iter
= seq
->private;
2440 struct net
*net
= seq_file_net(seq
);
2441 struct mr_table
*mrt
;
2443 mrt
= ipmr_get_table(net
, RT_TABLE_DEFAULT
);
2445 return ERR_PTR(-ENOENT
);
2449 read_lock(&mrt_lock
);
2450 return *pos
? ipmr_vif_seq_idx(net
, seq
->private, *pos
- 1)
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
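
/* Illustrative /proc/net/ip_mr_vif output (made-up numbers, shaped by the
 * seq_printf format above):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          151440     720   1036480    5120 00000 0A000001 00000000
 */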
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
	.netns_ok	=	1,
};
#endif
/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}