/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
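/* A note on the wire format: every VXLAN packet carries the 8-byte
 * header below after the outer UDP header. Per the VXLAN specification
 * only the I bit (0x08000000 in vx_flags) may be set; the VNI occupies
 * the top 24 bits of vx_vni and the low 8 bits are reserved, which is
 * why the receive path rejects frames where (vx_vni & htonl(0xff)) is
 * non-zero.
 */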
/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];
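/* The all-zeros MAC acts as the forwarding-table key for the default
 * destination: vxlan_newlink() installs an entry under this address and
 * vxlan_xmit() falls back to it when a destination lookup misses.
 */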
/* per UDP socket information */
struct vxlan_sock {
	struct hlist_node hlist;
	struct rcu_head	  rcu;
	struct work_struct del_work;
	atomic_t	  refcnt;
	struct socket	  *sock;
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};
struct vxlan_rdst {
	__be32		 remote_ip;
	__be16		 remote_port;
	u32		 remote_vni;
	u32		 remote_ifindex;
	struct list_head list;
	struct rcu_head	 rcu;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct vxlan_rdst default_dst;	/* default destination */
	__be32		  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	struct work_struct sock_work;
	struct work_struct igmp_join;
	struct work_struct igmp_leave;

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static void vxlan_sock_work(struct work_struct *work);
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}
/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
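/* Lookups are two-level: a UDP port hashes to a vxlan_sock in the
 * per-namespace sock_list, and a VNI hashes to a vxlan_dev in that
 * socket's vni_list. Several VNIs can therefore share one UDP socket.
 */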
/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
{
	return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port)
			return vs;
	}
	return NULL;
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
{
	struct vxlan_sock *vs;
	struct vxlan_dev *vxlan;

	vs = vxlan_find_port(net, port);
	if (!vs)
		return NULL;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = rdst->remote_ip != htonl(INADDR_ANY);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	INIT_LIST_HEAD(&f.remotes);
	list_add_rcu(&remote.list, &f.remotes);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};

	INIT_LIST_HEAD(&f.remotes);
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
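/* The load above fetches 8 bytes; assuming the endian-dependent shift
 * reconstructed above, it discards the two bytes that lie beyond the
 * 6-byte Ethernet address before hash_64() folds the value down to
 * FDB_HASH_BITS bits.
 */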
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      __be32 ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (rd->remote_ip == ip &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	return 1;
}
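/* Return convention for vxlan_fdb_append(): 1 if a new remote was
 * appended, 0 if an identical remote already existed, negative errno on
 * allocation failure. vxlan_fdb_create() folds this into its notify
 * decision via "notify |= rc".
 */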
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}
static void vxlan_fdb_free_rdst(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	kfree(rd);
}
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);

	if (tb[NDA_DST]) {
		if (nla_len(tb[NDA_DST]) != sizeof(__be32))
			return -EAFNOSUPPORT;

		*ip = nla_get_be32(tb[NDA_DST]);
	} else {
		*ip = htonl(INADDR_ANY);
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
		dev_put(tdev);
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	__be32 ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	__be32 ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (ip != htonl(INADDR_ANY)) {
		rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}
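/* The idx counter and cb->args[0] implement netlink dump resumption:
 * entries already delivered in a previous pass are skipped until idx
 * catches up, so a large table can be dumped across several messages.
 */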
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote(f);

		if (likely(rdst->remote_ip == src_ip))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &rdst->remote_ip, &src_ip);

		rdst->remote_ip = src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
{
	struct vxlan_dev *vxlan;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev))
			continue;

		if (vxlan->default_dst.remote_ip == remote_ip)
			return true;
	}

	return false;
}
static void vxlan_sock_hold(struct vxlan_sock *vs)
{
	atomic_inc(&vs->refcnt);
}
static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
{
	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up
 * Done as workqueue because ip_mc_join_group acquires RTNL.
 */
static void vxlan_igmp_join(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
	struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	lock_sock(sk);
	ip_mc_join_group(sk, &mreq);
	release_sock(sk);

	vxlan_sock_release(vn, vs);
	dev_put(vxlan->dev);
}
/* Inverse of vxlan_igmp_join when last VNI is brought down */
static void vxlan_igmp_leave(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
	struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	lock_sock(sk);
	ip_mc_leave_group(sk, &mreq);
	release_sock(sk);

	vxlan_sock_release(vn, vs);
	dev_put(vxlan->dev);
}
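/* A note on the encap_rcv contract used by the handler below: a return
 * of 0 (or negative) tells the UDP stack the skb was consumed, while a
 * positive return hands the packet back for normal UDP processing,
 * which is why the outer UDP header is pushed back on the error path.
 */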
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct pcpu_tstats *stats;
	__be16 port;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	port = inet_sk(sk)->inet_sport;
	vxlan = vxlan_find_vni(sock_net(sk), vni, port);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d port %u\n",
			   vni, ntohs(port));
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
		goto drop;

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;

	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);

	return false;
}
static void vxlan_sock_put(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk = vxlan->vn_sock->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_put;
}
/* Compute source port for outgoing packet.
 * First choice is the L4 flow hash, since it spreads better and may be
 * available from hardware; the secondary choice is a jhash over the
 * Ethernet header.
 */
static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return htons((((u64) hash * range) >> 32) + vxlan->port_min);
}
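/* The multiply-shift above maps the 32-bit hash uniformly onto
 * [port_min, port_max]: (hash * range) is a 64-bit product, and taking
 * its top 32 bits scales the hash into [0, range) without a division.
 */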
static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
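/* Marking SKB_GSO_UDP_TUNNEL defers segmentation of oversized frames
 * until after encapsulation, so each resulting segment gets its own
 * outer IP/UDP/VXLAN headers; non-GSO frames just have their checksum
 * state normalized.
 */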
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);

	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
			    eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	__be32 dst;
	__be16 src_port, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = rdst->remote_ip;

	if (!dst) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = rdst->remote_ifindex;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	/* Bypass encapsulation if the destination is local */
	if (rt->rt_flags & RTCF_LOCAL &&
	    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		ip_rt_put(rt);
		dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
		if (!dst_vxlan)
			goto tx_error;
		vxlan_encap_bypass(skb, vxlan, dst_vxlan);
		return;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(dev, skb);

	if (handle_offloads(skb))
		goto drop;

	tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
	ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
			    IPPROTO_UDP, tos, ttl, df);
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Outer UDP source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    ntohs(eth->h_proto) == ETH_P_IP) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
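/* Each remote in the fdb entry gets its own clone above, so a multicast
 * or unknown-unicast frame is replicated once per destination VTEP; the
 * original skb is freed after the loop.
 */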
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs;
	__u32 vni = vxlan->default_dst.remote_vni;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
	if (vs) {
		/* If we have a socket with same port already, reuse it */
		atomic_inc(&vs->refcnt);
		vxlan->vn_sock = vs;
		hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	} else {
		/* otherwise make new socket outside of RTNL */
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->sock_work);
	}
	spin_unlock(&vn->sock_lock);

	return 0;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}
static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;

	vxlan_fdb_delete_default(vxlan);

	if (vs)
		vxlan_sock_release(vn, vs);
	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	/* socket hasn't been created */
	if (!vs)
		return -ENOTCONN;

	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
	    !vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_join);
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
	    !vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_leave);
	}

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
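/* The ndo_fdb_* hooks tie the driver into the bridge netlink interface,
 * so remote endpoints can be managed from userspace, for example:
 *   bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 192.0.2.1
 * (illustrative addresses; any unicast MAC and remote VTEP IP work).
 */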
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);
	INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
	INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
	INIT_WORK(&vxlan->sock_work, vxlan_sock_work);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;
	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	sk_release_kernel(vs->sock->sk);
	kfree_rcu(vs, rcu);
}
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = port,
	};
	int rc;
	unsigned int h;

	vs = kmalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		kfree(vs);
		return ERR_PTR(rc);
	}

	/* Put in proper namespace */
	sk = vs->sock->sk;
	sk_change_net(sk, net);

	rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		kfree(vs);
		return ERR_PTR(rc);
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	atomic_set(&vs->refcnt, 1);

	return vs;
}
/* Scheduled at device creation to bind to a socket */
static void vxlan_sock_work(struct work_struct *work)
{
	struct vxlan_dev *vxlan
		= container_of(work, struct vxlan_dev, sock_work);
	struct net_device *dev = vxlan->dev;
	struct net *net = dev_net(dev);
	__u32 vni = vxlan->default_dst.remote_vni;
	__be16 port = vxlan->dst_port;
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *nvs, *ovs;

	nvs = vxlan_socket_create(net, port);
	if (IS_ERR(nvs)) {
		netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n",
			   PTR_ERR(nvs));
		goto out;
	}

	spin_lock(&vn->sock_lock);
	/* Look again to see if can reuse socket */
	ovs = vxlan_find_port(net, port);
	if (ovs) {
		atomic_inc(&ovs->refcnt);
		vxlan->vn_sock = ovs;
		hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
		spin_unlock(&vn->sock_lock);

		sk_release_kernel(nvs->sock->sk);
		kfree(nvs);
	} else {
		vxlan->vn_sock = nvs;
		hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
		hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
		spin_unlock(&vn->sock_lock);
	}
out:
	dev_put(dev);
}
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	/* create an fdb entry for default destination */
	err = vxlan_fdb_create(vxlan, all_zeros_mac,
			       vxlan->default_dst.remote_ip,
			       NUD_REACHABLE|NUD_PERMANENT,
			       NLM_F_EXCL|NLM_F_CREATE,
			       vxlan->dst_port, vxlan->default_dst.remote_vni,
			       vxlan->default_dst.remote_ifindex, NTF_SELF);
	if (err)
		return err;

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);

	flush_workqueue(vxlan_wq);

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) +/* IFLA_VXLAN_PORT */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
		goto nla_put_failure;

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(vxlan, &vn->vxlan_list, next)
		unregister_netdevice_queue(vxlan->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_ALIAS_RTNL_LINK("vxlan");