vxlan: Use checksum partial with remote checksum offload
drivers/net/vxlan.c
1 /*
2 * VXLAN: Virtual eXtensible Local Area Network
3 *
4 * Copyright (c) 2012-2013 Vyatta Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/module.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/rculist.h>
20 #include <linux/netdevice.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/udp.h>
24 #include <linux/igmp.h>
25 #include <linux/etherdevice.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/hash.h>
29 #include <linux/ethtool.h>
30 #include <net/arp.h>
31 #include <net/ndisc.h>
32 #include <net/ip.h>
33 #include <net/ip_tunnels.h>
34 #include <net/icmp.h>
35 #include <net/udp.h>
36 #include <net/udp_tunnel.h>
37 #include <net/rtnetlink.h>
38 #include <net/route.h>
39 #include <net/dsfield.h>
40 #include <net/inet_ecn.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/vxlan.h>
44 #include <net/protocol.h>
45 #include <net/udp_tunnel.h>
46 #if IS_ENABLED(CONFIG_IPV6)
47 #include <net/ipv6.h>
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/ip6_checksum.h>
51 #endif
52
53 #define VXLAN_VERSION "0.1"
54
55 #define PORT_HASH_BITS 8
56 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
57 #define VNI_HASH_BITS 10
58 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
59 #define FDB_HASH_BITS 8
60 #define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
61 #define FDB_AGE_DEFAULT 300 /* 5 min */
62 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
63
64 /* UDP port for VXLAN traffic.
65 * The IANA assigned port is 4789, but the Linux default is 8472
66 * for compatibility with early adopters.
67 */
68 static unsigned short vxlan_port __read_mostly = 8472;
69 module_param_named(udp_port, vxlan_port, ushort, 0444);
70 MODULE_PARM_DESC(udp_port, "Destination UDP port");
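/* Usage sketch (assumes standard modprobe semantics): the listening port
 * can be overridden at load time, e.g. to use the IANA-assigned port:
 *
 *   modprobe vxlan udp_port=4789
 *
 * The 0444 permissions above make the parameter read-only via sysfs once
 * the module is loaded.
 */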
71
72 static bool log_ecn_error = true;
73 module_param(log_ecn_error, bool, 0644);
74 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
75
76 static int vxlan_net_id;
77
78 static const u8 all_zeros_mac[ETH_ALEN];
79
80 /* per-network namespace private data for this module */
81 struct vxlan_net {
82 struct list_head vxlan_list;
83 struct hlist_head sock_list[PORT_HASH_SIZE];
84 spinlock_t sock_lock;
85 };
86
87 union vxlan_addr {
88 struct sockaddr_in sin;
89 struct sockaddr_in6 sin6;
90 struct sockaddr sa;
91 };
92
93 struct vxlan_rdst {
94 union vxlan_addr remote_ip;
95 __be16 remote_port;
96 u32 remote_vni;
97 u32 remote_ifindex;
98 struct list_head list;
99 struct rcu_head rcu;
100 };
101
102 /* Forwarding table entry */
103 struct vxlan_fdb {
104 struct hlist_node hlist; /* linked list of entries */
105 struct rcu_head rcu;
106 unsigned long updated; /* jiffies */
107 unsigned long used;
108 struct list_head remotes;
109 u16 state; /* see ndm_state */
110 u8 flags; /* see ndm_flags */
111 u8 eth_addr[ETH_ALEN];
112 };
113
114 /* Pseudo network device */
115 struct vxlan_dev {
116 struct hlist_node hlist; /* vni hash table */
117 struct list_head next; /* vxlan's per namespace list */
118 struct vxlan_sock *vn_sock; /* listening socket */
119 struct net_device *dev;
120 struct net *net; /* netns for packet i/o */
121 struct vxlan_rdst default_dst; /* default destination */
122 union vxlan_addr saddr; /* source address */
123 __be16 dst_port;
124 __u16 port_min; /* source port range */
125 __u16 port_max;
126 __u8 tos; /* TOS override */
127 __u8 ttl;
128 u32 flags; /* VXLAN_F_* in vxlan.h */
129
130 struct work_struct sock_work;
131 struct work_struct igmp_join;
132 struct work_struct igmp_leave;
133
134 unsigned long age_interval;
135 struct timer_list age_timer;
136 spinlock_t hash_lock;
137 unsigned int addrcnt;
138 unsigned int addrmax;
139
140 struct hlist_head fdb_head[FDB_HASH_SIZE];
141 };
142
143 /* salt for hash table */
144 static u32 vxlan_salt __read_mostly;
145 static struct workqueue_struct *vxlan_wq;
146
147 static void vxlan_sock_work(struct work_struct *work);
148
149 #if IS_ENABLED(CONFIG_IPV6)
150 static inline
151 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
152 {
153 if (a->sa.sa_family != b->sa.sa_family)
154 return false;
155 if (a->sa.sa_family == AF_INET6)
156 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
157 else
158 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
159 }
160
161 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
162 {
163 if (ipa->sa.sa_family == AF_INET6)
164 return ipv6_addr_any(&ipa->sin6.sin6_addr);
165 else
166 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
167 }
168
169 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
170 {
171 if (ipa->sa.sa_family == AF_INET6)
172 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
173 else
174 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
175 }
176
177 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
178 {
179 if (nla_len(nla) >= sizeof(struct in6_addr)) {
180 nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
181 ip->sa.sa_family = AF_INET6;
182 return 0;
183 } else if (nla_len(nla) >= sizeof(__be32)) {
184 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
185 ip->sa.sa_family = AF_INET;
186 return 0;
187 } else {
188 return -EAFNOSUPPORT;
189 }
190 }
191
192 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
193 const union vxlan_addr *ip)
194 {
195 if (ip->sa.sa_family == AF_INET6)
196 return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
197 else
198 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
199 }
200
201 #else /* !CONFIG_IPV6 */
202
203 static inline
204 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
205 {
206 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
207 }
208
209 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
210 {
211 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
212 }
213
214 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
215 {
216 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
217 }
218
219 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
220 {
221 if (nla_len(nla) >= sizeof(struct in6_addr)) {
222 return -EAFNOSUPPORT;
223 } else if (nla_len(nla) >= sizeof(__be32)) {
224 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
225 ip->sa.sa_family = AF_INET;
226 return 0;
227 } else {
228 return -EAFNOSUPPORT;
229 }
230 }
231
232 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
233 const union vxlan_addr *ip)
234 {
235 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
236 }
237 #endif
238
239 /* Virtual Network hash table head */
240 static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
241 {
242 return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
243 }
244
245 /* Socket hash table head */
246 static inline struct hlist_head *vs_head(struct net *net, __be16 port)
247 {
248 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
249
250 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
251 }
252
253 /* First remote destination for a forwarding entry.
254 * Guaranteed to be non-NULL because remotes are never deleted.
255 */
256 static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
257 {
258 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
259 }
260
261 static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
262 {
263 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
264 }
265
266 /* Find VXLAN socket based on network namespace, address family, UDP port,
267 * and enabled unshareable flags.
268 */
269 static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
270 __be16 port, u32 flags)
271 {
272 struct vxlan_sock *vs;
273
274 flags &= VXLAN_F_RCV_FLAGS;
275
276 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
277 if (inet_sk(vs->sock->sk)->inet_sport == port &&
278 inet_sk(vs->sock->sk)->sk.sk_family == family &&
279 vs->flags == flags)
280 return vs;
281 }
282 return NULL;
283 }
284
285 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
286 {
287 struct vxlan_dev *vxlan;
288
289 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
290 if (vxlan->default_dst.remote_vni == id)
291 return vxlan;
292 }
293
294 return NULL;
295 }
296
297 /* Look up VNI in a per net namespace table */
298 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
299 sa_family_t family, __be16 port,
300 u32 flags)
301 {
302 struct vxlan_sock *vs;
303
304 vs = vxlan_find_sock(net, family, port, flags);
305 if (!vs)
306 return NULL;
307
308 return vxlan_vs_find_vni(vs, id);
309 }
310
311 /* Fill in neighbour message in skbuff. */
312 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
313 const struct vxlan_fdb *fdb,
314 u32 portid, u32 seq, int type, unsigned int flags,
315 const struct vxlan_rdst *rdst)
316 {
317 unsigned long now = jiffies;
318 struct nda_cacheinfo ci;
319 struct nlmsghdr *nlh;
320 struct ndmsg *ndm;
321 bool send_ip, send_eth;
322
323 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
324 if (nlh == NULL)
325 return -EMSGSIZE;
326
327 ndm = nlmsg_data(nlh);
328 memset(ndm, 0, sizeof(*ndm));
329
330 send_eth = send_ip = true;
331
332 if (type == RTM_GETNEIGH) {
333 ndm->ndm_family = AF_INET;
334 send_ip = !vxlan_addr_any(&rdst->remote_ip);
335 send_eth = !is_zero_ether_addr(fdb->eth_addr);
336 } else
337 ndm->ndm_family = AF_BRIDGE;
338 ndm->ndm_state = fdb->state;
339 ndm->ndm_ifindex = vxlan->dev->ifindex;
340 ndm->ndm_flags = fdb->flags;
341 ndm->ndm_type = RTN_UNICAST;
342
343 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
344 nla_put_s32(skb, NDA_LINK_NETNSID,
345 peernet2id(dev_net(vxlan->dev), vxlan->net)))
346 goto nla_put_failure;
347
348 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
349 goto nla_put_failure;
350
351 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
352 goto nla_put_failure;
353
354 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
355 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
356 goto nla_put_failure;
357 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
358 nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
359 goto nla_put_failure;
360 if (rdst->remote_ifindex &&
361 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
362 goto nla_put_failure;
363
364 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
365 ci.ndm_confirmed = 0;
366 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
367 ci.ndm_refcnt = 0;
368
369 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
370 goto nla_put_failure;
371
372 nlmsg_end(skb, nlh);
373 return 0;
374
375 nla_put_failure:
376 nlmsg_cancel(skb, nlh);
377 return -EMSGSIZE;
378 }
379
380 static inline size_t vxlan_nlmsg_size(void)
381 {
382 return NLMSG_ALIGN(sizeof(struct ndmsg))
383 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
384 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
385 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
386 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
387 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
388 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
389 + nla_total_size(sizeof(struct nda_cacheinfo));
390 }
391
392 static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
393 struct vxlan_rdst *rd, int type)
394 {
395 struct net *net = dev_net(vxlan->dev);
396 struct sk_buff *skb;
397 int err = -ENOBUFS;
398
399 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
400 if (skb == NULL)
401 goto errout;
402
403 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
404 if (err < 0) {
405 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
406 WARN_ON(err == -EMSGSIZE);
407 kfree_skb(skb);
408 goto errout;
409 }
410
411 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
412 return;
413 errout:
414 if (err < 0)
415 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
416 }
417
418 static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
419 {
420 struct vxlan_dev *vxlan = netdev_priv(dev);
421 struct vxlan_fdb f = {
422 .state = NUD_STALE,
423 };
424 struct vxlan_rdst remote = {
425 .remote_ip = *ipa, /* goes to NDA_DST */
426 .remote_vni = VXLAN_N_VID,
427 };
428
429 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
430 }
431
432 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
433 {
434 struct vxlan_fdb f = {
435 .state = NUD_STALE,
436 };
437 struct vxlan_rdst remote = { };
438
439 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
440
441 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
442 }
443
444 /* Hash Ethernet address */
445 static u32 eth_hash(const unsigned char *addr)
446 {
447 u64 value = get_unaligned((u64 *)addr);
448
449 /* only want 6 bytes */
450 #ifdef __BIG_ENDIAN
451 value >>= 16;
452 #else
453 value <<= 16;
454 #endif
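/* Note: the unaligned 64-bit load above also pulled in the two bytes
 * following the MAC address; the endian-specific shift discards them
 * (they land in the high-order bytes of 'value' on little endian, the
 * low-order bytes on big endian).
 */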
455 return hash_64(value, FDB_HASH_BITS);
456 }
457
458 /* Hash chain to use given mac address */
459 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
460 const u8 *mac)
461 {
462 return &vxlan->fdb_head[eth_hash(mac)];
463 }
464
465 /* Look up Ethernet address in forwarding table */
466 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
467 const u8 *mac)
468 {
469 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
470 struct vxlan_fdb *f;
471
472 hlist_for_each_entry_rcu(f, head, hlist) {
473 if (ether_addr_equal(mac, f->eth_addr))
474 return f;
475 }
476
477 return NULL;
478 }
479
480 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
481 const u8 *mac)
482 {
483 struct vxlan_fdb *f;
484
485 f = __vxlan_find_mac(vxlan, mac);
486 if (f)
487 f->used = jiffies;
488
489 return f;
490 }
491
492 /* caller should hold vxlan->hash_lock */
493 static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
494 union vxlan_addr *ip, __be16 port,
495 __u32 vni, __u32 ifindex)
496 {
497 struct vxlan_rdst *rd;
498
499 list_for_each_entry(rd, &f->remotes, list) {
500 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
501 rd->remote_port == port &&
502 rd->remote_vni == vni &&
503 rd->remote_ifindex == ifindex)
504 return rd;
505 }
506
507 return NULL;
508 }
509
510 /* Replace destination of unicast mac */
511 static int vxlan_fdb_replace(struct vxlan_fdb *f,
512 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
513 {
514 struct vxlan_rdst *rd;
515
516 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
517 if (rd)
518 return 0;
519
520 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
521 if (!rd)
522 return 0;
523 rd->remote_ip = *ip;
524 rd->remote_port = port;
525 rd->remote_vni = vni;
526 rd->remote_ifindex = ifindex;
527 return 1;
528 }
529
530 /* Add/update destinations for multicast */
531 static int vxlan_fdb_append(struct vxlan_fdb *f,
532 union vxlan_addr *ip, __be16 port, __u32 vni,
533 __u32 ifindex, struct vxlan_rdst **rdp)
534 {
535 struct vxlan_rdst *rd;
536
537 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
538 if (rd)
539 return 0;
540
541 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
542 if (rd == NULL)
543 return -ENOBUFS;
544 rd->remote_ip = *ip;
545 rd->remote_port = port;
546 rd->remote_vni = vni;
547 rd->remote_ifindex = ifindex;
548
549 list_add_tail_rcu(&rd->list, &f->remotes);
550
551 *rdp = rd;
552 return 1;
553 }
554
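/* GRO helper for remote checksum offload (RCO). When VXLAN_HF_RCO is set,
 * the low-order bits of the VNI word (passed in host order as 'data')
 * encode where the inner transport checksum starts, scaled by
 * VXLAN_RCO_SHIFT, plus a bit (VXLAN_RCO_UDP) saying whether it is a UDP
 * or TCP checksum. skb_gro_remcsum_process() uses that to patch the inner
 * checksum before GRO aggregation.
 */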
555 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
556 unsigned int off,
557 struct vxlanhdr *vh, size_t hdrlen,
558 u32 data, struct gro_remcsum *grc,
559 bool nopartial)
560 {
561 size_t start, offset, plen;
562
563 if (skb->remcsum_offload)
564 return NULL;
565
566 if (!NAPI_GRO_CB(skb)->csum_valid)
567 return NULL;
568
569 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
570 offset = start + ((data & VXLAN_RCO_UDP) ?
571 offsetof(struct udphdr, check) :
572 offsetof(struct tcphdr, check));
573
574 plen = hdrlen + offset + sizeof(u16);
575
576 /* Pull checksum that will be written */
577 if (skb_gro_header_hard(skb, off + plen)) {
578 vh = skb_gro_header_slow(skb, off + plen, off);
579 if (!vh)
580 return NULL;
581 }
582
583 skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
584 start, offset, grc, nopartial);
585
586 skb->remcsum_offload = 1;
587
588 return vh;
589 }
590
591 static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
592 struct sk_buff *skb,
593 struct udp_offload *uoff)
594 {
595 struct sk_buff *p, **pp = NULL;
596 struct vxlanhdr *vh, *vh2;
597 unsigned int hlen, off_vx;
598 int flush = 1;
599 struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
600 udp_offloads);
601 u32 flags;
602 struct gro_remcsum grc;
603
604 skb_gro_remcsum_init(&grc);
605
606 off_vx = skb_gro_offset(skb);
607 hlen = off_vx + sizeof(*vh);
608 vh = skb_gro_header_fast(skb, off_vx);
609 if (skb_gro_header_hard(skb, hlen)) {
610 vh = skb_gro_header_slow(skb, hlen, off_vx);
611 if (unlikely(!vh))
612 goto out;
613 }
614
615 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
616 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
617
618 flags = ntohl(vh->vx_flags);
619
620 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
621 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
622 ntohl(vh->vx_vni), &grc,
623 !!(vs->flags &
624 VXLAN_F_REMCSUM_NOPARTIAL));
625
626 if (!vh)
627 goto out;
628 }
629
630 flush = 0;
631
632 for (p = *head; p; p = p->next) {
633 if (!NAPI_GRO_CB(p)->same_flow)
634 continue;
635
636 vh2 = (struct vxlanhdr *)(p->data + off_vx);
637 if (vh->vx_flags != vh2->vx_flags ||
638 vh->vx_vni != vh2->vx_vni) {
639 NAPI_GRO_CB(p)->same_flow = 0;
640 continue;
641 }
642 }
643
644 pp = eth_gro_receive(head, skb);
645
646 out:
647 skb_gro_remcsum_cleanup(skb, &grc);
648 NAPI_GRO_CB(skb)->flush |= flush;
649
650 return pp;
651 }
652
653 static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
654 struct udp_offload *uoff)
655 {
656 udp_tunnel_gro_complete(skb, nhoff);
657
658 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
659 }
660
661 /* Notify netdevs that UDP port started listening */
662 static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
663 {
664 struct net_device *dev;
665 struct sock *sk = vs->sock->sk;
666 struct net *net = sock_net(sk);
667 sa_family_t sa_family = sk->sk_family;
668 __be16 port = inet_sk(sk)->inet_sport;
669 int err;
670
671 if (sa_family == AF_INET) {
672 err = udp_add_offload(&vs->udp_offloads);
673 if (err)
674 pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
675 }
676
677 rcu_read_lock();
678 for_each_netdev_rcu(net, dev) {
679 if (dev->netdev_ops->ndo_add_vxlan_port)
680 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
681 port);
682 }
683 rcu_read_unlock();
684 }
685
686 /* Notify netdevs that UDP port is no longer listening */
687 static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
688 {
689 struct net_device *dev;
690 struct sock *sk = vs->sock->sk;
691 struct net *net = sock_net(sk);
692 sa_family_t sa_family = sk->sk_family;
693 __be16 port = inet_sk(sk)->inet_sport;
694
695 rcu_read_lock();
696 for_each_netdev_rcu(net, dev) {
697 if (dev->netdev_ops->ndo_del_vxlan_port)
698 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
699 port);
700 }
701 rcu_read_unlock();
702
703 if (sa_family == AF_INET)
704 udp_del_offload(&vs->udp_offloads);
705 }
706
707 /* Add new entry to forwarding table -- assumes lock held */
708 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
709 const u8 *mac, union vxlan_addr *ip,
710 __u16 state, __u16 flags,
711 __be16 port, __u32 vni, __u32 ifindex,
712 __u8 ndm_flags)
713 {
714 struct vxlan_rdst *rd = NULL;
715 struct vxlan_fdb *f;
716 int notify = 0;
717
718 f = __vxlan_find_mac(vxlan, mac);
719 if (f) {
720 if (flags & NLM_F_EXCL) {
721 netdev_dbg(vxlan->dev,
722 "lost race to create %pM\n", mac);
723 return -EEXIST;
724 }
725 if (f->state != state) {
726 f->state = state;
727 f->updated = jiffies;
728 notify = 1;
729 }
730 if (f->flags != ndm_flags) {
731 f->flags = ndm_flags;
732 f->updated = jiffies;
733 notify = 1;
734 }
735 if ((flags & NLM_F_REPLACE)) {
736 /* Only change unicasts */
737 if (!(is_multicast_ether_addr(f->eth_addr) ||
738 is_zero_ether_addr(f->eth_addr))) {
739 int rc = vxlan_fdb_replace(f, ip, port, vni,
740 ifindex);
741
742 if (rc < 0)
743 return rc;
744 notify |= rc;
745 } else
746 return -EOPNOTSUPP;
747 }
748 if ((flags & NLM_F_APPEND) &&
749 (is_multicast_ether_addr(f->eth_addr) ||
750 is_zero_ether_addr(f->eth_addr))) {
751 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
752 &rd);
753
754 if (rc < 0)
755 return rc;
756 notify |= rc;
757 }
758 } else {
759 if (!(flags & NLM_F_CREATE))
760 return -ENOENT;
761
762 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
763 return -ENOSPC;
764
765 /* Disallow replace to add a multicast entry */
766 if ((flags & NLM_F_REPLACE) &&
767 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
768 return -EOPNOTSUPP;
769
770 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
771 f = kmalloc(sizeof(*f), GFP_ATOMIC);
772 if (!f)
773 return -ENOMEM;
774
775 notify = 1;
776 f->state = state;
777 f->flags = ndm_flags;
778 f->updated = f->used = jiffies;
779 INIT_LIST_HEAD(&f->remotes);
780 memcpy(f->eth_addr, mac, ETH_ALEN);
781
782 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
783
784 ++vxlan->addrcnt;
785 hlist_add_head_rcu(&f->hlist,
786 vxlan_fdb_head(vxlan, mac));
787 }
788
789 if (notify) {
790 if (rd == NULL)
791 rd = first_remote_rtnl(f);
792 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
793 }
794
795 return 0;
796 }
797
798 static void vxlan_fdb_free(struct rcu_head *head)
799 {
800 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
801 struct vxlan_rdst *rd, *nd;
802
803 list_for_each_entry_safe(rd, nd, &f->remotes, list)
804 kfree(rd);
805 kfree(f);
806 }
807
808 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
809 {
810 netdev_dbg(vxlan->dev,
811 "delete %pM\n", f->eth_addr);
812
813 --vxlan->addrcnt;
814 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
815
816 hlist_del_rcu(&f->hlist);
817 call_rcu(&f->rcu, vxlan_fdb_free);
818 }
819
820 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
821 union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
822 {
823 struct net *net = dev_net(vxlan->dev);
824 int err;
825
826 if (tb[NDA_DST]) {
827 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
828 if (err)
829 return err;
830 } else {
831 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
832 if (remote->sa.sa_family == AF_INET) {
833 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
834 ip->sa.sa_family = AF_INET;
835 #if IS_ENABLED(CONFIG_IPV6)
836 } else {
837 ip->sin6.sin6_addr = in6addr_any;
838 ip->sa.sa_family = AF_INET6;
839 #endif
840 }
841 }
842
843 if (tb[NDA_PORT]) {
844 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
845 return -EINVAL;
846 *port = nla_get_be16(tb[NDA_PORT]);
847 } else {
848 *port = vxlan->dst_port;
849 }
850
851 if (tb[NDA_VNI]) {
852 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
853 return -EINVAL;
854 *vni = nla_get_u32(tb[NDA_VNI]);
855 } else {
856 *vni = vxlan->default_dst.remote_vni;
857 }
858
859 if (tb[NDA_IFINDEX]) {
860 struct net_device *tdev;
861
862 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
863 return -EINVAL;
864 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
865 tdev = __dev_get_by_index(net, *ifindex);
866 if (!tdev)
867 return -EADDRNOTAVAIL;
868 } else {
869 *ifindex = 0;
870 }
871
872 return 0;
873 }
874
875 /* Add static entry (via netlink) */
876 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
877 struct net_device *dev,
878 const unsigned char *addr, u16 vid, u16 flags)
879 {
880 struct vxlan_dev *vxlan = netdev_priv(dev);
881 /* struct net *net = dev_net(vxlan->dev); */
882 union vxlan_addr ip;
883 __be16 port;
884 u32 vni, ifindex;
885 int err;
886
887 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
888 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
889 ndm->ndm_state);
890 return -EINVAL;
891 }
892
893 if (tb[NDA_DST] == NULL)
894 return -EINVAL;
895
896 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
897 if (err)
898 return err;
899
900 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
901 return -EAFNOSUPPORT;
902
903 spin_lock_bh(&vxlan->hash_lock);
904 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
905 port, vni, ifindex, ndm->ndm_flags);
906 spin_unlock_bh(&vxlan->hash_lock);
907
908 return err;
909 }
910
911 /* Delete entry (via netlink) */
912 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
913 struct net_device *dev,
914 const unsigned char *addr, u16 vid)
915 {
916 struct vxlan_dev *vxlan = netdev_priv(dev);
917 struct vxlan_fdb *f;
918 struct vxlan_rdst *rd = NULL;
919 union vxlan_addr ip;
920 __be16 port;
921 u32 vni, ifindex;
922 int err;
923
924 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
925 if (err)
926 return err;
927
928 err = -ENOENT;
929
930 spin_lock_bh(&vxlan->hash_lock);
931 f = vxlan_find_mac(vxlan, addr);
932 if (!f)
933 goto out;
934
935 if (!vxlan_addr_any(&ip)) {
936 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
937 if (!rd)
938 goto out;
939 }
940
941 err = 0;
942
943 /* remove a destination if it's not the only one on the list,
944 * otherwise destroy the fdb entry
945 */
946 if (rd && !list_is_singular(&f->remotes)) {
947 list_del_rcu(&rd->list);
948 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
949 kfree_rcu(rd, rcu);
950 goto out;
951 }
952
953 vxlan_fdb_destroy(vxlan, f);
954
955 out:
956 spin_unlock_bh(&vxlan->hash_lock);
957
958 return err;
959 }
960
961 /* Dump forwarding table */
962 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
963 struct net_device *dev,
964 struct net_device *filter_dev, int idx)
965 {
966 struct vxlan_dev *vxlan = netdev_priv(dev);
967 unsigned int h;
968
969 for (h = 0; h < FDB_HASH_SIZE; ++h) {
970 struct vxlan_fdb *f;
971 int err;
972
973 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
974 struct vxlan_rdst *rd;
975
976 if (idx < cb->args[0])
977 goto skip;
978
979 list_for_each_entry_rcu(rd, &f->remotes, list) {
980 err = vxlan_fdb_info(skb, vxlan, f,
981 NETLINK_CB(cb->skb).portid,
982 cb->nlh->nlmsg_seq,
983 RTM_NEWNEIGH,
984 NLM_F_MULTI, rd);
985 if (err < 0)
986 goto out;
987 }
988 skip:
989 ++idx;
990 }
991 }
992 out:
993 return idx;
994 }
995
996 /* Watch incoming packets to learn mapping between Ethernet address
997 * and tunnel endpoint.
998 * Return true if packet is bogus and should be dropped.
999 */
1000 static bool vxlan_snoop(struct net_device *dev,
1001 union vxlan_addr *src_ip, const u8 *src_mac)
1002 {
1003 struct vxlan_dev *vxlan = netdev_priv(dev);
1004 struct vxlan_fdb *f;
1005
1006 f = vxlan_find_mac(vxlan, src_mac);
1007 if (likely(f)) {
1008 struct vxlan_rdst *rdst = first_remote_rcu(f);
1009
1010 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
1011 return false;
1012
1013 /* Don't migrate static entries, drop packets */
1014 if (f->state & NUD_NOARP)
1015 return true;
1016
1017 if (net_ratelimit())
1018 netdev_info(dev,
1019 "%pM migrated from %pIS to %pIS\n",
1020 src_mac, &rdst->remote_ip.sa, &src_ip->sa);
1021
1022 rdst->remote_ip = *src_ip;
1023 f->updated = jiffies;
1024 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
1025 } else {
1026 /* learned new entry */
1027 spin_lock(&vxlan->hash_lock);
1028
1029 /* close off race between vxlan_flush and incoming packets */
1030 if (netif_running(dev))
1031 vxlan_fdb_create(vxlan, src_mac, src_ip,
1032 NUD_REACHABLE,
1033 NLM_F_EXCL|NLM_F_CREATE,
1034 vxlan->dst_port,
1035 vxlan->default_dst.remote_vni,
1036 0, NTF_SELF);
1037 spin_unlock(&vxlan->hash_lock);
1038 }
1039
1040 return false;
1041 }
1042
1043 /* See if multicast group is already in use by other ID */
1044 static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
1045 {
1046 struct vxlan_dev *vxlan;
1047
1048 /* The vxlan_sock is only used by dev; leaving the group has
1049 * no effect on other vxlan devices.
1050 */
1051 if (atomic_read(&dev->vn_sock->refcnt) == 1)
1052 return false;
1053
1054 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
1055 if (!netif_running(vxlan->dev) || vxlan == dev)
1056 continue;
1057
1058 if (vxlan->vn_sock != dev->vn_sock)
1059 continue;
1060
1061 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
1062 &dev->default_dst.remote_ip))
1063 continue;
1064
1065 if (vxlan->default_dst.remote_ifindex !=
1066 dev->default_dst.remote_ifindex)
1067 continue;
1068
1069 return true;
1070 }
1071
1072 return false;
1073 }
1074
1075 static void vxlan_sock_hold(struct vxlan_sock *vs)
1076 {
1077 atomic_inc(&vs->refcnt);
1078 }
1079
1080 void vxlan_sock_release(struct vxlan_sock *vs)
1081 {
1082 struct sock *sk = vs->sock->sk;
1083 struct net *net = sock_net(sk);
1084 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1085
1086 if (!atomic_dec_and_test(&vs->refcnt))
1087 return;
1088
1089 spin_lock(&vn->sock_lock);
1090 hlist_del_rcu(&vs->hlist);
1091 vxlan_notify_del_rx_port(vs);
1092 spin_unlock(&vn->sock_lock);
1093
1094 queue_work(vxlan_wq, &vs->del_work);
1095 }
1096 EXPORT_SYMBOL_GPL(vxlan_sock_release);
1097
1098 /* Callback to update multicast group membership when the first VNI on
1099 * a multicast address is brought up.
1100 * Done as workqueue because ip_mc_join_group acquires RTNL.
1101 */
1102 static void vxlan_igmp_join(struct work_struct *work)
1103 {
1104 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
1105 struct vxlan_sock *vs = vxlan->vn_sock;
1106 struct sock *sk = vs->sock->sk;
1107 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1108 int ifindex = vxlan->default_dst.remote_ifindex;
1109
1110 lock_sock(sk);
1111 if (ip->sa.sa_family == AF_INET) {
1112 struct ip_mreqn mreq = {
1113 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1114 .imr_ifindex = ifindex,
1115 };
1116
1117 ip_mc_join_group(sk, &mreq);
1118 #if IS_ENABLED(CONFIG_IPV6)
1119 } else {
1120 ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1121 &ip->sin6.sin6_addr);
1122 #endif
1123 }
1124 release_sock(sk);
1125
1126 vxlan_sock_release(vs);
1127 dev_put(vxlan->dev);
1128 }
1129
1130 /* Inverse of vxlan_igmp_join when last VNI is brought down */
1131 static void vxlan_igmp_leave(struct work_struct *work)
1132 {
1133 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
1134 struct vxlan_sock *vs = vxlan->vn_sock;
1135 struct sock *sk = vs->sock->sk;
1136 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1137 int ifindex = vxlan->default_dst.remote_ifindex;
1138
1139 lock_sock(sk);
1140 if (ip->sa.sa_family == AF_INET) {
1141 struct ip_mreqn mreq = {
1142 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1143 .imr_ifindex = ifindex,
1144 };
1145
1146 ip_mc_leave_group(sk, &mreq);
1147 #if IS_ENABLED(CONFIG_IPV6)
1148 } else {
1149 ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1150 &ip->sin6.sin6_addr);
1151 #endif
1152 }
1153
1154 release_sock(sk);
1155
1156 vxlan_sock_release(vs);
1157 dev_put(vxlan->dev);
1158 }
1159
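/* Non-GRO receive counterpart of vxlan_gro_remcsum(): decode the checksum
 * start/offset packed into the VNI reserved bits and let
 * skb_remcsum_process() finish the inner checksum.
 */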
1160 static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
1161 size_t hdrlen, u32 data, bool nopartial)
1162 {
1163 size_t start, offset, plen;
1164
1165 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
1166 offset = start + ((data & VXLAN_RCO_UDP) ?
1167 offsetof(struct udphdr, check) :
1168 offsetof(struct tcphdr, check));
1169
1170 plen = hdrlen + offset + sizeof(u16);
1171
1172 if (!pskb_may_pull(skb, plen))
1173 return NULL;
1174
1175 vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1176
1177 skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
1178 nopartial);
1179
1180 return vh;
1181 }
1182
1183 /* Callback from net/ipv4/udp.c to receive packets */
1184 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1185 {
1186 struct vxlan_sock *vs;
1187 struct vxlanhdr *vxh;
1188 u32 flags, vni;
1189 struct vxlan_metadata md = {0};
1190
1191 /* Need VXLAN and inner Ethernet header to be present */
1192 if (!pskb_may_pull(skb, VXLAN_HLEN))
1193 goto error;
1194
1195 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1196 flags = ntohl(vxh->vx_flags);
1197 vni = ntohl(vxh->vx_vni);
1198
1199 if (flags & VXLAN_HF_VNI) {
1200 flags &= ~VXLAN_HF_VNI;
1201 } else {
1202 /* VNI flag always required to be set */
1203 goto bad_flags;
1204 }
1205
1206 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1207 goto drop;
1208 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1209
1210 vs = rcu_dereference_sk_user_data(sk);
1211 if (!vs)
1212 goto drop;
1213
1214 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
1215 vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
1216 !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
1217 if (!vxh)
1218 goto drop;
1219
1220 flags &= ~VXLAN_HF_RCO;
1221 vni &= VXLAN_VID_MASK;
1222 }
1223
1224 /* For backwards compatibility, only allow reserved fields to be
1225 * used by VXLAN extensions if explicitly requested.
1226 */
1227 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
1228 struct vxlanhdr_gbp *gbp;
1229
1230 gbp = (struct vxlanhdr_gbp *)vxh;
1231 md.gbp = ntohs(gbp->policy_id);
1232
1233 if (gbp->dont_learn)
1234 md.gbp |= VXLAN_GBP_DONT_LEARN;
1235
1236 if (gbp->policy_applied)
1237 md.gbp |= VXLAN_GBP_POLICY_APPLIED;
1238
1239 flags &= ~VXLAN_GBP_USED_BITS;
1240 }
1241
1242 if (flags || (vni & ~VXLAN_VID_MASK)) {
1243 /* If there are any unprocessed flags remaining, treat
1244 * this as a malformed packet. This behavior diverges from the
1245 * VXLAN RFC (RFC7348), which stipulates that bits in
1246 * reserved fields are to be ignored. The approach here
1247 * maintains compatibility with previous stack code, and also
1248 * is more robust and provides a little more security in
1249 * adding extensions to VXLAN.
1250 */
1251
1252 goto bad_flags;
1253 }
1254
1255 md.vni = vxh->vx_vni;
1256 vs->rcv(vs, skb, &md);
1257 return 0;
1258
1259 drop:
1260 /* Consume bad packet */
1261 kfree_skb(skb);
1262 return 0;
1263
1264 bad_flags:
1265 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1266 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
1267
1268 error:
1269 /* Return non-VXLAN packet */
1270 return 1;
1271 }
1272
1273 static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1274 struct vxlan_metadata *md)
1275 {
1276 struct iphdr *oip = NULL;
1277 struct ipv6hdr *oip6 = NULL;
1278 struct vxlan_dev *vxlan;
1279 struct pcpu_sw_netstats *stats;
1280 union vxlan_addr saddr;
1281 __u32 vni;
1282 int err = 0;
1283 union vxlan_addr *remote_ip;
1284
1285 vni = ntohl(md->vni) >> 8;
1286 /* Is this VNI defined? */
1287 vxlan = vxlan_vs_find_vni(vs, vni);
1288 if (!vxlan)
1289 goto drop;
1290
1291 remote_ip = &vxlan->default_dst.remote_ip;
1292 skb_reset_mac_header(skb);
1293 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1294 skb->protocol = eth_type_trans(skb, vxlan->dev);
1295 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1296
1297 /* Ignore packet loops (and multicast echo) */
1298 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1299 goto drop;
1300
1301 /* Re-examine inner Ethernet packet */
1302 if (remote_ip->sa.sa_family == AF_INET) {
1303 oip = ip_hdr(skb);
1304 saddr.sin.sin_addr.s_addr = oip->saddr;
1305 saddr.sa.sa_family = AF_INET;
1306 #if IS_ENABLED(CONFIG_IPV6)
1307 } else {
1308 oip6 = ipv6_hdr(skb);
1309 saddr.sin6.sin6_addr = oip6->saddr;
1310 saddr.sa.sa_family = AF_INET6;
1311 #endif
1312 }
1313
1314 if ((vxlan->flags & VXLAN_F_LEARN) &&
1315 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
1316 goto drop;
1317
1318 skb_reset_network_header(skb);
1319 skb->mark = md->gbp;
1320
1321 if (oip6)
1322 err = IP6_ECN_decapsulate(oip6, skb);
1323 if (oip)
1324 err = IP_ECN_decapsulate(oip, skb);
1325
1326 if (unlikely(err)) {
1327 if (log_ecn_error) {
1328 if (oip6)
1329 net_info_ratelimited("non-ECT from %pI6\n",
1330 &oip6->saddr);
1331 if (oip)
1332 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1333 &oip->saddr, oip->tos);
1334 }
1335 if (err > 1) {
1336 ++vxlan->dev->stats.rx_frame_errors;
1337 ++vxlan->dev->stats.rx_errors;
1338 goto drop;
1339 }
1340 }
1341
1342 stats = this_cpu_ptr(vxlan->dev->tstats);
1343 u64_stats_update_begin(&stats->syncp);
1344 stats->rx_packets++;
1345 stats->rx_bytes += skb->len;
1346 u64_stats_update_end(&stats->syncp);
1347
1348 netif_rx(skb);
1349
1350 return;
1351 drop:
1352 /* Consume bad packet */
1353 kfree_skb(skb);
1354 }
1355
1356 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1357 {
1358 struct vxlan_dev *vxlan = netdev_priv(dev);
1359 struct arphdr *parp;
1360 u8 *arpptr, *sha;
1361 __be32 sip, tip;
1362 struct neighbour *n;
1363
1364 if (dev->flags & IFF_NOARP)
1365 goto out;
1366
1367 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1368 dev->stats.tx_dropped++;
1369 goto out;
1370 }
1371 parp = arp_hdr(skb);
1372
1373 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1374 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1375 parp->ar_pro != htons(ETH_P_IP) ||
1376 parp->ar_op != htons(ARPOP_REQUEST) ||
1377 parp->ar_hln != dev->addr_len ||
1378 parp->ar_pln != 4)
1379 goto out;
1380 arpptr = (u8 *)parp + sizeof(struct arphdr);
1381 sha = arpptr;
1382 arpptr += dev->addr_len; /* sha */
1383 memcpy(&sip, arpptr, sizeof(sip));
1384 arpptr += sizeof(sip);
1385 arpptr += dev->addr_len; /* tha */
1386 memcpy(&tip, arpptr, sizeof(tip));
1387
1388 if (ipv4_is_loopback(tip) ||
1389 ipv4_is_multicast(tip))
1390 goto out;
1391
1392 n = neigh_lookup(&arp_tbl, &tip, dev);
1393
1394 if (n) {
1395 struct vxlan_fdb *f;
1396 struct sk_buff *reply;
1397
1398 if (!(n->nud_state & NUD_CONNECTED)) {
1399 neigh_release(n);
1400 goto out;
1401 }
1402
1403 f = vxlan_find_mac(vxlan, n->ha);
1404 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1405 /* bridge-local neighbor */
1406 neigh_release(n);
1407 goto out;
1408 }
1409
1410 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1411 n->ha, sha);
1412
1413 neigh_release(n);
1414
1415 if (reply == NULL)
1416 goto out;
1417
1418 skb_reset_mac_header(reply);
1419 __skb_pull(reply, skb_network_offset(reply));
1420 reply->ip_summed = CHECKSUM_UNNECESSARY;
1421 reply->pkt_type = PACKET_HOST;
1422
1423 if (netif_rx_ni(reply) == NET_RX_DROP)
1424 dev->stats.rx_dropped++;
1425 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1426 union vxlan_addr ipa = {
1427 .sin.sin_addr.s_addr = tip,
1428 .sin.sin_family = AF_INET,
1429 };
1430
1431 vxlan_ip_miss(dev, &ipa);
1432 }
1433 out:
1434 consume_skb(skb);
1435 return NETDEV_TX_OK;
1436 }
1437
1438 #if IS_ENABLED(CONFIG_IPV6)
1439 static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1440 struct neighbour *n, bool isrouter)
1441 {
1442 struct net_device *dev = request->dev;
1443 struct sk_buff *reply;
1444 struct nd_msg *ns, *na;
1445 struct ipv6hdr *pip6;
1446 u8 *daddr;
1447 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1448 int ns_olen;
1449 int i, len;
1450
1451 if (dev == NULL)
1452 return NULL;
1453
1454 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1455 sizeof(*na) + na_olen + dev->needed_tailroom;
1456 reply = alloc_skb(len, GFP_ATOMIC);
1457 if (reply == NULL)
1458 return NULL;
1459
1460 reply->protocol = htons(ETH_P_IPV6);
1461 reply->dev = dev;
1462 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1463 skb_push(reply, sizeof(struct ethhdr));
1464 skb_set_mac_header(reply, 0);
1465
1466 ns = (struct nd_msg *)skb_transport_header(request);
1467
1468 daddr = eth_hdr(request)->h_source;
1469 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1470 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1471 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1472 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1473 break;
1474 }
1475 }
1476
1477 /* Ethernet header */
1478 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1479 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1480 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1481 reply->protocol = htons(ETH_P_IPV6);
1482
1483 skb_pull(reply, sizeof(struct ethhdr));
1484 skb_set_network_header(reply, 0);
1485 skb_put(reply, sizeof(struct ipv6hdr));
1486
1487 /* IPv6 header */
1488
1489 pip6 = ipv6_hdr(reply);
1490 memset(pip6, 0, sizeof(struct ipv6hdr));
1491 pip6->version = 6;
1492 pip6->priority = ipv6_hdr(request)->priority;
1493 pip6->nexthdr = IPPROTO_ICMPV6;
1494 pip6->hop_limit = 255;
1495 pip6->daddr = ipv6_hdr(request)->saddr;
1496 pip6->saddr = *(struct in6_addr *)n->primary_key;
1497
1498 skb_pull(reply, sizeof(struct ipv6hdr));
1499 skb_set_transport_header(reply, 0);
1500
1501 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1502
1503 /* Neighbor Advertisement */
1504 memset(na, 0, sizeof(*na)+na_olen);
1505 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1506 na->icmph.icmp6_router = isrouter;
1507 na->icmph.icmp6_override = 1;
1508 na->icmph.icmp6_solicited = 1;
1509 na->target = ns->target;
1510 ether_addr_copy(&na->opt[2], n->ha);
1511 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1512 na->opt[1] = na_olen >> 3;
1513
1514 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1515 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1516 csum_partial(na, sizeof(*na)+na_olen, 0));
1517
1518 pip6->payload_len = htons(sizeof(*na)+na_olen);
1519
1520 skb_push(reply, sizeof(struct ipv6hdr));
1521
1522 reply->ip_summed = CHECKSUM_UNNECESSARY;
1523
1524 return reply;
1525 }
1526
1527 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1528 {
1529 struct vxlan_dev *vxlan = netdev_priv(dev);
1530 struct nd_msg *msg;
1531 const struct ipv6hdr *iphdr;
1532 const struct in6_addr *saddr, *daddr;
1533 struct neighbour *n;
1534 struct inet6_dev *in6_dev;
1535
1536 in6_dev = __in6_dev_get(dev);
1537 if (!in6_dev)
1538 goto out;
1539
1540 iphdr = ipv6_hdr(skb);
1541 saddr = &iphdr->saddr;
1542 daddr = &iphdr->daddr;
1543
1544 msg = (struct nd_msg *)skb_transport_header(skb);
1545 if (msg->icmph.icmp6_code != 0 ||
1546 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1547 goto out;
1548
1549 if (ipv6_addr_loopback(daddr) ||
1550 ipv6_addr_is_multicast(&msg->target))
1551 goto out;
1552
1553 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1554
1555 if (n) {
1556 struct vxlan_fdb *f;
1557 struct sk_buff *reply;
1558
1559 if (!(n->nud_state & NUD_CONNECTED)) {
1560 neigh_release(n);
1561 goto out;
1562 }
1563
1564 f = vxlan_find_mac(vxlan, n->ha);
1565 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1566 /* bridge-local neighbor */
1567 neigh_release(n);
1568 goto out;
1569 }
1570
1571 reply = vxlan_na_create(skb, n,
1572 !!(f ? f->flags & NTF_ROUTER : 0));
1573
1574 neigh_release(n);
1575
1576 if (reply == NULL)
1577 goto out;
1578
1579 if (netif_rx_ni(reply) == NET_RX_DROP)
1580 dev->stats.rx_dropped++;
1581
1582 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1583 union vxlan_addr ipa = {
1584 .sin6.sin6_addr = msg->target,
1585 .sin6.sin6_family = AF_INET6,
1586 };
1587
1588 vxlan_ip_miss(dev, &ipa);
1589 }
1590
1591 out:
1592 consume_skb(skb);
1593 return NETDEV_TX_OK;
1594 }
1595 #endif
1596
1597 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1598 {
1599 struct vxlan_dev *vxlan = netdev_priv(dev);
1600 struct neighbour *n;
1601
1602 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1603 return false;
1604
1605 n = NULL;
1606 switch (ntohs(eth_hdr(skb)->h_proto)) {
1607 case ETH_P_IP:
1608 {
1609 struct iphdr *pip;
1610
1611 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1612 return false;
1613 pip = ip_hdr(skb);
1614 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1615 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1616 union vxlan_addr ipa = {
1617 .sin.sin_addr.s_addr = pip->daddr,
1618 .sin.sin_family = AF_INET,
1619 };
1620
1621 vxlan_ip_miss(dev, &ipa);
1622 return false;
1623 }
1624
1625 break;
1626 }
1627 #if IS_ENABLED(CONFIG_IPV6)
1628 case ETH_P_IPV6:
1629 {
1630 struct ipv6hdr *pip6;
1631
1632 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1633 return false;
1634 pip6 = ipv6_hdr(skb);
1635 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1636 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1637 union vxlan_addr ipa = {
1638 .sin6.sin6_addr = pip6->daddr,
1639 .sin6.sin6_family = AF_INET6,
1640 };
1641
1642 vxlan_ip_miss(dev, &ipa);
1643 return false;
1644 }
1645
1646 break;
1647 }
1648 #endif
1649 default:
1650 return false;
1651 }
1652
1653 if (n) {
1654 bool diff;
1655
1656 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1657 if (diff) {
1658 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1659 dev->addr_len);
1660 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
1661 }
1662 neigh_release(n);
1663 return diff;
1664 }
1665
1666 return false;
1667 }
1668
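/* Group Based Policy (GBP) extension: mirror the bits parsed in
 * vxlan_udp_encap_recv() back into the first reserved word of the VXLAN
 * header: the 16-bit policy ID plus the D (don't learn) and A (policy
 * applied) flag bits, gated by VXLAN_HF_GBP.
 */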
1669 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1670 struct vxlan_metadata *md)
1671 {
1672 struct vxlanhdr_gbp *gbp;
1673
1674 if (!md->gbp)
1675 return;
1676
1677 gbp = (struct vxlanhdr_gbp *)vxh;
1678 vxh->vx_flags |= htonl(VXLAN_HF_GBP);
1679
1680 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1681 gbp->dont_learn = 1;
1682
1683 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1684 gbp->policy_applied = 1;
1685
1686 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1687 }
1688
1689 #if IS_ENABLED(CONFIG_IPV6)
1690 static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
1691 struct net_device *dev, struct in6_addr *saddr,
1692 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1693 __be16 src_port, __be16 dst_port,
1694 struct vxlan_metadata *md, bool xnet, u32 vxflags)
1695 {
1696 struct vxlanhdr *vxh;
1697 int min_headroom;
1698 int err;
1699 bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
1700 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1701 u16 hdrlen = sizeof(struct vxlanhdr);
1702
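/* Remote checksum offload on transmit: if the inner packet arrives with
 * CHECKSUM_PARTIAL for a TCP or UDP checksum whose start lies within the
 * encodable range, keep the checksum partial instead of resolving it
 * here, and flag SKB_GSO_TUNNEL_REMCSUM; the checksum location is then
 * encoded into the VNI reserved bits further down so the receiver can
 * complete it.
 */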
1703 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1704 skb->ip_summed == CHECKSUM_PARTIAL) {
1705 int csum_start = skb_checksum_start_offset(skb);
1706
1707 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1708 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1709 (skb->csum_offset == offsetof(struct udphdr, check) ||
1710 skb->csum_offset == offsetof(struct tcphdr, check))) {
1711 udp_sum = false;
1712 type |= SKB_GSO_TUNNEL_REMCSUM;
1713 }
1714 }
1715
1716 skb = iptunnel_handle_offloads(skb, udp_sum, type);
1717 if (IS_ERR(skb)) {
1718 err = -EINVAL;
1719 goto err;
1720 }
1721
1722 skb_scrub_packet(skb, xnet);
1723
1724 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1725 + VXLAN_HLEN + sizeof(struct ipv6hdr)
1726 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1727
1728 /* Need space for new headers (invalidates iph ptr) */
1729 err = skb_cow_head(skb, min_headroom);
1730 if (unlikely(err)) {
1731 kfree_skb(skb);
1732 goto err;
1733 }
1734
1735 skb = vlan_hwaccel_push_inside(skb);
1736 if (WARN_ON(!skb)) {
1737 err = -ENOMEM;
1738 goto err;
1739 }
1740
1741 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1742 vxh->vx_flags = htonl(VXLAN_HF_VNI);
1743 vxh->vx_vni = md->vni;
1744
1745 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1746 u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
1747 VXLAN_RCO_SHIFT;
1748
1749 if (skb->csum_offset == offsetof(struct udphdr, check))
1750 data |= VXLAN_RCO_UDP;
1751
1752 vxh->vx_vni |= htonl(data);
1753 vxh->vx_flags |= htonl(VXLAN_HF_RCO);
1754
1755 if (!skb_is_gso(skb)) {
1756 skb->ip_summed = CHECKSUM_NONE;
1757 skb->encapsulation = 0;
1758 }
1759 }
1760
1761 if (vxflags & VXLAN_F_GBP)
1762 vxlan_build_gbp_hdr(vxh, vxflags, md);
1763
1764 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1765
1766 udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
1767 ttl, src_port, dst_port,
1768 !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
1769 return 0;
1770 err:
1771 dst_release(dst);
1772 return err;
1773 }
1774 #endif
1775
1776 int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
1777 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1778 __be16 src_port, __be16 dst_port,
1779 struct vxlan_metadata *md, bool xnet, u32 vxflags)
1780 {
1781 struct vxlanhdr *vxh;
1782 int min_headroom;
1783 int err;
1784 bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
1785 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1786 u16 hdrlen = sizeof(struct vxlanhdr);
1787
1788 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1789 skb->ip_summed == CHECKSUM_PARTIAL) {
1790 int csum_start = skb_checksum_start_offset(skb);
1791
1792 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1793 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1794 (skb->csum_offset == offsetof(struct udphdr, check) ||
1795 skb->csum_offset == offsetof(struct tcphdr, check))) {
1796 udp_sum = false;
1797 type |= SKB_GSO_TUNNEL_REMCSUM;
1798 }
1799 }
1800
1801 skb = iptunnel_handle_offloads(skb, udp_sum, type);
1802 if (IS_ERR(skb))
1803 return PTR_ERR(skb);
1804
1805 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1806 + VXLAN_HLEN + sizeof(struct iphdr)
1807 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1808
1809 /* Need space for new headers (invalidates iph ptr) */
1810 err = skb_cow_head(skb, min_headroom);
1811 if (unlikely(err)) {
1812 kfree_skb(skb);
1813 return err;
1814 }
1815
1816 skb = vlan_hwaccel_push_inside(skb);
1817 if (WARN_ON(!skb))
1818 return -ENOMEM;
1819
1820 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1821 vxh->vx_flags = htonl(VXLAN_HF_VNI);
1822 vxh->vx_vni = md->vni;
1823
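/* Encode where the pending inner checksum lives: its start (scaled by
 * VXLAN_RCO_SHIFT) and a UDP-vs-TCP bit go into the low-order VNI bits,
 * advertised via VXLAN_HF_RCO and decoded by vxlan_remcsum() on receive.
 * For non-GSO packets the skb is marked CHECKSUM_NONE so the stack does
 * not resolve the partial checksum locally.
 */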
1824 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1825 u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
1826 VXLAN_RCO_SHIFT;
1827
1828 if (skb->csum_offset == offsetof(struct udphdr, check))
1829 data |= VXLAN_RCO_UDP;
1830
1831 vxh->vx_vni |= htonl(data);
1832 vxh->vx_flags |= htonl(VXLAN_HF_RCO);
1833
1834 if (!skb_is_gso(skb)) {
1835 skb->ip_summed = CHECKSUM_NONE;
1836 skb->encapsulation = 0;
1837 }
1838 }
1839
1840 if (vxflags & VXLAN_F_GBP)
1841 vxlan_build_gbp_hdr(vxh, vxflags, md);
1842
1843 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1844
1845 return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
1846 ttl, df, src_port, dst_port, xnet,
1847 !(vxflags & VXLAN_F_UDP_CSUM));
1848 }
1849 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1850
1851 /* Bypass encapsulation if the destination is local */
1852 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1853 struct vxlan_dev *dst_vxlan)
1854 {
1855 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1856 union vxlan_addr loopback;
1857 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1858 struct net_device *dev = skb->dev;
1859 int len = skb->len;
1860
1861 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1862 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1863 skb->pkt_type = PACKET_HOST;
1864 skb->encapsulation = 0;
1865 skb->dev = dst_vxlan->dev;
1866 __skb_pull(skb, skb_network_offset(skb));
1867
1868 if (remote_ip->sa.sa_family == AF_INET) {
1869 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1870 loopback.sa.sa_family = AF_INET;
1871 #if IS_ENABLED(CONFIG_IPV6)
1872 } else {
1873 loopback.sin6.sin6_addr = in6addr_loopback;
1874 loopback.sa.sa_family = AF_INET6;
1875 #endif
1876 }
1877
1878 if (dst_vxlan->flags & VXLAN_F_LEARN)
1879 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
1880
1881 u64_stats_update_begin(&tx_stats->syncp);
1882 tx_stats->tx_packets++;
1883 tx_stats->tx_bytes += len;
1884 u64_stats_update_end(&tx_stats->syncp);
1885
1886 if (netif_rx(skb) == NET_RX_SUCCESS) {
1887 u64_stats_update_begin(&rx_stats->syncp);
1888 rx_stats->rx_packets++;
1889 rx_stats->rx_bytes += len;
1890 u64_stats_update_end(&rx_stats->syncp);
1891 } else {
1892 dev->stats.rx_dropped++;
1893 }
1894 }
1895
1896 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1897 struct vxlan_rdst *rdst, bool did_rsc)
1898 {
1899 struct vxlan_dev *vxlan = netdev_priv(dev);
1900 struct rtable *rt = NULL;
1901 const struct iphdr *old_iph;
1902 struct flowi4 fl4;
1903 union vxlan_addr *dst;
1904 struct vxlan_metadata md;
1905 __be16 src_port = 0, dst_port;
1906 u32 vni;
1907 __be16 df = 0;
1908 __u8 tos, ttl;
1909 int err;
1910
1911 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
1912 vni = rdst->remote_vni;
1913 dst = &rdst->remote_ip;
1914
1915 if (vxlan_addr_any(dst)) {
1916 if (did_rsc) {
1917 /* short-circuited back to local bridge */
1918 vxlan_encap_bypass(skb, vxlan, vxlan);
1919 return;
1920 }
1921 goto drop;
1922 }
1923
1924 old_iph = ip_hdr(skb);
1925
1926 ttl = vxlan->ttl;
1927 if (!ttl && vxlan_addr_multicast(dst))
1928 ttl = 1;
1929
1930 tos = vxlan->tos;
1931 if (tos == 1)
1932 tos = ip_tunnel_get_dsfield(old_iph, skb);
1933
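/* Source UDP port is derived from a hash of the inner flow (see the
 * comment above vxlan_xmit()), so ECMP and RSS can spread tunneled
 * flows; the range is bounded by the configured port_min/port_max.
 */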
1934 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
1935 vxlan->port_max, true);
1936
1937 if (dst->sa.sa_family == AF_INET) {
1938 memset(&fl4, 0, sizeof(fl4));
1939 fl4.flowi4_oif = rdst->remote_ifindex;
1940 fl4.flowi4_tos = RT_TOS(tos);
1941 fl4.daddr = dst->sin.sin_addr.s_addr;
1942 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
1943
1944 rt = ip_route_output_key(vxlan->net, &fl4);
1945 if (IS_ERR(rt)) {
1946 netdev_dbg(dev, "no route to %pI4\n",
1947 &dst->sin.sin_addr.s_addr);
1948 dev->stats.tx_carrier_errors++;
1949 goto tx_error;
1950 }
1951
1952 if (rt->dst.dev == dev) {
1953 netdev_dbg(dev, "circular route to %pI4\n",
1954 &dst->sin.sin_addr.s_addr);
1955 dev->stats.collisions++;
1956 goto rt_tx_error;
1957 }
1958
1959 /* Bypass encapsulation if the destination is local */
1960 if (rt->rt_flags & RTCF_LOCAL &&
1961 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1962 struct vxlan_dev *dst_vxlan;
1963
1964 ip_rt_put(rt);
1965 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1966 dst->sa.sa_family, dst_port,
1967 vxlan->flags);
1968 if (!dst_vxlan)
1969 goto tx_error;
1970 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1971 return;
1972 }
1973
1974 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1975 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1976 md.vni = htonl(vni << 8);
1977 md.gbp = skb->mark;
1978
1979 err = vxlan_xmit_skb(rt, skb, fl4.saddr,
1980 dst->sin.sin_addr.s_addr, tos, ttl, df,
1981 src_port, dst_port, &md,
1982 !net_eq(vxlan->net, dev_net(vxlan->dev)),
1983 vxlan->flags);
1984 if (err < 0) {
1985 /* skb is already freed. */
1986 skb = NULL;
1987 goto rt_tx_error;
1988 }
1989
1990 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
1991 #if IS_ENABLED(CONFIG_IPV6)
1992 } else {
1993 struct sock *sk = vxlan->vn_sock->sock->sk;
1994 struct dst_entry *ndst;
1995 struct flowi6 fl6;
1996 u32 flags;
1997
1998 memset(&fl6, 0, sizeof(fl6));
1999 fl6.flowi6_oif = rdst->remote_ifindex;
2000 fl6.daddr = dst->sin6.sin6_addr;
2001 fl6.saddr = vxlan->saddr.sin6.sin6_addr;
2002 fl6.flowi6_proto = IPPROTO_UDP;
2003
2004 if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
2005 netdev_dbg(dev, "no route to %pI6\n",
2006 &dst->sin6.sin6_addr);
2007 dev->stats.tx_carrier_errors++;
2008 goto tx_error;
2009 }
2010
2011 if (ndst->dev == dev) {
2012 netdev_dbg(dev, "circular route to %pI6\n",
2013 &dst->sin6.sin6_addr);
2014 dst_release(ndst);
2015 dev->stats.collisions++;
2016 goto tx_error;
2017 }
2018
2019 /* Bypass encapsulation if the destination is local */
2020 flags = ((struct rt6_info *)ndst)->rt6i_flags;
2021 if (flags & RTF_LOCAL &&
2022 !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2023 struct vxlan_dev *dst_vxlan;
2024
2025 dst_release(ndst);
2026 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
2027 dst->sa.sa_family, dst_port,
2028 vxlan->flags);
2029 if (!dst_vxlan)
2030 goto tx_error;
2031 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
2032 return;
2033 }
2034
2035 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2036 md.vni = htonl(vni << 8);
2037 md.gbp = skb->mark;
2038
2039 err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
2040 0, ttl, src_port, dst_port, &md,
2041 !net_eq(vxlan->net, dev_net(vxlan->dev)),
2042 vxlan->flags);
2043 #endif
2044 }
2045
2046 return;
2047
2048 drop:
2049 dev->stats.tx_dropped++;
2050 goto tx_free;
2051
2052 rt_tx_error:
2053 ip_rt_put(rt);
2054 tx_error:
2055 dev->stats.tx_errors++;
2056 tx_free:
2057 dev_kfree_skb(skb);
2058 }
2059
2060 /* Transmit local packets over VXLAN
2061 *
2062 * Outer IP header inherits ECN and DF from inner header.
2063 * Outer UDP destination is the VXLAN assigned port.
2064 * Source port is based on a hash of the flow.
2065 */
2066 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2067 {
2068 struct vxlan_dev *vxlan = netdev_priv(dev);
2069 struct ethhdr *eth;
2070 bool did_rsc = false;
2071 struct vxlan_rdst *rdst, *fdst = NULL;
2072 struct vxlan_fdb *f;
2073
2074 skb_reset_mac_header(skb);
2075 eth = eth_hdr(skb);
2076
2077 if ((vxlan->flags & VXLAN_F_PROXY)) {
2078 if (ntohs(eth->h_proto) == ETH_P_ARP)
2079 return arp_reduce(dev, skb);
2080 #if IS_ENABLED(CONFIG_IPV6)
2081 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2082 pskb_may_pull(skb, sizeof(struct ipv6hdr)
2083 + sizeof(struct nd_msg)) &&
2084 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2085 struct nd_msg *msg;
2086
2087 msg = (struct nd_msg *)skb_transport_header(skb);
2088 if (msg->icmph.icmp6_code == 0 &&
2089 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2090 return neigh_reduce(dev, skb);
2091 }
2092 eth = eth_hdr(skb);
2093 #endif
2094 }
2095
2096 f = vxlan_find_mac(vxlan, eth->h_dest);
2097 did_rsc = false;
2098
2099 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
2100 (ntohs(eth->h_proto) == ETH_P_IP ||
2101 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2102 did_rsc = route_shortcircuit(dev, skb);
2103 if (did_rsc)
2104 f = vxlan_find_mac(vxlan, eth->h_dest);
2105 }
2106
2107 if (f == NULL) {
2108 f = vxlan_find_mac(vxlan, all_zeros_mac);
2109 if (f == NULL) {
2110 if ((vxlan->flags & VXLAN_F_L2MISS) &&
2111 !is_multicast_ether_addr(eth->h_dest))
2112 vxlan_fdb_miss(vxlan, eth->h_dest);
2113
2114 dev->stats.tx_dropped++;
2115 kfree_skb(skb);
2116 return NETDEV_TX_OK;
2117 }
2118 }
2119
2120 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2121 struct sk_buff *skb1;
2122
2123 if (!fdst) {
2124 fdst = rdst;
2125 continue;
2126 }
2127 skb1 = skb_clone(skb, GFP_ATOMIC);
2128 if (skb1)
2129 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
2130 }
2131
2132 if (fdst)
2133 vxlan_xmit_one(skb, dev, fdst, did_rsc);
2134 else
2135 kfree_skb(skb);
2136 return NETDEV_TX_OK;
2137 }
2138
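Editor's note: per the comment above vxlan_xmit(), the outer UDP source port is derived from a flow hash so that one flow stays on one port while distinct flows spread across [port_min, port_max] for underlay ECMP and RSS. A sketch of the multiply-shift mapping commonly used for this (ours; cf. udp_flow_src_port()):

    #include <stdint.h>

    static uint16_t flow_src_port(uint32_t flow_hash,
    			      uint16_t port_min, uint16_t port_max)
    {
    	uint32_t range = (uint32_t)(port_max - port_min) + 1;

    	/* Multiply-shift maps the 32-bit hash uniformly onto the
    	 * range without an expensive modulo.
    	 */
    	return port_min + (uint16_t)(((uint64_t)flow_hash * range) >> 32);
    }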
2139 /* Walk the forwarding table and purge stale entries */
2140 static void vxlan_cleanup(unsigned long arg)
2141 {
2142 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
2143 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2144 unsigned int h;
2145
2146 if (!netif_running(vxlan->dev))
2147 return;
2148
2149 spin_lock_bh(&vxlan->hash_lock);
2150 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2151 struct hlist_node *p, *n;
2152 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2153 struct vxlan_fdb *f
2154 = container_of(p, struct vxlan_fdb, hlist);
2155 unsigned long timeout;
2156
2157 if (f->state & NUD_PERMANENT)
2158 continue;
2159
2160 timeout = f->used + vxlan->age_interval * HZ;
2161 if (time_before_eq(timeout, jiffies)) {
2162 netdev_dbg(vxlan->dev,
2163 "garbage collect %pM\n",
2164 f->eth_addr);
2165 f->state = NUD_STALE;
2166 vxlan_fdb_destroy(vxlan, f);
2167 } else if (time_before(timeout, next_timer))
2168 next_timer = timeout;
2169 }
2170 }
2171 spin_unlock_bh(&vxlan->hash_lock);
2172
2173 mod_timer(&vxlan->age_timer, next_timer);
2174 }
2175
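Editor's note: the aging loop above relies on wraparound-safe jiffies comparisons and then reschedules itself for the earliest pending expiry. A standalone sketch of the comparison idiom (mirroring time_before()/time_before_eq() from <linux/jiffies.h>):

    #include <stdbool.h>

    typedef unsigned long jiffies_t;

    static bool jiffies_before(jiffies_t a, jiffies_t b)
    {
    	/* Signed subtraction handles counter wraparound correctly as
    	 * long as the two values are less than half the range apart.
    	 */
    	return (long)(a - b) < 0;
    }

    static bool jiffies_before_eq(jiffies_t a, jiffies_t b)
    {
    	return (long)(a - b) <= 0;
    }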
2176 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2177 {
2178 __u32 vni = vxlan->default_dst.remote_vni;
2179
2180 vxlan->vn_sock = vs;
2181 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
2182 }
2183
2184 /* Setup stats when device is created */
2185 static int vxlan_init(struct net_device *dev)
2186 {
2187 struct vxlan_dev *vxlan = netdev_priv(dev);
2188 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2189 struct vxlan_sock *vs;
2190 bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
2191
2192 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2193 if (!dev->tstats)
2194 return -ENOMEM;
2195
2196 spin_lock(&vn->sock_lock);
2197 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2198 vxlan->dst_port, vxlan->flags);
2199 if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
2200 /* If we have a socket with same port already, reuse it */
2201 vxlan_vs_add_dev(vs, vxlan);
2202 } else {
2203 /* otherwise make new socket outside of RTNL */
2204 dev_hold(dev);
2205 queue_work(vxlan_wq, &vxlan->sock_work);
2206 }
2207 spin_unlock(&vn->sock_lock);
2208
2209 return 0;
2210 }
2211
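Editor's note: the atomic_add_unless(&vs->refcnt, 1, 0) call above implements "take a reference unless the count has already hit zero", so a socket that is mid-teardown is never revived. A userspace C11 sketch of the same pattern:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool sock_try_hold(atomic_int *refcnt)
    {
    	int old = atomic_load(refcnt);

    	while (old != 0) {
    		/* CAS retries (reloading old) if another CPU changed
    		 * the count between the load and the exchange.
    		 */
    		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
    			return true;
    	}
    	return false;	/* already zero: do not take a reference */
    }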
2212 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
2213 {
2214 struct vxlan_fdb *f;
2215
2216 spin_lock_bh(&vxlan->hash_lock);
2217 f = __vxlan_find_mac(vxlan, all_zeros_mac);
2218 if (f)
2219 vxlan_fdb_destroy(vxlan, f);
2220 spin_unlock_bh(&vxlan->hash_lock);
2221 }
2222
2223 static void vxlan_uninit(struct net_device *dev)
2224 {
2225 struct vxlan_dev *vxlan = netdev_priv(dev);
2226 struct vxlan_sock *vs = vxlan->vn_sock;
2227
2228 vxlan_fdb_delete_default(vxlan);
2229
2230 if (vs)
2231 vxlan_sock_release(vs);
2232 free_percpu(dev->tstats);
2233 }
2234
2235 /* Start ageing timer and join group when device is brought up */
2236 static int vxlan_open(struct net_device *dev)
2237 {
2238 struct vxlan_dev *vxlan = netdev_priv(dev);
2239 struct vxlan_sock *vs = vxlan->vn_sock;
2240
2241 /* socket hasn't been created */
2242 if (!vs)
2243 return -ENOTCONN;
2244
2245 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2246 vxlan_sock_hold(vs);
2247 dev_hold(dev);
2248 queue_work(vxlan_wq, &vxlan->igmp_join);
2249 }
2250
2251 if (vxlan->age_interval)
2252 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2253
2254 return 0;
2255 }
2256
2257 /* Purge the forwarding table */
2258 static void vxlan_flush(struct vxlan_dev *vxlan)
2259 {
2260 unsigned int h;
2261
2262 spin_lock_bh(&vxlan->hash_lock);
2263 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2264 struct hlist_node *p, *n;
2265 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2266 struct vxlan_fdb *f
2267 = container_of(p, struct vxlan_fdb, hlist);
2268 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2269 if (!is_zero_ether_addr(f->eth_addr))
2270 vxlan_fdb_destroy(vxlan, f);
2271 }
2272 }
2273 spin_unlock_bh(&vxlan->hash_lock);
2274 }
2275
2276 /* Cleanup timer and forwarding table on shutdown */
2277 static int vxlan_stop(struct net_device *dev)
2278 {
2279 struct vxlan_dev *vxlan = netdev_priv(dev);
2280 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2281 struct vxlan_sock *vs = vxlan->vn_sock;
2282
2283 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2284 !vxlan_group_used(vn, vxlan)) {
2285 vxlan_sock_hold(vs);
2286 dev_hold(dev);
2287 queue_work(vxlan_wq, &vxlan->igmp_leave);
2288 }
2289
2290 del_timer_sync(&vxlan->age_timer);
2291
2292 vxlan_flush(vxlan);
2293
2294 return 0;
2295 }
2296
2297 /* Stub, nothing needs to be done. */
2298 static void vxlan_set_multicast_list(struct net_device *dev)
2299 {
2300 }
2301
2302 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2303 {
2304 struct vxlan_dev *vxlan = netdev_priv(dev);
2305 struct vxlan_rdst *dst = &vxlan->default_dst;
2306 struct net_device *lowerdev;
2307 int max_mtu;
2308
2309 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2310 if (lowerdev == NULL)
2311 return eth_change_mtu(dev, new_mtu);
2312
2313 if (dst->remote_ip.sa.sa_family == AF_INET6)
2314 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
2315 else
2316 max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
2317
2318 if (new_mtu < 68 || new_mtu > max_mtu)
2319 return -EINVAL;
2320
2321 dev->mtu = new_mtu;
2322 return 0;
2323 }
2324
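Editor's note: max_mtu subtracts the full encapsulation overhead from the lower device's MTU. A sketch of the arithmetic behind those constants (values assumed from the VXLAN_HEADROOM / VXLAN6_HEADROOM definitions in <net/vxlan.h>):

    /* Outer IP + outer UDP + VXLAN header + inner Ethernet header. */
    enum {
    	IP4_HDR_LEN   = 20,
    	IP6_HDR_LEN   = 40,
    	UDP_HDR_LEN   = 8,
    	VXLAN_HDR_LEN = 8,
    	ETH_HDR_LEN   = 14,

    	VXLAN4_OVERHEAD = IP4_HDR_LEN + UDP_HDR_LEN + VXLAN_HDR_LEN + ETH_HDR_LEN, /* 50 */
    	VXLAN6_OVERHEAD = IP6_HDR_LEN + UDP_HDR_LEN + VXLAN_HDR_LEN + ETH_HDR_LEN, /* 70 */
    };

    /* e.g. a 1500-byte lower device leaves a 1450-byte VXLAN MTU over IPv4 */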
2325 static const struct net_device_ops vxlan_netdev_ops = {
2326 .ndo_init = vxlan_init,
2327 .ndo_uninit = vxlan_uninit,
2328 .ndo_open = vxlan_open,
2329 .ndo_stop = vxlan_stop,
2330 .ndo_start_xmit = vxlan_xmit,
2331 .ndo_get_stats64 = ip_tunnel_get_stats64,
2332 .ndo_set_rx_mode = vxlan_set_multicast_list,
2333 .ndo_change_mtu = vxlan_change_mtu,
2334 .ndo_validate_addr = eth_validate_addr,
2335 .ndo_set_mac_address = eth_mac_addr,
2336 .ndo_fdb_add = vxlan_fdb_add,
2337 .ndo_fdb_del = vxlan_fdb_delete,
2338 .ndo_fdb_dump = vxlan_fdb_dump,
2339 };
2340
2341 /* Info for udev: this is a virtual tunnel endpoint */
2342 static struct device_type vxlan_type = {
2343 .name = "vxlan",
2344 };
2345
2346 /* Calls the caller's ndo_add_vxlan_port() to supply the listening
2347 * VXLAN UDP ports. Callers are expected to implement
2348 * ndo_add_vxlan_port().
2349 */
2350 void vxlan_get_rx_port(struct net_device *dev)
2351 {
2352 struct vxlan_sock *vs;
2353 struct net *net = dev_net(dev);
2354 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2355 sa_family_t sa_family;
2356 __be16 port;
2357 unsigned int i;
2358
2359 spin_lock(&vn->sock_lock);
2360 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2361 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2362 port = inet_sk(vs->sock->sk)->inet_sport;
2363 sa_family = vs->sock->sk->sk_family;
2364 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2365 port);
2366 }
2367 }
2368 spin_unlock(&vn->sock_lock);
2369 }
2370 EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
2371
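Editor's note: a hypothetical NIC-driver counterpart to the loop above — a minimal ndo_add_vxlan_port() implementation. All names here are invented for illustration; real drivers write the port into a hardware register so the NIC can parse inner headers for RX checksum and RSS offload. (Kernel context assumed: <linux/netdevice.h>.)

    struct nic_priv {
    	__be16 vxlan_port;	/* port currently programmed into hw */
    };

    static void nic_add_vxlan_port(struct net_device *dev,
    			       sa_family_t sa_family, __be16 port)
    {
    	struct nic_priv *priv = netdev_priv(dev);

    	if (sa_family != AF_INET)	/* assume hw parses IPv4 outer only */
    		return;

    	priv->vxlan_port = port;
    	/* ...program the port into the device's tunnel-parser register... */
    }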
2372 /* Initialize the device structure. */
2373 static void vxlan_setup(struct net_device *dev)
2374 {
2375 struct vxlan_dev *vxlan = netdev_priv(dev);
2376 unsigned int h;
2377
2378 eth_hw_addr_random(dev);
2379 ether_setup(dev);
2380 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2381 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2382 else
2383 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2384
2385 dev->netdev_ops = &vxlan_netdev_ops;
2386 dev->destructor = free_netdev;
2387 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2388
2389 dev->tx_queue_len = 0;
2390 dev->features |= NETIF_F_LLTX;
2391 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2392 dev->features |= NETIF_F_RXCSUM;
2393 dev->features |= NETIF_F_GSO_SOFTWARE;
2394
2395 dev->vlan_features = dev->features;
2396 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2397 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2398 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2399 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2400 netif_keep_dst(dev);
2401 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2402
2403 INIT_LIST_HEAD(&vxlan->next);
2404 spin_lock_init(&vxlan->hash_lock);
2405 INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
2406 INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
2407 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
2408
2409 init_timer_deferrable(&vxlan->age_timer);
2410 vxlan->age_timer.function = vxlan_cleanup;
2411 vxlan->age_timer.data = (unsigned long) vxlan;
2412
2413 vxlan->dst_port = htons(vxlan_port);
2414
2415 vxlan->dev = dev;
2416
2417 for (h = 0; h < FDB_HASH_SIZE; ++h)
2418 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2419 }
2420
2421 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2422 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2423 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2424 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2425 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2426 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2427 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2428 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2429 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2430 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2431 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2432 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2433 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2434 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2435 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2436 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2437 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2438 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2439 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2440 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2441 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2442 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2443 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2444 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2445 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2446 };
2447
2448 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
2449 {
2450 if (tb[IFLA_ADDRESS]) {
2451 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2452 pr_debug("invalid link address (not ethernet)\n");
2453 return -EINVAL;
2454 }
2455
2456 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2457 pr_debug("invalid all zero ethernet address\n");
2458 return -EADDRNOTAVAIL;
2459 }
2460 }
2461
2462 if (!data)
2463 return -EINVAL;
2464
2465 if (data[IFLA_VXLAN_ID]) {
2466 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2467 if (id >= VXLAN_VID_MASK)
2468 return -ERANGE;
2469 }
2470
2471 if (data[IFLA_VXLAN_PORT_RANGE]) {
2472 const struct ifla_vxlan_port_range *p
2473 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2474
2475 if (ntohs(p->high) < ntohs(p->low)) {
2476 pr_debug("port range %u .. %u not valid\n",
2477 ntohs(p->low), ntohs(p->high));
2478 return -EINVAL;
2479 }
2480 }
2481
2482 return 0;
2483 }
2484
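Editor's note: vxlan_validate() bounds the VNI against VXLAN_VID_MASK. Assuming that mask is (1u << 24) - 1 (the VNI is a 24-bit field in the VXLAN header), a standalone sketch of the check follows; note the driver's >= comparison also rejects the top value 0xffffff itself:

    #include <stdbool.h>
    #include <stdint.h>

    #define VNI_BITS 24
    #define VNI_MASK ((1u << VNI_BITS) - 1)	/* 0x00ffffff */

    static bool vni_in_range(uint32_t id)
    {
    	/* Mirrors the driver's check: ids of VNI_MASK and above are
    	 * rejected, so 0xffffff is (perhaps surprisingly) excluded.
    	 */
    	return id < VNI_MASK;
    }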
2485 static void vxlan_get_drvinfo(struct net_device *netdev,
2486 struct ethtool_drvinfo *drvinfo)
2487 {
2488 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2489 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2490 }
2491
2492 static const struct ethtool_ops vxlan_ethtool_ops = {
2493 .get_drvinfo = vxlan_get_drvinfo,
2494 .get_link = ethtool_op_get_link,
2495 };
2496
2497 static void vxlan_del_work(struct work_struct *work)
2498 {
2499 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
2500 udp_tunnel_sock_release(vs->sock);
2501 kfree_rcu(vs, rcu);
2502 }
2503
2504 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2505 __be16 port, u32 flags)
2506 {
2507 struct socket *sock;
2508 struct udp_port_cfg udp_conf;
2509 int err;
2510
2511 memset(&udp_conf, 0, sizeof(udp_conf));
2512
2513 if (ipv6) {
2514 udp_conf.family = AF_INET6;
2515 udp_conf.use_udp6_rx_checksums =
2516 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2517 } else {
2518 udp_conf.family = AF_INET;
2519 udp_conf.local_ip.s_addr = INADDR_ANY;
2520 }
2521
2522 udp_conf.local_udp_port = port;
2523
2524 /* Open UDP socket */
2525 err = udp_sock_create(net, &udp_conf, &sock);
2526 if (err < 0)
2527 return ERR_PTR(err);
2528
2529 return sock;
2530 }
2531
2532 /* Create a new listening socket */
2533 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2534 vxlan_rcv_t *rcv, void *data,
2535 u32 flags)
2536 {
2537 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2538 struct vxlan_sock *vs;
2539 struct socket *sock;
2540 unsigned int h;
2541 bool ipv6 = !!(flags & VXLAN_F_IPV6);
2542 struct udp_tunnel_sock_cfg tunnel_cfg;
2543
2544 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2545 if (!vs)
2546 return ERR_PTR(-ENOMEM);
2547
2548 for (h = 0; h < VNI_HASH_SIZE; ++h)
2549 INIT_HLIST_HEAD(&vs->vni_list[h]);
2550
2551 INIT_WORK(&vs->del_work, vxlan_del_work);
2552
2553 sock = vxlan_create_sock(net, ipv6, port, flags);
2554 if (IS_ERR(sock)) {
2555 kfree(vs);
2556 return ERR_CAST(sock);
2557 }
2558
2559 vs->sock = sock;
2560 atomic_set(&vs->refcnt, 1);
2561 vs->rcv = rcv;
2562 vs->data = data;
2563 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2564
2565 /* Initialize the vxlan udp offloads structure */
2566 vs->udp_offloads.port = port;
2567 vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
2568 vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
2569
2570 spin_lock(&vn->sock_lock);
2571 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2572 vxlan_notify_add_rx_port(vs);
2573 spin_unlock(&vn->sock_lock);
2574
2575 /* Mark socket as an encapsulation socket. */
2576 tunnel_cfg.sk_user_data = vs;
2577 tunnel_cfg.encap_type = 1;
2578 tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
2579 tunnel_cfg.encap_destroy = NULL;
2580
2581 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2582
2583 return vs;
2584 }
2585
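Editor's note: a sketch of the encap_rcv() contract assumed by the tunnel_cfg setup above. The function name is ours and the body is a skeleton, not the driver's actual receive path. (Kernel context assumed: <linux/skbuff.h>, <net/sock.h>.)

    static int example_encap_recv(struct sock *sk, struct sk_buff *skb)
    {
    	/* Too short to carry the 8-byte VXLAN header: hand the
    	 * datagram back to the normal UDP stack.
    	 */
    	if (!pskb_may_pull(skb, 8))
    		return 1;	/* > 0: not for us, process as plain UDP */

    	/* ...validate the VXLAN flags/VNI, strip the outer headers,
    	 * look up the vxlan_sock via sk_user_data and deliver...
    	 */

    	return 0;		/* 0: skb consumed by the tunnel */
    }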
2586 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2587 vxlan_rcv_t *rcv, void *data,
2588 bool no_share, u32 flags)
2589 {
2590 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2591 struct vxlan_sock *vs;
2592 bool ipv6 = flags & VXLAN_F_IPV6;
2593
2594 vs = vxlan_socket_create(net, port, rcv, data, flags);
2595 if (!IS_ERR(vs))
2596 return vs;
2597
2598 if (no_share) /* Return error if sharing is not allowed. */
2599 return vs;
2600
2601 spin_lock(&vn->sock_lock);
2602 vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
2603 if (vs && ((vs->rcv != rcv) ||
2604 !atomic_add_unless(&vs->refcnt, 1, 0)))
2605 vs = ERR_PTR(-EBUSY);
2606 spin_unlock(&vn->sock_lock);
2607
2608 if (!vs)
2609 vs = ERR_PTR(-EINVAL);
2610
2611 return vs;
2612 }
2613 EXPORT_SYMBOL_GPL(vxlan_sock_add);
2614
2615 /* Scheduled at device creation to bind to a socket */
2616 static void vxlan_sock_work(struct work_struct *work)
2617 {
2618 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
2619 struct net *net = vxlan->net;
2620 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2621 __be16 port = vxlan->dst_port;
2622 struct vxlan_sock *nvs;
2623
2624 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
2625 spin_lock(&vn->sock_lock);
2626 if (!IS_ERR(nvs))
2627 vxlan_vs_add_dev(nvs, vxlan);
2628 spin_unlock(&vn->sock_lock);
2629
2630 dev_put(vxlan->dev);
2631 }
2632
2633 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2634 struct nlattr *tb[], struct nlattr *data[])
2635 {
2636 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2637 struct vxlan_dev *vxlan = netdev_priv(dev);
2638 struct vxlan_rdst *dst = &vxlan->default_dst;
2639 __u32 vni;
2640 int err;
2641 bool use_ipv6 = false;
2642
2643 if (!data[IFLA_VXLAN_ID])
2644 return -EINVAL;
2645
2646 vxlan->net = src_net;
2647
2648 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2649 dst->remote_vni = vni;
2650
2651 /* Unless IPv6 is explicitly requested, assume IPv4 */
2652 dst->remote_ip.sa.sa_family = AF_INET;
2653 if (data[IFLA_VXLAN_GROUP]) {
2654 dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
2655 } else if (data[IFLA_VXLAN_GROUP6]) {
2656 if (!IS_ENABLED(CONFIG_IPV6))
2657 return -EPFNOSUPPORT;
2658
2659 nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
2660 sizeof(struct in6_addr));
2661 dst->remote_ip.sa.sa_family = AF_INET6;
2662 use_ipv6 = true;
2663 }
2664
2665 if (data[IFLA_VXLAN_LOCAL]) {
2666 vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
2667 vxlan->saddr.sa.sa_family = AF_INET;
2668 } else if (data[IFLA_VXLAN_LOCAL6]) {
2669 if (!IS_ENABLED(CONFIG_IPV6))
2670 return -EPFNOSUPPORT;
2671
2672 /* TODO: respect scope id */
2673 nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
2674 sizeof(struct in6_addr));
2675 vxlan->saddr.sa.sa_family = AF_INET6;
2676 use_ipv6 = true;
2677 }
2678
2679 if (data[IFLA_VXLAN_LINK] &&
2680 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
2681 struct net_device *lowerdev
2682 = __dev_get_by_index(src_net, dst->remote_ifindex);
2683
2684 if (!lowerdev) {
2685 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
2686 return -ENODEV;
2687 }
2688
2689 #if IS_ENABLED(CONFIG_IPV6)
2690 if (use_ipv6) {
2691 struct inet6_dev *idev = __in6_dev_get(lowerdev);
2692 if (idev && idev->cnf.disable_ipv6) {
2693 pr_info("IPv6 is disabled via sysctl\n");
2694 return -EPERM;
2695 }
2696 vxlan->flags |= VXLAN_F_IPV6;
2697 }
2698 #endif
2699
2700 if (!tb[IFLA_MTU])
2701 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2702
2703 dev->needed_headroom = lowerdev->hard_header_len +
2704 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2705 } else if (use_ipv6)
2706 vxlan->flags |= VXLAN_F_IPV6;
2707
2708 if (data[IFLA_VXLAN_TOS])
2709 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
2710
2711 if (data[IFLA_VXLAN_TTL])
2712 vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
2713
2714 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
2715 vxlan->flags |= VXLAN_F_LEARN;
2716
2717 if (data[IFLA_VXLAN_AGEING])
2718 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
2719 else
2720 vxlan->age_interval = FDB_AGE_DEFAULT;
2721
2722 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
2723 vxlan->flags |= VXLAN_F_PROXY;
2724
2725 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
2726 vxlan->flags |= VXLAN_F_RSC;
2727
2728 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
2729 vxlan->flags |= VXLAN_F_L2MISS;
2730
2731 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
2732 vxlan->flags |= VXLAN_F_L3MISS;
2733
2734 if (data[IFLA_VXLAN_LIMIT])
2735 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
2736
2737 if (data[IFLA_VXLAN_PORT_RANGE]) {
2738 const struct ifla_vxlan_port_range *p
2739 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2740 vxlan->port_min = ntohs(p->low);
2741 vxlan->port_max = ntohs(p->high);
2742 }
2743
2744 if (data[IFLA_VXLAN_PORT])
2745 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
2746
2747 if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
2748 vxlan->flags |= VXLAN_F_UDP_CSUM;
2749
2750 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
2751 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
2752 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
2753
2754 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
2755 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2756 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2757
2758 if (data[IFLA_VXLAN_REMCSUM_TX] &&
2759 nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
2760 vxlan->flags |= VXLAN_F_REMCSUM_TX;
2761
2762 if (data[IFLA_VXLAN_REMCSUM_RX] &&
2763 nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
2764 vxlan->flags |= VXLAN_F_REMCSUM_RX;
2765
2766 if (data[IFLA_VXLAN_GBP])
2767 vxlan->flags |= VXLAN_F_GBP;
2768
2769 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
2770 vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
2771
2772 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2773 vxlan->dst_port, vxlan->flags)) {
2774 pr_info("duplicate VNI %u\n", vni);
2775 return -EEXIST;
2776 }
2777
2778 dev->ethtool_ops = &vxlan_ethtool_ops;
2779
2780 /* create an fdb entry for a valid default destination */
2781 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
2782 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2783 &vxlan->default_dst.remote_ip,
2784 NUD_REACHABLE|NUD_PERMANENT,
2785 NLM_F_EXCL|NLM_F_CREATE,
2786 vxlan->dst_port,
2787 vxlan->default_dst.remote_vni,
2788 vxlan->default_dst.remote_ifindex,
2789 NTF_SELF);
2790 if (err)
2791 return err;
2792 }
2793
2794 err = register_netdevice(dev);
2795 if (err) {
2796 vxlan_fdb_delete_default(vxlan);
2797 return err;
2798 }
2799
2800 list_add(&vxlan->next, &vn->vxlan_list);
2801
2802 return 0;
2803 }
2804
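Editor's note: for orientation, the netlink attributes parsed in vxlan_newlink() map directly onto iproute2's vxlan options; for example, the standard command

    ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789

exercises IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and IFLA_VXLAN_PORT. When IFLA_VXLAN_PORT is absent, vxlan->dst_port keeps the module default assigned in vxlan_setup().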
2805 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
2806 {
2807 struct vxlan_dev *vxlan = netdev_priv(dev);
2808 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2809
2810 spin_lock(&vn->sock_lock);
2811 if (!hlist_unhashed(&vxlan->hlist))
2812 hlist_del_rcu(&vxlan->hlist);
2813 spin_unlock(&vn->sock_lock);
2814
2815 list_del(&vxlan->next);
2816 unregister_netdevice_queue(dev, head);
2817 }
2818
2819 static size_t vxlan_get_size(const struct net_device *dev)
2820 {
2821
2822 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
2823 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
2824 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
2825 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
2826 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
2827 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
2828 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
2829 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
2830 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
2831 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
2832 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
2833 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
2834 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
2835 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
2836 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
2837 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
2838 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
2839 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
2840 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
2841 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
2842 0;
2843 }
2844
2845 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
2846 {
2847 const struct vxlan_dev *vxlan = netdev_priv(dev);
2848 const struct vxlan_rdst *dst = &vxlan->default_dst;
2849 struct ifla_vxlan_port_range ports = {
2850 .low = htons(vxlan->port_min),
2851 .high = htons(vxlan->port_max),
2852 };
2853
2854 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
2855 goto nla_put_failure;
2856
2857 if (!vxlan_addr_any(&dst->remote_ip)) {
2858 if (dst->remote_ip.sa.sa_family == AF_INET) {
2859 if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
2860 dst->remote_ip.sin.sin_addr.s_addr))
2861 goto nla_put_failure;
2862 #if IS_ENABLED(CONFIG_IPV6)
2863 } else {
2864 if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
2865 &dst->remote_ip.sin6.sin6_addr))
2866 goto nla_put_failure;
2867 #endif
2868 }
2869 }
2870
2871 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
2872 goto nla_put_failure;
2873
2874 if (!vxlan_addr_any(&vxlan->saddr)) {
2875 if (vxlan->saddr.sa.sa_family == AF_INET) {
2876 if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
2877 vxlan->saddr.sin.sin_addr.s_addr))
2878 goto nla_put_failure;
2879 #if IS_ENABLED(CONFIG_IPV6)
2880 } else {
2881 if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
2882 &vxlan->saddr.sin6.sin6_addr))
2883 goto nla_put_failure;
2884 #endif
2885 }
2886 }
2887
2888 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
2889 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
2890 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
2891 !!(vxlan->flags & VXLAN_F_LEARN)) ||
2892 nla_put_u8(skb, IFLA_VXLAN_PROXY,
2893 !!(vxlan->flags & VXLAN_F_PROXY)) ||
2894 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
2895 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
2896 !!(vxlan->flags & VXLAN_F_L2MISS)) ||
2897 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
2898 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
2899 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
2900 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
2901 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
2902 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
2903 !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
2904 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
2905 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
2906 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
2907 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
2908 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
2909 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
2910 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
2911 !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
2912 goto nla_put_failure;
2913
2914 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
2915 goto nla_put_failure;
2916
2917 if (vxlan->flags & VXLAN_F_GBP &&
2918 nla_put_flag(skb, IFLA_VXLAN_GBP))
2919 goto nla_put_failure;
2920
2921 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
2922 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
2923 goto nla_put_failure;
2924
2925 return 0;
2926
2927 nla_put_failure:
2928 return -EMSGSIZE;
2929 }
2930
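Editor's note: a nit visible when reading vxlan_get_size() against vxlan_fill_info(): the two NLA_FLAG attributes emitted here (IFLA_VXLAN_GBP and IFLA_VXLAN_REMCSUM_NOPARTIAL) are never accounted for in the size estimate above. A payload-less flag attribute still costs one aligned attribute header, as this sketch of the netlink size arithmetic (mirroring nla_total_size()/NLA_ALIGN from <net/netlink.h>) shows:

    #include <stddef.h>

    #define NLA_ALIGNTO    4
    #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN     NLA_ALIGN(4)	/* struct nlattr: u16 len + u16 type */

    static size_t nla_total_size_sketch(size_t payload)
    {
    	return NLA_ALIGN(NLA_HDRLEN + payload);
    }
    /* nla_total_size_sketch(0) == 4: the cost of one NLA_FLAG attribute */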
2931 static struct net *vxlan_get_link_net(const struct net_device *dev)
2932 {
2933 struct vxlan_dev *vxlan = netdev_priv(dev);
2934
2935 return vxlan->net;
2936 }
2937
2938 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
2939 .kind = "vxlan",
2940 .maxtype = IFLA_VXLAN_MAX,
2941 .policy = vxlan_policy,
2942 .priv_size = sizeof(struct vxlan_dev),
2943 .setup = vxlan_setup,
2944 .validate = vxlan_validate,
2945 .newlink = vxlan_newlink,
2946 .dellink = vxlan_dellink,
2947 .get_size = vxlan_get_size,
2948 .fill_info = vxlan_fill_info,
2949 .get_link_net = vxlan_get_link_net,
2950 };
2951
2952 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
2953 struct net_device *dev)
2954 {
2955 struct vxlan_dev *vxlan, *next;
2956 LIST_HEAD(list_kill);
2957
2958 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
2959 struct vxlan_rdst *dst = &vxlan->default_dst;
2960
2961 /* If the vxlan device was created on top of a lower
2962 * device and we lose that carrier (e.g. its module is
2963 * unloaded), the vxlan device must be removed as well.
2964 * In all other cases remote_ifindex is 0, so nothing
2965 * matches and this is a no-op.
2966 */
2967 if (dst->remote_ifindex == dev->ifindex)
2968 vxlan_dellink(vxlan->dev, &list_kill);
2969 }
2970
2971 unregister_netdevice_many(&list_kill);
2972 }
2973
2974 static int vxlan_lowerdev_event(struct notifier_block *unused,
2975 unsigned long event, void *ptr)
2976 {
2977 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2978 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2979
2980 if (event == NETDEV_UNREGISTER)
2981 vxlan_handle_lowerdev_unregister(vn, dev);
2982
2983 return NOTIFY_DONE;
2984 }
2985
2986 static struct notifier_block vxlan_notifier_block __read_mostly = {
2987 .notifier_call = vxlan_lowerdev_event,
2988 };
2989
2990 static __net_init int vxlan_init_net(struct net *net)
2991 {
2992 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2993 unsigned int h;
2994
2995 INIT_LIST_HEAD(&vn->vxlan_list);
2996 spin_lock_init(&vn->sock_lock);
2997
2998 for (h = 0; h < PORT_HASH_SIZE; ++h)
2999 INIT_HLIST_HEAD(&vn->sock_list[h]);
3000
3001 return 0;
3002 }
3003
3004 static void __net_exit vxlan_exit_net(struct net *net)
3005 {
3006 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3007 struct vxlan_dev *vxlan, *next;
3008 struct net_device *dev, *aux;
3009 LIST_HEAD(list);
3010
3011 rtnl_lock();
3012 for_each_netdev_safe(net, dev, aux)
3013 if (dev->rtnl_link_ops == &vxlan_link_ops)
3014 unregister_netdevice_queue(dev, &list);
3015
3016 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3017 /* If vxlan->dev is in the same netns, it has already been added
3018 * to the list by the previous loop.
3019 */
3020 if (!net_eq(dev_net(vxlan->dev), net))
3021 unregister_netdevice_queue(vxlan->dev, &list);
3022 }
3023
3024 unregister_netdevice_many(&list);
3025 rtnl_unlock();
3026 }
3027
3028 static struct pernet_operations vxlan_net_ops = {
3029 .init = vxlan_init_net,
3030 .exit = vxlan_exit_net,
3031 .id = &vxlan_net_id,
3032 .size = sizeof(struct vxlan_net),
3033 };
3034
3035 static int __init vxlan_init_module(void)
3036 {
3037 int rc;
3038
3039 vxlan_wq = alloc_workqueue("vxlan", 0, 0);
3040 if (!vxlan_wq)
3041 return -ENOMEM;
3042
3043 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
3044
3045 rc = register_pernet_subsys(&vxlan_net_ops);
3046 if (rc)
3047 goto out1;
3048
3049 rc = register_netdevice_notifier(&vxlan_notifier_block);
3050 if (rc)
3051 goto out2;
3052
3053 rc = rtnl_link_register(&vxlan_link_ops);
3054 if (rc)
3055 goto out3;
3056
3057 return 0;
3058 out3:
3059 unregister_netdevice_notifier(&vxlan_notifier_block);
3060 out2:
3061 unregister_pernet_subsys(&vxlan_net_ops);
3062 out1:
3063 destroy_workqueue(vxlan_wq);
3064 return rc;
3065 }
3066 late_initcall(vxlan_init_module);
3067
3068 static void __exit vxlan_cleanup_module(void)
3069 {
3070 rtnl_link_unregister(&vxlan_link_ops);
3071 unregister_netdevice_notifier(&vxlan_notifier_block);
3072 destroy_workqueue(vxlan_wq);
3073 unregister_pernet_subsys(&vxlan_net_ops);
3074 /* rcu_barrier() is called by netns */
3075 }
3076 module_exit(vxlan_cleanup_module);
3077
3078 MODULE_LICENSE("GPL");
3079 MODULE_VERSION(VXLAN_VERSION);
3080 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
3081 MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
3082 MODULE_ALIAS_RTNL_LINK("vxlan");