/* net/ipv4/fou.c - Foo-over-UDP (FOU) and Generic UDP Encapsulation (GUE) */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

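/* State for a single FOU/GUE listener socket.  One of these is
 * allocated per bound UDP port and linked into the per-netns list.
 */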
struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u16 type;
	struct udp_offload udp_offloads;
	struct list_head list;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list;
	struct mutex fou_lock;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
}

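/* UDP encap_rcv handler for direct FOU.  Strips the UDP header and
 * returns the negative of the configured inner protocol number; the
 * UDP receive path turns that into a resubmit of the packet to the
 * corresponding IP protocol handler.  Returning 1 lets normal UDP
 * processing continue.
 */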
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	fou_recv_pull(skb, sizeof(struct udphdr));

	return -fou->protocol;
}

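/* Process the GUE remote checksum offload option on receive.  The
 * option payload is two 16-bit values: the offset at which
 * checksumming starts and the offset of the checksum field, both
 * relative to the end of the GUE header.
 */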
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

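/* UDP encap_rcv handler for GUE.  Validates the GUE header, processes
 * any private options (currently only remote checksum offload), then
 * strips the encapsulation and returns -proto_ctype so the inner
 * packet is redelivered to the indicated protocol.
 */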
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	hdrlen = sizeof(struct guehdr) + optlen;

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now.  This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

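/* GRO receive for direct FOU: the inner protocol number was stashed in
 * NAPI_GRO_CB(skb)->proto by the UDP offload layer, so simply delegate
 * to that protocol's gro_receive callback.
 */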
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

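/* GRO counterpart of gue_remcsum(): performs remote checksum offload
 * processing against the GRO checksum state and sets
 * skb->remcsum_offload so the non-GRO path will not repeat the work.
 */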
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

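/* GRO receive for GUE.  Beyond delegating to the inner protocol's
 * gro_receive, packets may only be aggregated when their GUE headers
 * match: the first 32-bit word (hlen, version, proto_ctype and flags,
 * compared via guehdr->word) and any option data must be identical.
 */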
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = container_of(uoff, struct fou, udp_offloads);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));

			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou->port == fout->port) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	if (sk->sk_family == AF_INET)
		udp_del_offload(&fou->udp_offloads);
	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree(fou);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}

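/* Create a FOU/GUE listener: open a kernel UDP socket, hook up the
 * encap_rcv handler and GRO offloads for the configured type, and add
 * the new entry to the per-netns port list.
 */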
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the configured encapsulation type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	fou->type = cfg->type;

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(net, fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	__be16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;
	struct fou *fou;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou->port == port) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]			= { .type = NLA_U16, },
	[FOU_ATTR_AF]			= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]		= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]			= { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG, },
};

static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		__be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;
	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (port == fout->port) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
		.policy = fou_nl_policy,
	},
};

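/* The netlink commands above are normally driven from userspace by
 * iproute2.  A minimal configuration sketch (syntax per the ip-fou and
 * ip-link manuals; exact option names may vary between iproute2
 * versions):
 *
 *   # Listen for FOU packets carrying IPIP (IP protocol 4) on port 7777
 *   ip fou add port 7777 ipproto 4
 *
 *   # Send an IPIP tunnel's traffic encapsulated in FOU
 *   ip link add name tun1 type ipip remote 192.168.1.1 \
 *       encap fou encap-sport auto encap-dport 7777
 */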
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	uh->check = 0;
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

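/* Construct the GUE header on transmit.  The base header is a single
 * 32-bit word (hlen, version, proto_ctype, flags).  When remote
 * checksum offload is requested, a private-flags word plus a
 * two-halfword option follow, carrying the checksum start and the
 * checksum field offset relative to the end of the GUE header.
 */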
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		csum = false;
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};

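/* Registration order matters: pernet state must exist before the
 * netlink family can accept commands, and the tunnel encap ops are
 * added last; fou_init() unwinds in reverse order on failure.
 */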
static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");