net/ipv4/fou.c
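/*
 * fou.c - Foo-over-UDP (FOU) and Generic UDP Encapsulation (GUE):
 * receive-path decapsulation, GRO offload hooks, and encapsulation
 * header construction for IP tunnels.
 */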
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

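/* Per-port FOU state: the listening UDP socket, the inner protocol that
 * decapsulated packets are handed to, and the GRO offload callbacks
 * registered for the port.
 */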
struct fou {
        struct socket *sock;
        u8 protocol;
        u8 flags;
        u16 port;
        struct udp_offload udp_offloads;
        struct list_head list;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

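/* Port configuration parsed from a FOU netlink request. */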
struct fou_cfg {
        u16 type;
        u8 protocol;
        u8 flags;
        struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
        return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
        struct iphdr *iph = ip_hdr(skb);

        /* Remove 'len' bytes from the packet (UDP header and
         * FOU header if present).
         */
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);
}

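/* encap_rcv handler for direct FOU encapsulation: strip the UDP header
 * and return the negative inner protocol number so the IP input path
 * resubmits the packet to that protocol's handler.
 */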
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);

        if (!fou)
                return 1;

        fou_recv_pull(skb, sizeof(struct udphdr));

        return -fou->protocol;
}

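/* Process the GUE remote checksum offload option: make sure the bytes
 * covered by the option are in the linear area, then fold the offloaded
 * checksum into the packet.
 */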
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                                  void *data, size_t hdrlen, u8 ipproto,
                                  bool nopartial)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

        if (!pskb_may_pull(skb, plen))
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        skb_remcsum_process(skb, (void *)guehdr + hdrlen,
                            start, offset, nopartial);

        return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
        /* No support yet */
        kfree_skb(skb);
        return 0;
}

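/* encap_rcv handler for GUE: validate the GUE header, walk the private
 * flags (currently only remote checksum offload), then strip the outer
 * headers and return the negative inner protocol for resubmission.
 */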
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);
        size_t len, optlen, hdrlen;
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;

        if (!fou)
                return 1;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);
        if (!pskb_may_pull(skb, len))
                goto drop;

        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (!pskb_may_pull(skb, len))
                goto drop;

        /* guehdr may change after pull */
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
                goto drop;

        hdrlen = sizeof(struct guehdr) + optlen;

        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

        /* Pull csum through the guehdr now.  This can be used if
         * there is a remote checksum offload.
         */
        skb_postpull_rcsum(skb, udp_hdr(skb), len);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_remcsum(skb, guehdr, data + doffset,
                                             hdrlen, guehdr->proto_ctype,
                                             !!(fou->flags &
                                                FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto drop;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);

        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);

        return -guehdr->proto_ctype;

drop:
        kfree_skb(skb);
        return 0;
}

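/* GRO receive for direct FOU: hand the aggregation decision straight to
 * the offload callbacks of the inner protocol recorded by the UDP GRO
 * layer.
 */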
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb,
                                        struct udp_offload *uoff)
{
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        const struct net_offload **offloads;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();

        return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff,
                            struct udp_offload *uoff)
{
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        int err = -ENOSYS;
        const struct net_offload **offloads;

        udp_tunnel_gro_complete(skb, nhoff);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
        rcu_read_unlock();

        return err;
}

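/* GRO-time counterpart of gue_remcsum(): fold the remote checksum
 * offload into the packet while it is still held by GRO, and mark the
 * skb so the option is not processed a second time.
 */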
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
                                      size_t hdrlen, u8 ipproto,
                                      struct gro_remcsum *grc, bool nopartial)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

        if (skb->remcsum_offload)
                return NULL;

        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;

        /* Pull checksum that will be written */
        if (skb_gro_header_hard(skb, off + plen)) {
                guehdr = skb_gro_header_slow(skb, off + plen, off);
                if (!guehdr)
                        return NULL;
        }

        skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
                                start, offset, grc, nopartial);

        skb->remcsum_offload = 1;

        return guehdr;
}

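/* GRO receive for GUE: parse and validate the GUE header, apply remote
 * checksum offload if requested, match candidate flows on the full GUE
 * header, then defer to the inner protocol's gro_receive.
 */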
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb,
                                        struct udp_offload *uoff)
{
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct guehdr *guehdr;
        size_t len, optlen, hdrlen, off;
        void *data;
        u16 doffset = 0;
        int flush = 1;
        struct fou *fou = container_of(uoff, struct fou, udp_offloads);
        struct gro_remcsum grc;

        skb_gro_remcsum_init(&grc);

        off = skb_gro_offset(skb);
        len = off + sizeof(*guehdr);

        guehdr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        if (unlikely(guehdr->control) || guehdr->version != 0 ||
            validate_gue_flags(guehdr, optlen))
                goto out;

        hdrlen = sizeof(*guehdr) + optlen;

        /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
         * this is needed if there is a remote checksum offload.
         */
        skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
                                                 data + doffset, hdrlen,
                                                 guehdr->proto_ctype, &grc,
                                                 !!(fou->flags &
                                                    FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto out;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        skb_gro_pull(skb, hdrlen);

        flush = 0;

        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                guehdr2 = (struct guehdr *)(p->data + off);

                /* Compare base GUE header to be equal (covers
                 * hlen, version, proto_ctype, and flags).
                 */
                if (guehdr->word != guehdr2->word) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* Check that the optional fields are the same. */
                if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
                                           guehdr->hlen << 2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
        if (WARN_ON(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        skb_gro_remcsum_cleanup(skb, &grc);

        return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff,
                            struct udp_offload *uoff)
{
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        unsigned int guehlen;
        u8 proto;
        int err = -ENOENT;

        proto = guehdr->proto_ctype;

        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
        rcu_read_unlock();
        return err;
}

static int fou_add_to_port_list(struct fou *fou)
{
        struct fou *fout;

        spin_lock(&fou_lock);
        list_for_each_entry(fout, &fou_list, list) {
                if (fou->port == fout->port) {
                        spin_unlock(&fou_lock);
                        return -EALREADY;
                }
        }

        list_add(&fou->list, &fou_list);
        spin_unlock(&fou_lock);

        return 0;
}

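/* Tear down a FOU port: unregister the GRO offloads, unlink the entry,
 * detach it from the tunnel socket, and release the socket.
 */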
static void fou_release(struct fou *fou)
{
        struct socket *sock = fou->sock;
        struct sock *sk = sock->sk;

        udp_del_offload(&fou->udp_offloads);

        list_del(&fou->list);

        /* Remove hooks into tunnel socket */
        sk->sk_user_data = NULL;

        sock_release(sock);

        kfree(fou);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = fou_udp_recv;
        fou->protocol = cfg->protocol;
        fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
        fou->udp_offloads.ipproto = cfg->protocol;

        return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = gue_udp_recv;
        fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;

        return 0;
}

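/* Create a FOU listener: open a kernel UDP socket on the configured
 * port, attach the encap handlers for the requested encapsulation type,
 * register the GRO offloads, and add the port to the global list.
 */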
static int fou_create(struct net *net, struct fou_cfg *cfg,
                      struct socket **sockp)
{
        struct fou *fou = NULL;
        int err;
        struct socket *sock = NULL;
        struct sock *sk;

        /* Open UDP socket */
        err = udp_sock_create(net, &cfg->udp_config, &sock);
        if (err < 0)
                goto error;

        /* Allocate FOU port structure */
        fou = kzalloc(sizeof(*fou), GFP_KERNEL);
        if (!fou) {
                err = -ENOMEM;
                goto error;
        }

        sk = sock->sk;

        fou->flags = cfg->flags;
        fou->port = cfg->udp_config.local_udp_port;

        /* Initialize according to the FOU encapsulation type */
        switch (cfg->type) {
        case FOU_ENCAP_DIRECT:
                err = fou_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        case FOU_ENCAP_GUE:
                err = gue_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        default:
                err = -EINVAL;
                goto error;
        }

        udp_sk(sk)->encap_type = 1;
        udp_encap_enable();

        sk->sk_user_data = fou;
        fou->sock = sock;

        inet_inc_convert_csum(sk);

        sk->sk_allocation = GFP_ATOMIC;

        if (cfg->udp_config.family == AF_INET) {
                err = udp_add_offload(&fou->udp_offloads);
                if (err)
                        goto error;
        }

        err = fou_add_to_port_list(fou);
        if (err)
                goto error;

        if (sockp)
                *sockp = sock;

        return 0;

error:
        kfree(fou);
        if (sock)
                sock_release(sock);

        return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
        struct fou *fou;
        u16 port = cfg->udp_config.local_udp_port;
        int err = -EINVAL;

        spin_lock(&fou_lock);
        list_for_each_entry(fou, &fou_list, list) {
                if (fou->port == port) {
                        /* fou_release() also unregisters the offloads */
                        fou_release(fou);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&fou_lock);

        return err;
}

static struct genl_family fou_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = FOU_GENL_NAME,
        .version        = FOU_GENL_VERSION,
        .maxattr        = FOU_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_PORT]                 = { .type = NLA_U16, },
        [FOU_ATTR_AF]                   = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO]              = { .type = NLA_U8, },
        [FOU_ATTR_TYPE]                 = { .type = NLA_U8, },
        [FOU_ATTR_REMCSUM_NOPARTIAL]    = { .type = NLA_FLAG, },
};

static int parse_nl_config(struct genl_info *info,
                           struct fou_cfg *cfg)
{
        memset(cfg, 0, sizeof(*cfg));

        cfg->udp_config.family = AF_INET;

        if (info->attrs[FOU_ATTR_AF]) {
                u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

                if (family != AF_INET && family != AF_INET6)
                        return -EINVAL;

                cfg->udp_config.family = family;
        }

        if (info->attrs[FOU_ATTR_PORT]) {
                u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

                cfg->udp_config.local_udp_port = port;
        }

        if (info->attrs[FOU_ATTR_IPPROTO])
                cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

        if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
                cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

        return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_destroy(&init_net, &cfg);
}

static const struct genl_ops fou_nl_ops[] = {
        {
                .cmd = FOU_CMD_ADD,
                .doit = fou_nl_cmd_add_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_DEL,
                .doit = fou_nl_cmd_rm_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
        return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
        size_t len;
        bool need_priv = false;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);

        if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
                len += GUE_PLEN_REMCSUM;
                need_priv = true;
        }

        len += need_priv ? GUE_LEN_PRIV : 0;

        return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

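/* Build the outer UDP header for an encapsulated packet and set the
 * outer protocol to UDP for the IP layer.
 */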
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
                          struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
        struct udphdr *uh;

        skb_push(skb, sizeof(struct udphdr));
        skb_reset_transport_header(skb);

        uh = udp_hdr(skb);

        uh->dest = e->dport;
        uh->source = sport;
        uh->len = htons(skb->len);
        uh->check = 0;
        udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
                     fl4->saddr, fl4->daddr, skb->len);

        *protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
        int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
        __be16 sport;

        skb = iptunnel_handle_offloads(skb, csum, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);
        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(fou_build_header);

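/* Like fou_build_header(), but additionally pushes a GUE header, with a
 * private remote checksum offload option when the encap is configured
 * for it and the inner checksum is being offloaded.
 */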
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
        int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
        struct guehdr *guehdr;
        size_t hdrlen, optlen = 0;
        __be16 sport;
        void *data;
        bool need_priv = false;

        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
                csum = false;
                optlen += GUE_PLEN_REMCSUM;
                type |= SKB_GSO_TUNNEL_REMCSUM;
                need_priv = true;
        }

        optlen += need_priv ? GUE_LEN_PRIV : 0;

        skb = iptunnel_handle_offloads(skb, csum, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* Get source port (based on flow hash) before skb_push */
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);

        hdrlen = sizeof(struct guehdr) + optlen;

        skb_push(skb, hdrlen);

        guehdr = (struct guehdr *)skb->data;

        guehdr->control = 0;
        guehdr->version = 0;
        guehdr->hlen = optlen >> 2;
        guehdr->flags = 0;
        guehdr->proto_ctype = *protocol;

        data = &guehdr[1];

        if (need_priv) {
                __be32 *flags = data;

                guehdr->flags |= GUE_FLAG_PRIV;
                *flags = 0;
                data += GUE_LEN_PRIV;

                if (type & SKB_GSO_TUNNEL_REMCSUM) {
                        u16 csum_start = skb_checksum_start_offset(skb);
                        __be16 *pd = data;

                        if (csum_start < hdrlen)
                                return -EINVAL;

                        csum_start -= hdrlen;
                        pd[0] = htons(csum_start);
                        pd[1] = htons(csum_start + skb->csum_offset);

                        if (!skb_is_gso(skb)) {
                                skb->ip_summed = CHECKSUM_NONE;
                                skb->encapsulation = 0;
                        }

                        *flags |= GUE_PFLAG_REMCSUM;
                        data += GUE_PLEN_REMCSUM;
                }
        }

        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
        .encap_hlen = fou_encap_hlen,
        .build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
        .encap_hlen = gue_encap_hlen,
        .build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
        int ret;

        ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        if (ret < 0) {
                pr_err("can't add fou ops\n");
                return ret;
        }

        ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
        if (ret < 0) {
                pr_err("can't add gue ops\n");
                ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
                return ret;
        }

        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
        ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static int __init fou_init(void)
{
        int ret;

        ret = genl_register_family_with_ops(&fou_nl_family,
                                            fou_nl_ops);
        if (ret < 0)
                goto exit;

        ret = ip_tunnel_encap_add_fou_ops();
        if (ret < 0)
                genl_unregister_family(&fou_nl_family);

exit:
        return ret;
}

static void __exit fou_fini(void)
{
        struct fou *fou, *next;

        ip_tunnel_encap_del_fou_ops();

        genl_unregister_family(&fou_nl_family);

        /* Close all the FOU sockets */

        spin_lock(&fou_lock);
        list_for_each_entry_safe(fou, next, &fou_list, list)
                fou_release(fou);
        spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");