net/ipv6/ip6_output.c
1 /*
2 * IPv6 output functions
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on linux/net/ipv4/ip_output.c
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * Changes:
16 * A.N.Kuznetsov : arithmetic in fragmentation.
17 * extension headers are implemented.
18 * route changes now work.
19 * ip6_forward does not confuse sniffers.
20 * etc.
21 *
22 * H. von Brand : Added missing #include <linux/string.h>
23 * Imran Patel : frag id should be in NBO
24 * Kazunori MIYAZAWA @USAGI
25 * : add ip6_append_data and related functions
26 * for datagram xmit
27 */
28
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
44
45 #include <net/sock.h>
46 #include <net/snmp.h>
47
48 #include <net/ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
54 #include <net/icmp.h>
55 #include <net/xfrm.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58
59 static int ip6_finish_output2(struct sk_buff *skb)
60 {
61 struct dst_entry *dst = skb_dst(skb);
62 struct net_device *dev = dst->dev;
63 struct neighbour *neigh;
64 struct in6_addr *nexthop;
65 int ret;
66
67 skb->protocol = htons(ETH_P_IPV6);
68 skb->dev = dev;
69
70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
72
73 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
74 ((mroute6_socket(dev_net(dev), skb) &&
75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
77 &ipv6_hdr(skb)->saddr))) {
78 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
79
80 /* Do not check for IFF_ALLMULTI; multicast routing
81 is not supported in any case.
82 */
83 if (newskb)
84 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
85 newskb, NULL, newskb->dev,
86 dev_loopback_xmit);
87
88 if (ipv6_hdr(skb)->hop_limit == 0) {
89 IP6_INC_STATS(dev_net(dev), idev,
90 IPSTATS_MIB_OUTDISCARDS);
91 kfree_skb(skb);
92 return 0;
93 }
94 }
95
96 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
97 skb->len);
98
99 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 IPV6_ADDR_SCOPE_NODELOCAL &&
101 !(dev->flags & IFF_LOOPBACK)) {
102 kfree_skb(skb);
103 return 0;
104 }
105 }
106
107 rcu_read_lock_bh();
108 nexthop = rt6_nexthop((struct rt6_info *)dst);
109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 if (unlikely(!neigh))
111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 if (!IS_ERR(neigh)) {
113 ret = dst_neigh_output(dst, neigh, skb);
114 rcu_read_unlock_bh();
115 return ret;
116 }
117 rcu_read_unlock_bh();
118
119 IP6_INC_STATS(dev_net(dst->dev),
120 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
121 kfree_skb(skb);
122 return -EINVAL;
123 }
124
125 static int ip6_finish_output(struct sk_buff *skb)
126 {
127 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
128 dst_allfrag(skb_dst(skb)) ||
129 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
130 return ip6_fragment(skb, ip6_finish_output2);
131 else
132 return ip6_finish_output2(skb);
133 }
134
135 int ip6_output(struct sock *sk, struct sk_buff *skb)
136 {
137 struct net_device *dev = skb_dst(skb)->dev;
138 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
139 if (unlikely(idev->cnf.disable_ipv6)) {
140 IP6_INC_STATS(dev_net(dev), idev,
141 IPSTATS_MIB_OUTDISCARDS);
142 kfree_skb(skb);
143 return 0;
144 }
145
146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
147 ip6_finish_output,
148 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
149 }
150
151 /*
152 * xmit an sk_buff (used by TCP, SCTP and DCCP)
153 */
154
155 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
156 struct ipv6_txoptions *opt, int tclass)
157 {
158 struct net *net = sock_net(sk);
159 struct ipv6_pinfo *np = inet6_sk(sk);
160 struct in6_addr *first_hop = &fl6->daddr;
161 struct dst_entry *dst = skb_dst(skb);
162 struct ipv6hdr *hdr;
163 u8 proto = fl6->flowi6_proto;
164 int seg_len = skb->len;
165 int hlimit = -1;
166 u32 mtu;
167
168 if (opt) {
169 unsigned int head_room;
170
171 /* First: exthdrs may take lots of space (~8K for now);
172 MAX_HEADER is not enough.
173 */
174 head_room = opt->opt_nflen + opt->opt_flen;
175 seg_len += head_room;
176 head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
177
178 if (skb_headroom(skb) < head_room) {
179 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
180 if (skb2 == NULL) {
181 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
182 IPSTATS_MIB_OUTDISCARDS);
183 kfree_skb(skb);
184 return -ENOBUFS;
185 }
186 consume_skb(skb);
187 skb = skb2;
188 skb_set_owner_w(skb, sk);
189 }
190 if (opt->opt_flen)
191 ipv6_push_frag_opts(skb, opt, &proto);
192 if (opt->opt_nflen)
193 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
194 }
195
196 skb_push(skb, sizeof(struct ipv6hdr));
197 skb_reset_network_header(skb);
198 hdr = ipv6_hdr(skb);
199
200 /*
201 * Fill in the IPv6 header
202 */
203 if (np)
204 hlimit = np->hop_limit;
205 if (hlimit < 0)
206 hlimit = ip6_dst_hoplimit(dst);
207
208 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
209 np->autoflowlabel));
210
211 hdr->payload_len = htons(seg_len);
212 hdr->nexthdr = proto;
213 hdr->hop_limit = hlimit;
214
215 hdr->saddr = fl6->saddr;
216 hdr->daddr = *first_hop;
217
218 skb->protocol = htons(ETH_P_IPV6);
219 skb->priority = sk->sk_priority;
220 skb->mark = sk->sk_mark;
221
222 mtu = dst_mtu(dst);
223 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
224 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
225 IPSTATS_MIB_OUT, skb->len);
226 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
227 dst->dev, dst_output);
228 }
229
230 skb->dev = dst->dev;
231 ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
232 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
233 kfree_skb(skb);
234 return -EMSGSIZE;
235 }
236 EXPORT_SYMBOL(ip6_xmit);
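
/*
 * Usage sketch (illustrative, not from this file): a minimal, hypothetical
 * caller of ip6_xmit() in the style of a connected-socket transmit path.
 * It assumes the skb's transport header and payload are already built and
 * that the socket's dst has been validated; example_xmit_one() itself is
 * invented for illustration.
 */
static int example_xmit_one(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = np->saddr;
	fl6.flowlabel = np->flow_label;
	fl6.flowi6_oif = sk->sk_bound_dev_if;

	/* ip6_xmit() pushes the IPv6 header and hands the skb to the
	 * NF_INET_LOCAL_OUT hook; it consumes the skb on all paths. */
	return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
}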
237
238 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
239 {
240 struct ip6_ra_chain *ra;
241 struct sock *last = NULL;
242
243 read_lock(&ip6_ra_lock);
244 for (ra = ip6_ra_chain; ra; ra = ra->next) {
245 struct sock *sk = ra->sk;
246 if (sk && ra->sel == sel &&
247 (!sk->sk_bound_dev_if ||
248 sk->sk_bound_dev_if == skb->dev->ifindex)) {
249 if (last) {
250 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
251 if (skb2)
252 rawv6_rcv(last, skb2);
253 }
254 last = sk;
255 }
256 }
257
258 if (last) {
259 rawv6_rcv(last, skb);
260 read_unlock(&ip6_ra_lock);
261 return 1;
262 }
263 read_unlock(&ip6_ra_lock);
264 return 0;
265 }
266
267 static int ip6_forward_proxy_check(struct sk_buff *skb)
268 {
269 struct ipv6hdr *hdr = ipv6_hdr(skb);
270 u8 nexthdr = hdr->nexthdr;
271 __be16 frag_off;
272 int offset;
273
274 if (ipv6_ext_hdr(nexthdr)) {
275 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
276 if (offset < 0)
277 return 0;
278 } else
279 offset = sizeof(struct ipv6hdr);
280
281 if (nexthdr == IPPROTO_ICMPV6) {
282 struct icmp6hdr *icmp6;
283
284 if (!pskb_may_pull(skb, (skb_network_header(skb) +
285 offset + 1 - skb->data)))
286 return 0;
287
288 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
289
290 switch (icmp6->icmp6_type) {
291 case NDISC_ROUTER_SOLICITATION:
292 case NDISC_ROUTER_ADVERTISEMENT:
293 case NDISC_NEIGHBOUR_SOLICITATION:
294 case NDISC_NEIGHBOUR_ADVERTISEMENT:
295 case NDISC_REDIRECT:
296 /* For a unicast neighbor discovery message
297 * destined to the proxied address, pass it to
298 * the input function.
299 */
300 return 1;
301 default:
302 break;
303 }
304 }
305
306 /*
307 * The proxying router can't forward traffic sent to a link-local
308 * address, so signal the sender and discard the packet. This
309 * behavior is clarified by the MIPv6 specification.
310 */
311 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
312 dst_link_failure(skb);
313 return -1;
314 }
315
316 return 0;
317 }
318
319 static inline int ip6_forward_finish(struct sk_buff *skb)
320 {
321 return dst_output(skb);
322 }
323
324 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
325 {
326 unsigned int mtu;
327 struct inet6_dev *idev;
328
329 if (dst_metric_locked(dst, RTAX_MTU)) {
330 mtu = dst_metric_raw(dst, RTAX_MTU);
331 if (mtu)
332 return mtu;
333 }
334
335 mtu = IPV6_MIN_MTU;
336 rcu_read_lock();
337 idev = __in6_dev_get(dst->dev);
338 if (idev)
339 mtu = idev->cnf.mtu6;
340 rcu_read_unlock();
341
342 return mtu;
343 }
344
345 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
346 {
347 if (skb->len <= mtu)
348 return false;
349
350 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
352 return true;
353
354 if (skb->ignore_df)
355 return false;
356
357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
358 return false;
359
360 return true;
361 }
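
/*
 * Illustrative restatement of the decision above using plain scalars
 * (hypothetical helper, not used elsewhere): note the ordering, where a
 * conntrack-recorded frag_max_size larger than the MTU wins over
 * ignore_df, and a GSO packet passes if each resulting segment fits.
 */
static bool example_pkt_too_big(unsigned int pkt_len, unsigned int mtu,
				unsigned int frag_max_size, bool ignore_df,
				unsigned int gso_seglen)
{
	if (pkt_len <= mtu)
		return false;
	if (frag_max_size && frag_max_size > mtu)
		return true;
	if (ignore_df)
		return false;
	if (gso_seglen && gso_seglen <= mtu)	/* gso_seglen == 0: non-GSO */
		return false;
	return true;
}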
362
363 int ip6_forward(struct sk_buff *skb)
364 {
365 struct dst_entry *dst = skb_dst(skb);
366 struct ipv6hdr *hdr = ipv6_hdr(skb);
367 struct inet6_skb_parm *opt = IP6CB(skb);
368 struct net *net = dev_net(dst->dev);
369 u32 mtu;
370
371 if (net->ipv6.devconf_all->forwarding == 0)
372 goto error;
373
374 if (skb->pkt_type != PACKET_HOST)
375 goto drop;
376
377 if (skb_warn_if_lro(skb))
378 goto drop;
379
380 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
381 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
382 IPSTATS_MIB_INDISCARDS);
383 goto drop;
384 }
385
386 skb_forward_csum(skb);
387
388 /*
389 * We DO NOT do any processing on
390 * RA packets; we push them to user level AS IS
391 * without any WARRANTY that the application will be able
392 * to interpret them. The reason is that we
393 * cannot do anything clever here.
394 *
395 * We are not an end node, so if the packet contains
396 * AH/ESP we cannot do anything.
397 * Defragmentation would also be a mistake; RA packets
398 * cannot be fragmented, because there is no guarantee
399 * that different fragments will take the same path. --ANK
400 */
401 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
402 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
403 return 0;
404 }
405
406 /*
407 * check and decrement ttl
408 */
409 if (hdr->hop_limit <= 1) {
410 /* Force the output device's address to be used as the ICMP source */
411 skb->dev = dst->dev;
412 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
413 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
414 IPSTATS_MIB_INHDRERRORS);
415
416 kfree_skb(skb);
417 return -ETIMEDOUT;
418 }
419
420 /* XXX: idev->cnf.proxy_ndp? */
421 if (net->ipv6.devconf_all->proxy_ndp &&
422 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
423 int proxied = ip6_forward_proxy_check(skb);
424 if (proxied > 0)
425 return ip6_input(skb);
426 else if (proxied < 0) {
427 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
428 IPSTATS_MIB_INDISCARDS);
429 goto drop;
430 }
431 }
432
433 if (!xfrm6_route_forward(skb)) {
434 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
435 IPSTATS_MIB_INDISCARDS);
436 goto drop;
437 }
438 dst = skb_dst(skb);
439
440 /* IPv6 specs say nothing about it, but it is clear that we cannot
441 send redirects to source routed frames.
442 We don't send redirects to frames decapsulated from IPsec.
443 */
444 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
445 struct in6_addr *target = NULL;
446 struct inet_peer *peer;
447 struct rt6_info *rt;
448
449 /*
450 * incoming and outgoing devices are the same;
451 * send a redirect.
452 */
453
454 rt = (struct rt6_info *) dst;
455 if (rt->rt6i_flags & RTF_GATEWAY)
456 target = &rt->rt6i_gateway;
457 else
458 target = &hdr->daddr;
459
460 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
461
462 /* Limit redirects both by destination (here)
463 and by source (inside ndisc_send_redirect)
464 */
465 if (inet_peer_xrlim_allow(peer, 1*HZ))
466 ndisc_send_redirect(skb, target);
467 if (peer)
468 inet_putpeer(peer);
469 } else {
470 int addrtype = ipv6_addr_type(&hdr->saddr);
471
472 /* This check is security critical. */
473 if (addrtype == IPV6_ADDR_ANY ||
474 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
475 goto error;
476 if (addrtype & IPV6_ADDR_LINKLOCAL) {
477 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
478 ICMPV6_NOT_NEIGHBOUR, 0);
479 goto error;
480 }
481 }
482
483 mtu = ip6_dst_mtu_forward(dst);
484 if (mtu < IPV6_MIN_MTU)
485 mtu = IPV6_MIN_MTU;
486
487 if (ip6_pkt_too_big(skb, mtu)) {
488 /* Again, force the output device's address to be used as the ICMP source */
489 skb->dev = dst->dev;
490 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
491 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
492 IPSTATS_MIB_INTOOBIGERRORS);
493 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
494 IPSTATS_MIB_FRAGFAILS);
495 kfree_skb(skb);
496 return -EMSGSIZE;
497 }
498
499 if (skb_cow(skb, dst->dev->hard_header_len)) {
500 IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
501 IPSTATS_MIB_OUTDISCARDS);
502 goto drop;
503 }
504
505 hdr = ipv6_hdr(skb);
506
507 /* Mangling hops number delayed to point after skb COW */
508
509 hdr->hop_limit--;
510
511 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
512 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
513 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
514 ip6_forward_finish);
515
516 error:
517 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
518 drop:
519 kfree_skb(skb);
520 return -EINVAL;
521 }
522
523 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
524 {
525 to->pkt_type = from->pkt_type;
526 to->priority = from->priority;
527 to->protocol = from->protocol;
528 skb_dst_drop(to);
529 skb_dst_set(to, dst_clone(skb_dst(from)));
530 to->dev = from->dev;
531 to->mark = from->mark;
532
533 #ifdef CONFIG_NET_SCHED
534 to->tc_index = from->tc_index;
535 #endif
536 nf_copy(to, from);
537 skb_copy_secmark(to, from);
538 }
539
540 static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
541 {
542 static u32 ip6_idents_hashrnd __read_mostly;
543 u32 hash, id;
544
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
549
550 id = ip_idents_reserve(hash, 1);
551 fhdr->identification = htonl(id);
552 }
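
/*
 * Usage sketch (mirrors the call sites below in this file): the caller
 * hands in the fragment header it is building plus the route, and the
 * helper fills fhdr->identification, already converted to network byte
 * order ("frag id should be in NBO", per the changelog above).
 */
static void example_frag_id(struct frag_hdr *fh, struct rt6_info *rt)
{
	ipv6_select_ident(fh, rt);
	/* fh->identification is now valid; reuse it for every fragment
	 * of the same datagram, as ip6_fragment() does via frag_id. */
}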
553
554 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
555 {
556 struct sk_buff *frag;
557 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
558 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
559 struct ipv6hdr *tmp_hdr;
560 struct frag_hdr *fh;
561 unsigned int mtu, hlen, left, len;
562 int hroom, troom;
563 __be32 frag_id = 0;
564 int ptr, offset = 0, err = 0;
565 u8 *prevhdr, nexthdr = 0;
566 struct net *net = dev_net(skb_dst(skb)->dev);
567
568 hlen = ip6_find_1stfragopt(skb, &prevhdr);
569 nexthdr = *prevhdr;
570
571 mtu = ip6_skb_dst_mtu(skb);
572
573 /* We must not fragment if the socket is set to force MTU discovery
574 * or if the skb is not generated by a local socket.
575 */
576 if (unlikely(!skb->ignore_df && skb->len > mtu) ||
577 (IP6CB(skb)->frag_max_size &&
578 IP6CB(skb)->frag_max_size > mtu)) {
579 if (skb->sk && dst_allfrag(skb_dst(skb)))
580 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
581
582 skb->dev = skb_dst(skb)->dev;
583 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
584 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
585 IPSTATS_MIB_FRAGFAILS);
586 kfree_skb(skb);
587 return -EMSGSIZE;
588 }
589
590 if (np && np->frag_size < mtu) {
591 if (np->frag_size)
592 mtu = np->frag_size;
593 }
594 mtu -= hlen + sizeof(struct frag_hdr);
595
596 if (skb_has_frag_list(skb)) {
597 int first_len = skb_pagelen(skb);
598 struct sk_buff *frag2;
599
600 if (first_len - hlen > mtu ||
601 ((first_len - hlen) & 7) ||
602 skb_cloned(skb))
603 goto slow_path;
604
605 skb_walk_frags(skb, frag) {
606 /* Correct geometry. */
607 if (frag->len > mtu ||
608 ((frag->len & 7) && frag->next) ||
609 skb_headroom(frag) < hlen)
610 goto slow_path_clean;
611
612 /* Partially cloned skb? */
613 if (skb_shared(frag))
614 goto slow_path_clean;
615
616 BUG_ON(frag->sk);
617 if (skb->sk) {
618 frag->sk = skb->sk;
619 frag->destructor = sock_wfree;
620 }
621 skb->truesize -= frag->truesize;
622 }
623
624 err = 0;
625 offset = 0;
626 frag = skb_shinfo(skb)->frag_list;
627 skb_frag_list_init(skb);
628 /* BUILD HEADER */
629
630 *prevhdr = NEXTHDR_FRAGMENT;
631 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
632 if (!tmp_hdr) {
633 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
634 IPSTATS_MIB_FRAGFAILS);
635 return -ENOMEM;
636 }
637
638 __skb_pull(skb, hlen);
639 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
640 __skb_push(skb, hlen);
641 skb_reset_network_header(skb);
642 memcpy(skb_network_header(skb), tmp_hdr, hlen);
643
644 ipv6_select_ident(fh, rt);
645 fh->nexthdr = nexthdr;
646 fh->reserved = 0;
647 fh->frag_off = htons(IP6_MF);
648 frag_id = fh->identification;
649
650 first_len = skb_pagelen(skb);
651 skb->data_len = first_len - skb_headlen(skb);
652 skb->len = first_len;
653 ipv6_hdr(skb)->payload_len = htons(first_len -
654 sizeof(struct ipv6hdr));
655
656 dst_hold(&rt->dst);
657
658 for (;;) {
659 /* Prepare header of the next frame,
660 * before the previous one is sent down. */
661 if (frag) {
662 frag->ip_summed = CHECKSUM_NONE;
663 skb_reset_transport_header(frag);
664 fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
665 __skb_push(frag, hlen);
666 skb_reset_network_header(frag);
667 memcpy(skb_network_header(frag), tmp_hdr,
668 hlen);
669 offset += skb->len - hlen - sizeof(struct frag_hdr);
670 fh->nexthdr = nexthdr;
671 fh->reserved = 0;
672 fh->frag_off = htons(offset);
673 if (frag->next != NULL)
674 fh->frag_off |= htons(IP6_MF);
675 fh->identification = frag_id;
676 ipv6_hdr(frag)->payload_len =
677 htons(frag->len -
678 sizeof(struct ipv6hdr));
679 ip6_copy_metadata(frag, skb);
680 }
681
682 err = output(skb);
683 if (!err)
684 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
685 IPSTATS_MIB_FRAGCREATES);
686
687 if (err || !frag)
688 break;
689
690 skb = frag;
691 frag = skb->next;
692 skb->next = NULL;
693 }
694
695 kfree(tmp_hdr);
696
697 if (err == 0) {
698 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
699 IPSTATS_MIB_FRAGOKS);
700 ip6_rt_put(rt);
701 return 0;
702 }
703
704 kfree_skb_list(frag);
705
706 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
707 IPSTATS_MIB_FRAGFAILS);
708 ip6_rt_put(rt);
709 return err;
710
711 slow_path_clean:
712 skb_walk_frags(skb, frag2) {
713 if (frag2 == frag)
714 break;
715 frag2->sk = NULL;
716 frag2->destructor = NULL;
717 skb->truesize += frag2->truesize;
718 }
719 }
720
721 slow_path:
722 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
723 skb_checksum_help(skb))
724 goto fail;
725
726 left = skb->len - hlen; /* Space per frame */
727 ptr = hlen; /* Where to start from */
728
729 /*
730 * Fragment the datagram.
731 */
732
733 *prevhdr = NEXTHDR_FRAGMENT;
734 hroom = LL_RESERVED_SPACE(rt->dst.dev);
735 troom = rt->dst.dev->needed_tailroom;
736
737 /*
738 * Keep copying data until we run out.
739 */
740 while (left > 0) {
741 len = left;
742 /* IF: it doesn't fit, use 'mtu' - the data space left */
743 if (len > mtu)
744 len = mtu;
745 /* IF: we are not sending up to and including the packet end
746 then align the next start on an eight byte boundary */
747 if (len < left) {
748 len &= ~7;
749 }
750
751 /* Allocate buffer */
752 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
753 hroom + troom, GFP_ATOMIC);
754 if (!frag) {
755 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
756 IPSTATS_MIB_FRAGFAILS);
757 err = -ENOMEM;
758 goto fail;
759 }
760
761 /*
762 * Set up data on packet
763 */
764
765 ip6_copy_metadata(frag, skb);
766 skb_reserve(frag, hroom);
767 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
768 skb_reset_network_header(frag);
769 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
770 frag->transport_header = (frag->network_header + hlen +
771 sizeof(struct frag_hdr));
772
773 /*
774 * Charge the memory for the fragment to any owner
775 * it might possess
776 */
777 if (skb->sk)
778 skb_set_owner_w(frag, skb->sk);
779
780 /*
781 * Copy the packet header into the new buffer.
782 */
783 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
784
785 /*
786 * Build fragment header.
787 */
788 fh->nexthdr = nexthdr;
789 fh->reserved = 0;
790 if (!frag_id) {
791 ipv6_select_ident(fh, rt);
792 frag_id = fh->identification;
793 } else
794 fh->identification = frag_id;
795
796 /*
797 * Copy a block of the IP datagram.
798 */
799 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
800 len));
801 left -= len;
802
803 fh->frag_off = htons(offset);
804 if (left > 0)
805 fh->frag_off |= htons(IP6_MF);
806 ipv6_hdr(frag)->payload_len = htons(frag->len -
807 sizeof(struct ipv6hdr));
808
809 ptr += len;
810 offset += len;
811
812 /*
813 * Put this fragment into the sending queue.
814 */
815 err = output(frag);
816 if (err)
817 goto fail;
818
819 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
820 IPSTATS_MIB_FRAGCREATES);
821 }
822 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
823 IPSTATS_MIB_FRAGOKS);
824 consume_skb(skb);
825 return err;
826
827 fail:
828 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
829 IPSTATS_MIB_FRAGFAILS);
830 kfree_skb(skb);
831 return err;
832 }
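
/*
 * Worked example (illustrative numbers): per-fragment geometry as
 * computed in the slow path above. With a 1500-byte link MTU and a bare
 * 40-byte IPv6 header (hlen == 40, no extension headers before the
 * fragmentable part), "mtu" becomes 1500 - 40 - 8 == 1452, and every
 * non-final fragment carries (1452 & ~7) == 1448 payload bytes, so
 * frag_off advances in 1448-byte steps.
 */
static unsigned int example_frag_payload(unsigned int link_mtu,
					 unsigned int hlen)
{
	/* mirrors "mtu -= hlen + sizeof(struct frag_hdr)" plus the
	 * eight-byte alignment applied to every non-final fragment */
	return (link_mtu - hlen - sizeof(struct frag_hdr)) & ~7U;
}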
833
834 static inline int ip6_rt_check(const struct rt6key *rt_key,
835 const struct in6_addr *fl_addr,
836 const struct in6_addr *addr_cache)
837 {
838 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
839 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
840 }
841
842 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
843 struct dst_entry *dst,
844 const struct flowi6 *fl6)
845 {
846 struct ipv6_pinfo *np = inet6_sk(sk);
847 struct rt6_info *rt;
848
849 if (!dst)
850 goto out;
851
852 if (dst->ops->family != AF_INET6) {
853 dst_release(dst);
854 return NULL;
855 }
856
857 rt = (struct rt6_info *)dst;
858 /* Yes, checking route validity in the unconnected
859 * case is not very simple. Take into account
860 * that we do not support routing by source, TOS,
861 * or MSG_DONTROUTE --ANK (980726)
862 *
863 * 1. ip6_rt_check(): If the route was a host route,
864 * check that the cached destination is current.
865 * If it is a network route, we still may
866 * check its validity using a saved pointer
867 * to the last used address: daddr_cache.
868 * We do not want to save the whole address now
869 * (because the main consumer of this service
870 * is tcp, which does not have this problem),
871 * so the last trick works only on connected
872 * sockets.
873 * 2. oif also should be the same.
874 */
875 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
876 #ifdef CONFIG_IPV6_SUBTREES
877 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
878 #endif
879 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
880 dst_release(dst);
881 dst = NULL;
882 }
883
884 out:
885 return dst;
886 }
887
888 static int ip6_dst_lookup_tail(struct sock *sk,
889 struct dst_entry **dst, struct flowi6 *fl6)
890 {
891 struct net *net = sock_net(sk);
892 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
893 struct neighbour *n;
894 struct rt6_info *rt;
895 #endif
896 int err;
897
898 if (*dst == NULL)
899 *dst = ip6_route_output(net, sk, fl6);
900
901 err = (*dst)->error;
902 if (err)
903 goto out_err_release;
904
905 if (ipv6_addr_any(&fl6->saddr)) {
906 struct rt6_info *rt = (struct rt6_info *) *dst;
907 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
908 sk ? inet6_sk(sk)->srcprefs : 0,
909 &fl6->saddr);
910 if (err)
911 goto out_err_release;
912 }
913
914 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
915 /*
916 * Here, if the dst entry we've looked up
917 * has a neighbour entry that is in the INCOMPLETE
918 * state and the src address from the flow is
919 * marked as OPTIMISTIC, we release the found
920 * dst entry and replace it with the
921 * dst entry of the nexthop router.
922 */
923 rt = (struct rt6_info *) *dst;
924 rcu_read_lock_bh();
925 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
926 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
927 rcu_read_unlock_bh();
928
929 if (err) {
930 struct inet6_ifaddr *ifp;
931 struct flowi6 fl_gw6;
932 int redirect;
933
934 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
935 (*dst)->dev, 1);
936
937 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
938 if (ifp)
939 in6_ifa_put(ifp);
940
941 if (redirect) {
942 /*
943 * We need to get the dst entry for the
944 * default router instead
945 */
946 dst_release(*dst);
947 memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
948 memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
949 *dst = ip6_route_output(net, sk, &fl_gw6);
950 err = (*dst)->error;
951 if (err)
952 goto out_err_release;
953 }
954 }
955 #endif
956
957 return 0;
958
959 out_err_release:
960 if (err == -ENETUNREACH)
961 IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
962 dst_release(*dst);
963 *dst = NULL;
964 return err;
965 }
966
967 /**
968 * ip6_dst_lookup - perform route lookup on flow
969 * @sk: socket which provides route info
970 * @dst: pointer to dst_entry * for result
971 * @fl6: flow to lookup
972 *
973 * This function performs a route lookup on the given flow.
974 *
975 * It returns zero on success, or a standard errno code on error.
976 */
977 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
978 {
979 *dst = NULL;
980 return ip6_dst_lookup_tail(sk, dst, fl6);
981 }
982 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
983
984 /**
985 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
986 * @sk: socket which provides route info
987 * @fl6: flow to lookup
988 * @final_dst: final destination address for ipsec lookup
989 *
990 * This function performs a route lookup on the given flow.
991 *
992 * It returns a valid dst pointer on success, or a pointer encoded
993 * error code.
994 */
995 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
996 const struct in6_addr *final_dst)
997 {
998 struct dst_entry *dst = NULL;
999 int err;
1000
1001 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1002 if (err)
1003 return ERR_PTR(err);
1004 if (final_dst)
1005 fl6->daddr = *final_dst;
1006
1007 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1008 }
1009 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
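
/*
 * Usage sketch (hypothetical caller, not from this file): unlike
 * ip6_dst_lookup(), this variant returns the dst directly, with errors
 * encoded via ERR_PTR(), so callers must test with IS_ERR() rather than
 * checking for NULL.
 */
static int example_route_flow(struct sock *sk, struct flowi6 *fl6)
{
	struct dst_entry *dst;

	dst = ip6_dst_lookup_flow(sk, fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* ... transmit using dst ... */
	dst_release(dst);
	return 0;
}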
1010
1011 /**
1012 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1013 * @sk: socket which provides the dst cache and route info
1014 * @fl6: flow to lookup
1015 * @final_dst: final destination address for ipsec lookup
1016 *
1017 * This function performs a route lookup on the given flow with the
1018 * possibility of using the cached route in the socket if it is valid.
1019 * It will take the socket dst lock when operating on the dst cache.
1020 * As a result, this function can only be used in process context.
1021 *
1022 * It returns a valid dst pointer on success, or a pointer encoded
1023 * error code.
1024 */
1025 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1026 const struct in6_addr *final_dst)
1027 {
1028 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1029 int err;
1030
1031 dst = ip6_sk_dst_check(sk, dst, fl6);
1032
1033 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1034 if (err)
1035 return ERR_PTR(err);
1036 if (final_dst)
1037 fl6->daddr = *final_dst;
1038
1039 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1040 }
1041 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1042
1043 static inline int ip6_ufo_append_data(struct sock *sk,
1044 int getfrag(void *from, char *to, int offset, int len,
1045 int odd, struct sk_buff *skb),
1046 void *from, int length, int hh_len, int fragheaderlen,
1047 int transhdrlen, int mtu, unsigned int flags,
1048 struct rt6_info *rt)
1049
1050 {
1051 struct sk_buff *skb;
1052 struct frag_hdr fhdr;
1053 int err;
1054
1055 /* The network device supports UDP large send offload, so
1056 * create one single skb containing the complete
1057 * UDP datagram.
1058 */
1059 skb = skb_peek_tail(&sk->sk_write_queue);
1060 if (skb == NULL) {
1061 skb = sock_alloc_send_skb(sk,
1062 hh_len + fragheaderlen + transhdrlen + 20,
1063 (flags & MSG_DONTWAIT), &err);
1064 if (skb == NULL)
1065 return err;
1066
1067 /* reserve space for Hardware header */
1068 skb_reserve(skb, hh_len);
1069
1070 /* create space for UDP/IP header */
1071 skb_put(skb, fragheaderlen + transhdrlen);
1072
1073 /* initialize network header pointer */
1074 skb_reset_network_header(skb);
1075
1076 /* initialize protocol header pointer */
1077 skb->transport_header = skb->network_header + fragheaderlen;
1078
1079 skb->protocol = htons(ETH_P_IPV6);
1080 skb->csum = 0;
1081
1082 __skb_queue_tail(&sk->sk_write_queue, skb);
1083 } else if (skb_is_gso(skb)) {
1084 goto append;
1085 }
1086
1087 skb->ip_summed = CHECKSUM_PARTIAL;
1088 /* Specify the length of each IPv6 datagram fragment.
1089 * It has to be a multiple of 8.
1090 */
1091 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1092 sizeof(struct frag_hdr)) & ~7;
1093 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1094 ipv6_select_ident(&fhdr, rt);
1095 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1096
1097 append:
1098 return skb_append_datato_frags(sk, skb, getfrag, from,
1099 (length - transhdrlen));
1100 }
1101
1102 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1103 gfp_t gfp)
1104 {
1105 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1106 }
1107
1108 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1109 gfp_t gfp)
1110 {
1111 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1112 }
1113
1114 static void ip6_append_data_mtu(unsigned int *mtu,
1115 int *maxfraglen,
1116 unsigned int fragheaderlen,
1117 struct sk_buff *skb,
1118 struct rt6_info *rt,
1119 unsigned int orig_mtu)
1120 {
1121 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1122 if (skb == NULL) {
1123 /* first fragment, reserve header_len */
1124 *mtu = orig_mtu - rt->dst.header_len;
1125
1126 } else {
1127 /*
1128 * this fragment is not the first; the header
1129 * space is regarded as data space.
1130 */
1131 *mtu = orig_mtu;
1132 }
1133 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1134 + fragheaderlen - sizeof(struct frag_hdr);
1135 }
1136 }
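
/*
 * Worked example (illustrative numbers): the maxfraglen recomputed
 * above. For mtu == 1500 and fragheaderlen == 40 (bare IPv6 header),
 * maxfraglen = ((1500 - 40) & ~7) + 40 - 8 == 1488, i.e. each queued
 * fragment skb may hold 1488 bytes so that adding the 8-byte fragment
 * header still fits the MTU on an 8-byte-aligned payload boundary.
 */
static unsigned int example_maxfraglen(unsigned int mtu,
				       unsigned int fragheaderlen)
{
	return ((mtu - fragheaderlen) & ~7) + fragheaderlen -
	       sizeof(struct frag_hdr);
}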
1137
1138 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1139 int offset, int len, int odd, struct sk_buff *skb),
1140 void *from, int length, int transhdrlen,
1141 int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1142 struct rt6_info *rt, unsigned int flags, int dontfrag)
1143 {
1144 struct inet_sock *inet = inet_sk(sk);
1145 struct ipv6_pinfo *np = inet6_sk(sk);
1146 struct inet_cork *cork;
1147 struct sk_buff *skb, *skb_prev = NULL;
1148 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1149 int exthdrlen;
1150 int dst_exthdrlen;
1151 int hh_len;
1152 int copy;
1153 int err;
1154 int offset = 0;
1155 __u8 tx_flags = 0;
1156 u32 tskey = 0;
1157
1158 if (flags&MSG_PROBE)
1159 return 0;
1160 cork = &inet->cork.base;
1161 if (skb_queue_empty(&sk->sk_write_queue)) {
1162 /*
1163 * setup for corking
1164 */
1165 if (opt) {
1166 if (WARN_ON(np->cork.opt))
1167 return -EINVAL;
1168
1169 np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1170 if (unlikely(np->cork.opt == NULL))
1171 return -ENOBUFS;
1172
1173 np->cork.opt->tot_len = opt->tot_len;
1174 np->cork.opt->opt_flen = opt->opt_flen;
1175 np->cork.opt->opt_nflen = opt->opt_nflen;
1176
1177 np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1178 sk->sk_allocation);
1179 if (opt->dst0opt && !np->cork.opt->dst0opt)
1180 return -ENOBUFS;
1181
1182 np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1183 sk->sk_allocation);
1184 if (opt->dst1opt && !np->cork.opt->dst1opt)
1185 return -ENOBUFS;
1186
1187 np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1188 sk->sk_allocation);
1189 if (opt->hopopt && !np->cork.opt->hopopt)
1190 return -ENOBUFS;
1191
1192 np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1193 sk->sk_allocation);
1194 if (opt->srcrt && !np->cork.opt->srcrt)
1195 return -ENOBUFS;
1196
1197 /* need source address above --miyazawa */
1198 }
1199 dst_hold(&rt->dst);
1200 cork->dst = &rt->dst;
1201 inet->cork.fl.u.ip6 = *fl6;
1202 np->cork.hop_limit = hlimit;
1203 np->cork.tclass = tclass;
1204 if (rt->dst.flags & DST_XFRM_TUNNEL)
1205 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1206 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1207 else
1208 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1209 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1210 if (np->frag_size < mtu) {
1211 if (np->frag_size)
1212 mtu = np->frag_size;
1213 }
1214 cork->fragsize = mtu;
1215 if (dst_allfrag(rt->dst.path))
1216 cork->flags |= IPCORK_ALLFRAG;
1217 cork->length = 0;
1218 exthdrlen = (opt ? opt->opt_flen : 0);
1219 length += exthdrlen;
1220 transhdrlen += exthdrlen;
1221 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1222 } else {
1223 rt = (struct rt6_info *)cork->dst;
1224 fl6 = &inet->cork.fl.u.ip6;
1225 opt = np->cork.opt;
1226 transhdrlen = 0;
1227 exthdrlen = 0;
1228 dst_exthdrlen = 0;
1229 mtu = cork->fragsize;
1230 }
1231 orig_mtu = mtu;
1232
1233 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1234
1235 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1236 (opt ? opt->opt_nflen : 0);
1237 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1238 sizeof(struct frag_hdr);
1239
1240 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1241 unsigned int maxnonfragsize, headersize;
1242
1243 headersize = sizeof(struct ipv6hdr) +
1244 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1245 (dst_allfrag(&rt->dst) ?
1246 sizeof(struct frag_hdr) : 0) +
1247 rt->rt6i_nfheader_len;
1248
1249 if (ip6_sk_ignore_df(sk))
1250 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1251 else
1252 maxnonfragsize = mtu;
1253
1254 /* dontfrag active */
1255 if ((cork->length + length > mtu - headersize) && dontfrag &&
1256 (sk->sk_protocol == IPPROTO_UDP ||
1257 sk->sk_protocol == IPPROTO_RAW)) {
1258 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1259 sizeof(struct ipv6hdr));
1260 goto emsgsize;
1261 }
1262
1263 if (cork->length + length > maxnonfragsize - headersize) {
1264 emsgsize:
1265 ipv6_local_error(sk, EMSGSIZE, fl6,
1266 mtu - headersize +
1267 sizeof(struct ipv6hdr));
1268 return -EMSGSIZE;
1269 }
1270 }
1271
1272 if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1273 sock_tx_timestamp(sk, &tx_flags);
1274 if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1275 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1276 tskey = sk->sk_tskey++;
1277 }
1278
1279 /*
1280 * Let's try using as much space as possible.
1281 * Use MTU if total length of the message fits into the MTU.
1282 * Otherwise, we need to reserve fragment header and
1283 * fragment alignment (= 8-15 octets, in total).
1284 *
1285 * Note that we may need to "move" the data from the tail
1286 * of the buffer to the new fragment when we split
1287 * the message.
1288 *
1289 * FIXME: It may be fragmented into multiple chunks
1290 * at once if non-fragmentable extension headers
1291 * are too large.
1292 * --yoshfuji
1293 */
1294
1295 skb = skb_peek_tail(&sk->sk_write_queue);
1296 cork->length += length;
1297 if (((length > mtu) ||
1298 (skb && skb_is_gso(skb))) &&
1299 (sk->sk_protocol == IPPROTO_UDP) &&
1300 (rt->dst.dev->features & NETIF_F_UFO)) {
1301 err = ip6_ufo_append_data(sk, getfrag, from, length,
1302 hh_len, fragheaderlen,
1303 transhdrlen, mtu, flags, rt);
1304 if (err)
1305 goto error;
1306 return 0;
1307 }
1308
1309 if (!skb)
1310 goto alloc_new_skb;
1311
1312 while (length > 0) {
1313 /* Check if the remaining data fits into current packet. */
1314 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1315 if (copy < length)
1316 copy = maxfraglen - skb->len;
1317
1318 if (copy <= 0) {
1319 char *data;
1320 unsigned int datalen;
1321 unsigned int fraglen;
1322 unsigned int fraggap;
1323 unsigned int alloclen;
1324 alloc_new_skb:
1325 /* There's no room in the current skb */
1326 if (skb)
1327 fraggap = skb->len - maxfraglen;
1328 else
1329 fraggap = 0;
1330 /* update mtu and maxfraglen if necessary */
1331 if (skb == NULL || skb_prev == NULL)
1332 ip6_append_data_mtu(&mtu, &maxfraglen,
1333 fragheaderlen, skb, rt,
1334 orig_mtu);
1335
1336 skb_prev = skb;
1337
1338 /*
1339 * If remaining data exceeds the mtu,
1340 * we know we need more fragment(s).
1341 */
1342 datalen = length + fraggap;
1343
1344 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1345 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1346 if ((flags & MSG_MORE) &&
1347 !(rt->dst.dev->features&NETIF_F_SG))
1348 alloclen = mtu;
1349 else
1350 alloclen = datalen + fragheaderlen;
1351
1352 alloclen += dst_exthdrlen;
1353
1354 if (datalen != length + fraggap) {
1355 /*
1356 * this is not the last fragment; the trailer
1357 * space is regarded as data space.
1358 */
1359 datalen += rt->dst.trailer_len;
1360 }
1361
1362 alloclen += rt->dst.trailer_len;
1363 fraglen = datalen + fragheaderlen;
1364
1365 /*
1366 * We just reserve space for fragment header.
1367 * Note: this may be overallocation if the message
1368 * (without MSG_MORE) fits into the MTU.
1369 */
1370 alloclen += sizeof(struct frag_hdr);
1371
1372 if (transhdrlen) {
1373 skb = sock_alloc_send_skb(sk,
1374 alloclen + hh_len,
1375 (flags & MSG_DONTWAIT), &err);
1376 } else {
1377 skb = NULL;
1378 if (atomic_read(&sk->sk_wmem_alloc) <=
1379 2 * sk->sk_sndbuf)
1380 skb = sock_wmalloc(sk,
1381 alloclen + hh_len, 1,
1382 sk->sk_allocation);
1383 if (unlikely(skb == NULL))
1384 err = -ENOBUFS;
1385 }
1386 if (skb == NULL)
1387 goto error;
1388 /*
1389 * Fill in the control structures
1390 */
1391 skb->protocol = htons(ETH_P_IPV6);
1392 skb->ip_summed = CHECKSUM_NONE;
1393 skb->csum = 0;
1394 /* reserve for fragmentation and ipsec header */
1395 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1396 dst_exthdrlen);
1397
1398 /* Only the initial fragment is time stamped */
1399 skb_shinfo(skb)->tx_flags = tx_flags;
1400 tx_flags = 0;
1401 skb_shinfo(skb)->tskey = tskey;
1402 tskey = 0;
1403
1404 /*
1405 * Find where to start putting bytes
1406 */
1407 data = skb_put(skb, fraglen);
1408 skb_set_network_header(skb, exthdrlen);
1409 data += fragheaderlen;
1410 skb->transport_header = (skb->network_header +
1411 fragheaderlen);
1412 if (fraggap) {
1413 skb->csum = skb_copy_and_csum_bits(
1414 skb_prev, maxfraglen,
1415 data + transhdrlen, fraggap, 0);
1416 skb_prev->csum = csum_sub(skb_prev->csum,
1417 skb->csum);
1418 data += fraggap;
1419 pskb_trim_unique(skb_prev, maxfraglen);
1420 }
1421 copy = datalen - transhdrlen - fraggap;
1422
1423 if (copy < 0) {
1424 err = -EINVAL;
1425 kfree_skb(skb);
1426 goto error;
1427 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1428 err = -EFAULT;
1429 kfree_skb(skb);
1430 goto error;
1431 }
1432
1433 offset += copy;
1434 length -= datalen - fraggap;
1435 transhdrlen = 0;
1436 exthdrlen = 0;
1437 dst_exthdrlen = 0;
1438
1439 /*
1440 * Put the packet on the pending queue
1441 */
1442 __skb_queue_tail(&sk->sk_write_queue, skb);
1443 continue;
1444 }
1445
1446 if (copy > length)
1447 copy = length;
1448
1449 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1450 unsigned int off;
1451
1452 off = skb->len;
1453 if (getfrag(from, skb_put(skb, copy),
1454 offset, copy, off, skb) < 0) {
1455 __skb_trim(skb, off);
1456 err = -EFAULT;
1457 goto error;
1458 }
1459 } else {
1460 int i = skb_shinfo(skb)->nr_frags;
1461 struct page_frag *pfrag = sk_page_frag(sk);
1462
1463 err = -ENOMEM;
1464 if (!sk_page_frag_refill(sk, pfrag))
1465 goto error;
1466
1467 if (!skb_can_coalesce(skb, i, pfrag->page,
1468 pfrag->offset)) {
1469 err = -EMSGSIZE;
1470 if (i == MAX_SKB_FRAGS)
1471 goto error;
1472
1473 __skb_fill_page_desc(skb, i, pfrag->page,
1474 pfrag->offset, 0);
1475 skb_shinfo(skb)->nr_frags = ++i;
1476 get_page(pfrag->page);
1477 }
1478 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1479 if (getfrag(from,
1480 page_address(pfrag->page) + pfrag->offset,
1481 offset, copy, skb->len, skb) < 0)
1482 goto error_efault;
1483
1484 pfrag->offset += copy;
1485 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1486 skb->len += copy;
1487 skb->data_len += copy;
1488 skb->truesize += copy;
1489 atomic_add(copy, &sk->sk_wmem_alloc);
1490 }
1491 offset += copy;
1492 length -= copy;
1493 }
1494
1495 return 0;
1496
1497 error_efault:
1498 err = -EFAULT;
1499 error:
1500 cork->length -= length;
1501 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1502 return err;
1503 }
1504 EXPORT_SYMBOL_GPL(ip6_append_data);
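
/*
 * Usage sketch (hypothetical, loosely modeled on a datagram sendmsg
 * path): ip6_append_data() only queues data on sk->sk_write_queue; a
 * later ip6_push_pending_frames() builds the IPv6 header and transmits,
 * while ip6_flush_pending_frames() discards the queue on error. Both
 * the getfrag callback and the wrapper below are invented for
 * illustration; this simple getfrag copies from a linear kernel buffer
 * and leaves checksum handling to the caller.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}

static int example_corked_send(struct sock *sk, void *data, int len,
			       struct flowi6 *fl6, struct rt6_info *rt)
{
	int err;

	lock_sock(sk);
	err = ip6_append_data(sk, example_getfrag, data, len,
			      0 /* transhdrlen */,
			      64 /* hop limit, pre-resolved by the caller */,
			      0 /* tclass */, NULL /* opt */,
			      fl6, rt, MSG_DONTWAIT, 0 /* dontfrag */);
	if (err)
		ip6_flush_pending_frames(sk);
	else
		err = ip6_push_pending_frames(sk);
	release_sock(sk);
	return err;
}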
1505
1506 static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1507 {
1508 if (np->cork.opt) {
1509 kfree(np->cork.opt->dst0opt);
1510 kfree(np->cork.opt->dst1opt);
1511 kfree(np->cork.opt->hopopt);
1512 kfree(np->cork.opt->srcrt);
1513 kfree(np->cork.opt);
1514 np->cork.opt = NULL;
1515 }
1516
1517 if (inet->cork.base.dst) {
1518 dst_release(inet->cork.base.dst);
1519 inet->cork.base.dst = NULL;
1520 inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1521 }
1522 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1523 }
1524
1525 int ip6_push_pending_frames(struct sock *sk)
1526 {
1527 struct sk_buff *skb, *tmp_skb;
1528 struct sk_buff **tail_skb;
1529 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1530 struct inet_sock *inet = inet_sk(sk);
1531 struct ipv6_pinfo *np = inet6_sk(sk);
1532 struct net *net = sock_net(sk);
1533 struct ipv6hdr *hdr;
1534 struct ipv6_txoptions *opt = np->cork.opt;
1535 struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1536 struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1537 unsigned char proto = fl6->flowi6_proto;
1538 int err = 0;
1539
1540 skb = __skb_dequeue(&sk->sk_write_queue);
1541 if (skb == NULL)
1542 goto out;
1543 tail_skb = &(skb_shinfo(skb)->frag_list);
1544
1545 /* move skb->data to ip header from ext header */
1546 if (skb->data < skb_network_header(skb))
1547 __skb_pull(skb, skb_network_offset(skb));
1548 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1549 __skb_pull(tmp_skb, skb_network_header_len(skb));
1550 *tail_skb = tmp_skb;
1551 tail_skb = &(tmp_skb->next);
1552 skb->len += tmp_skb->len;
1553 skb->data_len += tmp_skb->len;
1554 skb->truesize += tmp_skb->truesize;
1555 tmp_skb->destructor = NULL;
1556 tmp_skb->sk = NULL;
1557 }
1558
1559 /* Allow local fragmentation. */
1560 skb->ignore_df = ip6_sk_ignore_df(sk);
1561
1562 *final_dst = fl6->daddr;
1563 __skb_pull(skb, skb_network_header_len(skb));
1564 if (opt && opt->opt_flen)
1565 ipv6_push_frag_opts(skb, opt, &proto);
1566 if (opt && opt->opt_nflen)
1567 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1568
1569 skb_push(skb, sizeof(struct ipv6hdr));
1570 skb_reset_network_header(skb);
1571 hdr = ipv6_hdr(skb);
1572
1573 ip6_flow_hdr(hdr, np->cork.tclass,
1574 ip6_make_flowlabel(net, skb, fl6->flowlabel,
1575 np->autoflowlabel));
1576 hdr->hop_limit = np->cork.hop_limit;
1577 hdr->nexthdr = proto;
1578 hdr->saddr = fl6->saddr;
1579 hdr->daddr = *final_dst;
1580
1581 skb->priority = sk->sk_priority;
1582 skb->mark = sk->sk_mark;
1583
1584 skb_dst_set(skb, dst_clone(&rt->dst));
1585 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1586 if (proto == IPPROTO_ICMPV6) {
1587 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1588
1589 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1590 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1591 }
1592
1593 err = ip6_local_out(skb);
1594 if (err) {
1595 if (err > 0)
1596 err = net_xmit_errno(err);
1597 if (err)
1598 goto error;
1599 }
1600
1601 out:
1602 ip6_cork_release(inet, np);
1603 return err;
1604 error:
1605 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1606 goto out;
1607 }
1608 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1609
1610 void ip6_flush_pending_frames(struct sock *sk)
1611 {
1612 struct sk_buff *skb;
1613
1614 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1615 if (skb_dst(skb))
1616 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1617 IPSTATS_MIB_OUTDISCARDS);
1618 kfree_skb(skb);
1619 }
1620
1621 ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1622 }
1623 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);