/*
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/snmp.h>

#define SIN6_LEN_RFC2133        24

#define IPV6_MAXPLEN            65535

/*
 * NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP             0       /* Hop-by-hop option header. */
#define NEXTHDR_TCP             6       /* TCP segment. */
#define NEXTHDR_UDP             17      /* UDP message. */
#define NEXTHDR_IPV6            41      /* IPv6 in IPv6 */
#define NEXTHDR_ROUTING         43      /* Routing header. */
#define NEXTHDR_FRAGMENT        44      /* Fragmentation/reassembly header. */
#define NEXTHDR_GRE             47      /* GRE header. */
#define NEXTHDR_ESP             50      /* Encapsulating security payload. */
#define NEXTHDR_AUTH            51      /* Authentication header. */
#define NEXTHDR_ICMP            58      /* ICMP for IPv6. */
#define NEXTHDR_NONE            59      /* No next header */
#define NEXTHDR_DEST            60      /* Destination options header. */
#define NEXTHDR_SCTP            132     /* SCTP message. */
#define NEXTHDR_MOBILITY        135     /* Mobility header. */

#define NEXTHDR_MAX             255

#define IPV6_DEFAULT_HOPLIMIT   64
#define IPV6_DEFAULT_MCASTHOPS  1

/*
 * Addr type
 *
 * type   - unicast | multicast
 * scope  - local   | site    | global
 * v4     - compat
 * v4mapped
 * any
 * loopback
 */

#define IPV6_ADDR_ANY           0x0000U

#define IPV6_ADDR_UNICAST       0x0001U
#define IPV6_ADDR_MULTICAST     0x0002U

#define IPV6_ADDR_LOOPBACK      0x0010U
#define IPV6_ADDR_LINKLOCAL     0x0020U
#define IPV6_ADDR_SITELOCAL     0x0040U

#define IPV6_ADDR_COMPATv4      0x0080U

#define IPV6_ADDR_SCOPE_MASK    0x00f0U

#define IPV6_ADDR_MAPPED        0x1000U

/*
 * Addr scopes
 */
#define IPV6_ADDR_MC_SCOPE(a) \
        ((a)->s6_addr[1] & 0x0f)        /* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID       -1
#define IPV6_ADDR_SCOPE_NODELOCAL       0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL       0x02
#define IPV6_ADDR_SCOPE_SITELOCAL       0x05
#define IPV6_ADDR_SCOPE_ORGLOCAL        0x08
#define IPV6_ADDR_SCOPE_GLOBAL          0x0e

/*
 * Addr flags
 */
#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
        ((a)->s6_addr[1] & 0x10)
#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
        ((a)->s6_addr[1] & 0x20)
#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
        ((a)->s6_addr[1] & 0x40)

/*
 * fragmentation header
 */

struct frag_hdr {
        __u8    nexthdr;
        __u8    reserved;
        __be16  frag_off;
        __be32  identification;
};

#define IP6_MF          0x0001
#define IP6_OFFSET      0xFFF8

#include <net/sock.h>

/* sysctls */
extern int sysctl_mld_max_msf;

#define _DEVINC(net, statname, modifier, idev, field) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
        SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field)); \
})

/* per device counters are atomic_long_t */
#define _DEVINCATOMIC(net, statname, modifier, idev, field) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field)); \
})

/* per device and per net counters are atomic_long_t */
#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field)); \
})

#define _DEVADD(net, statname, modifier, idev, field, val) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
        SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val)); \
})

#define _DEVUPD(net, statname, modifier, idev, field, val) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
        SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val)); \
})

/* MIBs */

#define IP6_INC_STATS(net, idev, field) \
        _DEVINC(net, ipv6, 64, idev, field)
#define IP6_INC_STATS_BH(net, idev, field) \
        _DEVINC(net, ipv6, 64_BH, idev, field)
#define IP6_ADD_STATS(net, idev, field, val) \
        _DEVADD(net, ipv6, 64, idev, field, val)
#define IP6_ADD_STATS_BH(net, idev, field, val) \
        _DEVADD(net, ipv6, 64_BH, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev, field, val) \
        _DEVUPD(net, ipv6, 64, idev, field, val)
#define IP6_UPD_PO_STATS_BH(net, idev, field, val) \
        _DEVUPD(net, ipv6, 64_BH, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
        _DEVINCATOMIC(net, icmpv6, , idev, field)
#define ICMP6_INC_STATS_BH(net, idev, field) \
        _DEVINCATOMIC(net, icmpv6, _BH, idev, field)

#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field + 256)
#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field + 256)
#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)

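/*
 * Usage sketch (illustrative only, not part of this header): the MIB helpers
 * above take a struct net, an inet6_dev that may be NULL, and an SNMP field
 * index.  IPSTATS_MIB_INDISCARDS is assumed to come from <linux/snmp.h> via
 * net/snmp.h; the wrapper name below is hypothetical.
 */
static inline void ip6_count_in_discard_sketch(struct net *net,
                                               struct inet6_dev *idev)
{
        /* bumps both the per-device and the per-netns InDiscards counters */
        IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
}
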
struct ip6_ra_chain {
        struct ip6_ra_chain     *next;
        struct sock             *sk;
        int                     sel;
        void                    (*destructor)(struct sock *);
};

extern struct ip6_ra_chain *ip6_ra_chain;
extern rwlock_t ip6_ra_lock;

/*
 * This structure is prepared by the protocol when parsing ancillary
 * data and is then passed to the IPv6 layer.
 */

struct ipv6_txoptions {
        /* Length of this structure */
        int                     tot_len;

        /* length of extension headers */

        __u16                   opt_flen;       /* after fragment hdr */
        __u16                   opt_nflen;      /* before fragment hdr */

        struct ipv6_opt_hdr     *hopopt;
        struct ipv6_opt_hdr     *dst0opt;
        struct ipv6_rt_hdr      *srcrt;         /* Routing Header */
        struct ipv6_opt_hdr     *dst1opt;

        /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};

struct ip6_flowlabel {
        struct ip6_flowlabel __rcu *next;
        __be32                  label;
        atomic_t                users;
        struct in6_addr         dst;
        struct ipv6_txoptions   *opt;
        unsigned long           linger;
        struct rcu_head         rcu;
        u8                      share;
        union {
                struct pid *pid;
                kuid_t uid;
        } owner;
        unsigned long           lastuse;
        unsigned long           expires;
        struct net              *fl_net;
};

#define IPV6_FLOWINFO_MASK      cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK     cpu_to_be32(0x000FFFFF)
#define IPV6_TCLASS_MASK        (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT       20

struct ipv6_fl_socklist {
        struct ipv6_fl_socklist __rcu   *next;
        struct ip6_flowlabel            *fl;
        struct rcu_head                 rcu;
};

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
        if (fl)
                atomic_dec(&fl->users);
}

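/*
 * Usage sketch (illustrative only): fl6_sock_lookup() is expected to return
 * a flow label with its "users" count held, so a successful lookup is paired
 * with fl6_sock_release(), which is safe to call on NULL (see above).  The
 * helper name is hypothetical and error handling is elided.
 */
static inline void fl6_lookup_release_sketch(struct sock *sk, __be32 label)
{
        struct ip6_flowlabel *fl = fl6_sock_lookup(sk, label);

        /* ... inspect fl->label / fl->opt here when fl is non-NULL ... */
        fl6_sock_release(fl);
}
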
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                               struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
                                        struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
                                          struct ipv6_opt_hdr __user *newopt,
                                          int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);

static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
        /* If forwarding is enabled, router advertisements are not accepted
         * unless the special hybrid mode (accept_ra=2) is enabled.
         */
        return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
               idev->cnf.accept_ra;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_frag_nqueues(struct net *net)
{
        return net->ipv6.frags.nqueues;
}

static inline int ip6_frag_mem(struct net *net)
{
        return sum_frag_mem_limit(&net->ipv6.frags);
}
#endif

#define IPV6_FRAG_HIGH_THRESH   (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH    (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT       (60 * HZ)       /* 60 seconds */

int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & 0xffff;
}

static inline int ipv6_addr_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
}

static inline int __ipv6_addr_src_scope(int type)
{
        return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
}

static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline bool __ipv6_addr_needs_scope_id(int type)
{
        return type & IPV6_ADDR_LINKLOCAL ||
               (type & IPV6_ADDR_MULTICAST &&
                (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
}

static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
        return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

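/*
 * Usage sketch (illustrative only, helper name hypothetical): link-local
 * addresses, and multicast addresses of interface- or link-local scope, are
 * only meaningful together with an interface, so ipv6_iface_scope_id()
 * returns the given ifindex for them and 0 for everything else.
 */
static inline __u32 ipv6_scope_id_sketch(const struct in6_addr *addr,
                                         int ifindex)
{
        /* e.g. fe80::1 -> ifindex, 2001:db8::1 -> 0 */
        return ipv6_iface_scope_id(addr, ifindex);
}
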
static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
                     const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ulm = (const unsigned long *)m;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
                  ((ul1[1] ^ ul2[1]) & ulm[1]));
#else
        return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
                  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
                  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
                  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
#endif
}

static inline void ipv6_addr_prefix(struct in6_addr *pfx,
                                    const struct in6_addr *addr,
                                    int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
        memcpy(pfx->s6_addr, addr, o);
        if (b != 0)
                pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}

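/*
 * Worked example (illustrative only, helper name hypothetical): for a /61
 * prefix, ipv6_addr_prefix() copies o = 61 >> 3 = 7 whole bytes, then keeps
 * the top b = 61 & 7 = 5 bits of the 8th byte (mask 0xff00 >> 5, i.e. 0xf8)
 * and zeroes the remainder of the prefix.
 */
static inline void ipv6_addr_prefix_sketch(struct in6_addr *pfx,
                                           const struct in6_addr *addr)
{
        ipv6_addr_prefix(pfx, addr, 61);
}
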
static inline void __ipv6_addr_set_half(__be32 *addr,
                                        __be32 wh, __be32 wl)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#if defined(__BIG_ENDIAN)
        if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
                *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
                return;
        }
#elif defined(__LITTLE_ENDIAN)
        if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
                *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
                return;
        }
#endif
#endif
        addr[0] = wh;
        addr[1] = wl;
}

static inline void ipv6_addr_set(struct in6_addr *addr,
                                 __be32 w1, __be32 w2,
                                 __be32 w3, __be32 w4)
{
        __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
        __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}

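/*
 * Usage sketch (illustrative only, helper name hypothetical): the four words
 * are given in network byte order, so the loopback address ::1 is all zeroes
 * except for htonl(1) in the last word (compare ipv6_addr_loopback() below).
 */
static inline void ipv6_set_loopback_sketch(struct in6_addr *addr)
{
        ipv6_addr_set(addr, 0, 0, 0, htonl(1));
}
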
static inline bool ipv6_addr_equal(const struct in6_addr *a1,
                                   const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
        return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
                (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
                (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
                (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
#endif
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
                                              const __be64 *a2,
                                              unsigned int len)
{
        if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
                return false;
        return true;
}

static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be64 *a1 = (const __be64 *)addr1;
        const __be64 *a2 = (const __be64 *)addr2;

        if (prefixlen >= 64) {
                if (a1[0] ^ a2[0])
                        return false;
                return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
        }
        return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
}
#else
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be32 *a1 = addr1->s6_addr32;
        const __be32 *a2 = addr2->s6_addr32;
        unsigned int pdw, pbi;

        /* check complete u32 in prefix */
        pdw = prefixlen >> 5;
        if (pdw && memcmp(a1, a2, pdw << 2))
                return false;

        /* check incomplete u32 in prefix */
        pbi = prefixlen & 0x1f;
        if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
                return false;

        return true;
}
#endif

struct inet_frag_queue;

enum ip6_defrag_users {
        IP6_DEFRAG_LOCAL_DELIVER,
        IP6_DEFRAG_CONNTRACK_IN,
        __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_OUT,
        __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

struct ip6_create_arg {
        __be32 id;
        u32 user;
        const struct in6_addr *src;
        const struct in6_addr *dst;
        u8 ecn;
};

void ip6_frag_init(struct inet_frag_queue *q, void *a);
bool ip6_frag_match(struct inet_frag_queue *q, void *a);

/*
 * Equivalent of the IPv4 reassembly queue, struct ipq.
 */
struct frag_queue {
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id */
        u32                     user;
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
        u8                      ecn;
};

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
                           struct inet_frags *frags);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;

        return (ul[0] | ul[1]) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | a->s6_addr32[3]) == 0;
#endif
}

static inline u32 ipv6_addr_hash(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;
        unsigned long x = ul[0] ^ ul[1];

        return (u32)(x ^ (x >> 32));
#else
        return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
                             a->s6_addr32[2] ^ a->s6_addr32[3]);
#endif
}

/* more secure version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];

        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
                            initval);
}

static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;

        return (ul[0] | (ul[1] ^ cpu_to_be64(1))) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
#endif
}

static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
        return (
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
                *(__be64 *)a |
#else
                (a->s6_addr32[0] | a->s6_addr32[1]) |
#endif
                (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
}

/*
 * Check for an RFC 4843 ORCHID address
 * (Overlay Routable Cryptographic Hash Identifiers)
 */
static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
        return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}

static inline void ipv6_addr_set_v4mapped(const __be32 addr,
                                          struct in6_addr *v4mapped)
{
        ipv6_addr_set(v4mapped,
                      0, 0,
                      htonl(0x0000FFFF),
                      addr);
}

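/*
 * Worked example (illustrative only, helper name hypothetical): mapping the
 * IPv4 address 192.0.2.1 (0xc0000201) produces ::ffff:192.0.2.1, i.e. the
 * words 0, 0, htonl(0x0000ffff), htonl(0xc0000201), which is exactly the
 * pattern ipv6_addr_v4mapped() above tests for.
 */
static inline void ipv6_v4mapped_sketch(struct in6_addr *v6)
{
        ipv6_addr_set_v4mapped(htonl(0xc0000201), v6);  /* 192.0.2.1 */
}
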
/*
 * Find the first differing bit between two addresses.
 * The address length must be a multiple of 32 bits.
 */
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
        const __be32 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 2;

        for (i = 0; i < addrlen; i++) {
                __be32 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 32 + 31 - __fls(ntohl(xb));
        }

        /*
         * We should *never* get to this point, since that
         * would mean the addresses are equal.
         *
         * However, we do get to it 8) And exactly when the
         * addresses are equal 8)
         *
         * ip route add 1111::/128 via ...
         * ip route add 1111::/64 via ...
         * and we are here.
         *
         * Ideally, this function should stop the comparison
         * at the prefix length. It does not, but that is still OK
         * as long as the returned value is greater than the
         * prefix length.
         * --ANK (980803)
         */
        return addrlen << 5;
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
{
        const __be64 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 3;

        for (i = 0; i < addrlen; i++) {
                __be64 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 64 + 63 - __fls(be64_to_cpu(xb));
        }

        return addrlen << 6;
}
#endif

static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        if (__builtin_constant_p(addrlen) && !(addrlen & 7))
                return __ipv6_addr_diff64(token1, token2, addrlen);
#endif
        return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}

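/*
 * Worked example (illustrative only): for 2001:db8::1 and 2001:db8::2 the
 * only differing 32-bit word is the last one, 0x00000001 ^ 0x00000002 = 0x3
 * and __fls(3) = 1, so ipv6_addr_diff() returns 3 * 32 + 31 - 1 = 126: the
 * two addresses first differ at bit 126 and share a /126 prefix.
 */
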
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);

int ip6_dst_hoplimit(struct dst_entry *dst);

/*
 * Header manipulation
 */
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
                                __be32 flowlabel)
{
        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}

static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}

static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
}

static inline u8 ip6_tclass(__be32 flowinfo)
{
        return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}
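
/*
 * Layout sketch (illustrative only, helper name hypothetical): the first
 * 32 bits of the IPv6 header are version (4 bits), traffic class (8 bits)
 * and flow label (20 bits).  ip6_flow_hdr() assembles that word and
 * ip6_flowinfo()/ip6_flowlabel()/ip6_tclass() take it apart again.
 */
static inline void ip6_flow_hdr_sketch(struct ipv6hdr *hdr)
{
        /* version 6, traffic class 0, flow label 0 -> first word 0x60000000 */
        ip6_flow_hdr(hdr, 0, 0);
}
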
/*
 * Prototypes exported by ipv6
 */

/*
 * rcv function (called from netdevice level)
 */

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);

int ip6_rcv_finish(struct sk_buff *skb);

/*
 * upper-layer output functions
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt, int tclass);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen, int hlimit,
                    int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
                    struct rt6_info *rt, unsigned int flags, int dontfrag);

int ip6_push_pending_frames(struct sock *sk);

void ip6_flush_pending_frames(struct sock *sk);

int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst);
struct dst_entry *ip6_blackhole_route(struct net *net,
                                      struct dst_entry *orig_dst);

/*
 * skb processing functions
 */

int ip6_output(struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);

int __ip6_local_out(struct sk_buff *skb);
int ip6_local_out(struct sk_buff *skb);

/*
 * Extension header (options) processing
 */

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                          u8 *proto, struct in6_addr **daddr_p);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                         u8 *proto);

int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
                     __be16 *frag_offp);

bool ipv6_ext_hdr(u8 nexthdr);

enum {
        IP6_FH_F_FRAG           = (1 << 0),
        IP6_FH_F_AUTH           = (1 << 1),
        IP6_FH_F_SKIP_RH        = (1 << 2),
};

/* find specified header and get offset to it */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
                  unsigned short *fragoff, int *fragflg);

int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);

struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig);

/*
 * socket options (ipv6_sockglue.c)
 */

int ipv6_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen);
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen);

int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);

int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
                     int *addr_len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                     u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);

int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
                  int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);

int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk);

/*
 * reassembly.c
 */
extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;

struct group_source_req;
struct group_filter;

int ip6_mc_source(int add, int omode, struct sock *sk,
                  struct group_source_req *pgsr);
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                  struct group_filter __user *optval, int __user *optlen);

#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
void ac6_proc_exit(struct net *net);
int raw6_proc_init(void);
void raw6_proc_exit(void);
int tcp6_proc_init(struct net *net);
void tcp6_proc_exit(struct net *net);
int udp6_proc_init(struct net *net);
void udp6_proc_exit(struct net *net);
int udplite6_proc_init(void);
void udplite6_proc_exit(void);
int ipv6_misc_proc_init(void);
void ipv6_misc_proc_exit(void);
int snmp6_register_dev(struct inet6_dev *idev);
int snmp6_unregister_dev(struct inet6_dev *idev);

#else
static inline int ac6_proc_init(struct net *net) { return 0; }
static inline void ac6_proc_exit(struct net *net) { }
static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; }
static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif

#ifdef CONFIG_SYSCTL
extern struct ctl_table ipv6_route_table_template[];

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif

#endif /* _NET_IPV6_H */