[IPSEC]: Replace x->type->{local,remote}_addr with flags
include/net/xfrm.h
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15
16 #include <net/sock.h>
17 #include <net/dst.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/ipv6.h>
21 #include <net/ip6_fib.h>
22
23 #define XFRM_PROTO_ESP 50
24 #define XFRM_PROTO_AH 51
25 #define XFRM_PROTO_COMP 108
26 #define XFRM_PROTO_IPIP 4
27 #define XFRM_PROTO_IPV6 41
28 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
29 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
30
31 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
32 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
33 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
34 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
35 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
36
37 extern struct sock *xfrm_nl;
38 extern u32 sysctl_xfrm_aevent_etime;
39 extern u32 sysctl_xfrm_aevent_rseqth;
40 extern int sysctl_xfrm_larval_drop;
41 extern u32 sysctl_xfrm_acq_expires;
42
43 extern struct mutex xfrm_cfg_mutex;
44
45 /* Organization of SPD aka "XFRM rules"
46 ------------------------------------
47
48 Basic objects:
49 - policy rule, struct xfrm_policy (=SPD entry)
50 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
51 - instance of a transformer, struct xfrm_state (=SA)
52 - template to clone xfrm_state, struct xfrm_tmpl
53
54 The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
55 (To be compatible with existing pfkeyv2 implementations,
56 many rules with priority of 0x7fffffff are allowed to exist and
57 such rules are ordered in an unpredictable way, thanks to bsd folks.)
58
59 Lookup is a plain linear search until the first matching selector.
60
61 If "action" is "block", then we prohibit the flow, otherwise:
62 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
63 policy entry has list of up to XFRM_MAX_DEPTH transformations,
64 described by templates xfrm_tmpl. Each template is resolved
65 to a complete xfrm_state (see below) and we pack bundle of transformations
66 to a dst_entry returned to requestor.
67
68 dst -. xfrm .-> xfrm_state #1
69 |---. child .-> dst -. xfrm .-> xfrm_state #2
70 |---. child .-> dst -. xfrm .-> xfrm_state #3
71 |---. child .-> NULL
72
73 Bundles are cached in the xfrm_policy struct (field ->bundles).
74
75
76 Resolution of xfrm_tmpl
77 -----------------------
78 Template contains:
79 1. ->mode Mode: transport or tunnel
80 2. ->id.proto Protocol: AH/ESP/IPCOMP
81 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
82 Q: allow resolving a security gateway?
83 4. ->id.spi If not zero, static SPI.
84 5. ->saddr Local tunnel endpoint, ignored for transport mode.
85 6. ->algos List of allowed algos. Plain bitmask now.
86 Q: ealgos, aalgos, calgos. What a mess...
87 7. ->share Sharing mode.
88 Q: how to implement a private sharing mode? By adding struct sock*
89 to the flow id?
90
91 Having this template, we search the SAD for entries with an
92 appropriate mode/proto/algo, permitted by the selector.
93 If no appropriate entry is found, one is requested from the key manager.
94
95 PROBLEMS:
96 Q: How do we find all the bundles referring to a physical path for
97 PMTU discovery? It seems dst would have to contain a list of all its
98 parents... and we would descend into an infinite locking-hierarchy disaster.
99 No! It is easier: we will not search for them, we let them find us.
100 We add a genid to each dst plus a pointer to the genid of the raw IP route;
101 PMTU discovery updates the pmtu on the raw IP route and increases its genid.
102 dst_check() will see this at the top level and trigger a resync of the
103 metrics. Plus, it will be done via sk->sk_dst_cache. Solved.
104 */
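
To make the bundle diagram above concrete, here is a minimal illustrative sketch (not part of this header) of walking a resolved bundle: each dst in the chain carries one xfrm_state and the chain terminates at a NULL child. The helper name is invented, and it assumes the ->child and ->xfrm members that struct dst_entry has in this kernel generation.

	/* Hypothetical helper: count the transformations in a bundle. */
	static inline int example_bundle_depth(struct dst_entry *dst)
	{
		int depth = 0;

		for (; dst; dst = dst->child)
			if (dst->xfrm)
				depth++;
		return depth;
	}
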
105
106 /* Full description of state of transformer. */
107 struct xfrm_state
108 {
109 /* Note: bydst is re-used during gc */
110 struct hlist_node bydst;
111 struct hlist_node bysrc;
112 struct hlist_node byspi;
113
114 atomic_t refcnt;
115 spinlock_t lock;
116
117 struct xfrm_id id;
118 struct xfrm_selector sel;
119
120 u32 genid;
121
122 /* Key manager bits */
123 struct {
124 u8 state;
125 u8 dying;
126 u32 seq;
127 } km;
128
129 /* Parameters of this state. */
130 struct {
131 u32 reqid;
132 u8 mode;
133 u8 replay_window;
134 u8 aalgo, ealgo, calgo;
135 u8 flags;
136 u16 family;
137 xfrm_address_t saddr;
138 int header_len;
139 int trailer_len;
140 } props;
141
142 struct xfrm_lifetime_cfg lft;
143
144 /* Data for transformer */
145 struct xfrm_algo *aalg;
146 struct xfrm_algo *ealg;
147 struct xfrm_algo *calg;
148
149 /* Data for encapsulator */
150 struct xfrm_encap_tmpl *encap;
151
152 /* Data for care-of address */
153 xfrm_address_t *coaddr;
154
155 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
156 struct xfrm_state *tunnel;
157
158 /* If a tunnel, number of users + 1 */
159 atomic_t tunnel_users;
160
161 /* State for replay detection */
162 struct xfrm_replay_state replay;
163
164 /* Replay detection state at the time we sent the last notification */
165 struct xfrm_replay_state preplay;
166
167 /* Internal flags; at the moment this only holds state for a
168 * delayed aevent.
169 */
170 u32 xflags;
171
172 /* Replay detection notification settings */
173 u32 replay_maxage;
174 u32 replay_maxdiff;
175
176 /* Replay detection notification timer */
177 struct timer_list rtimer;
178
179 /* Statistics */
180 struct xfrm_stats stats;
181
182 struct xfrm_lifetime_cur curlft;
183 struct timer_list timer;
184
185 /* Last used time */
186 u64 lastused;
187
188 /* Reference to data common to all the instances of this
189 * transformer. */
190 struct xfrm_type *type;
191 struct xfrm_mode *inner_mode;
192 struct xfrm_mode *outer_mode;
193
194 /* Security context */
195 struct xfrm_sec_ctx *security;
196
197 /* Private data of this transformer, format is opaque,
198 * interpreted by xfrm_type methods. */
199 void *data;
200 };
201
202 /* xflags - make enum if more show up */
203 #define XFRM_TIME_DEFER 1
204
205 enum {
206 XFRM_STATE_VOID,
207 XFRM_STATE_ACQ,
208 XFRM_STATE_VALID,
209 XFRM_STATE_ERROR,
210 XFRM_STATE_EXPIRED,
211 XFRM_STATE_DEAD
212 };
213
214 /* callback structure passed from either netlink or pfkey */
215 struct km_event
216 {
217 union {
218 u32 hard;
219 u32 proto;
220 u32 byid;
221 u32 aevent;
222 u32 type;
223 } data;
224
225 u32 seq;
226 u32 pid;
227 u32 event;
228 };
229
230 struct xfrm_type;
231 struct xfrm_dst;
232 struct xfrm_policy_afinfo {
233 unsigned short family;
234 struct dst_ops *dst_ops;
235 void (*garbage_collect)(void);
236 int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl);
237 int (*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr);
238 struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
239 int (*bundle_create)(struct xfrm_policy *policy,
240 struct xfrm_state **xfrm,
241 int nx,
242 struct flowi *fl,
243 struct dst_entry **dst_p);
244 void (*decode_session)(struct sk_buff *skb,
245 struct flowi *fl);
246 };
247
248 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
249 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
250 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
251 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
252
253 struct xfrm_tmpl;
254 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
255 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
256 extern int __xfrm_state_delete(struct xfrm_state *x);
257
258 struct xfrm_state_afinfo {
259 unsigned int family;
260 struct module *owner;
261 struct xfrm_type *type_map[IPPROTO_MAX];
262 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
263 int (*init_flags)(struct xfrm_state *x);
264 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
265 struct xfrm_tmpl *tmpl,
266 xfrm_address_t *daddr, xfrm_address_t *saddr);
267 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
268 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
269 int (*output)(struct sk_buff *skb);
270 };
271
272 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
273 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
274
275 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
276
277 struct xfrm_type
278 {
279 char *description;
280 struct module *owner;
281 __u8 proto;
282 __u8 flags;
283 #define XFRM_TYPE_NON_FRAGMENT 1
284 #define XFRM_TYPE_REPLAY_PROT 2
285 #define XFRM_TYPE_LOCAL_COADDR 4
286 #define XFRM_TYPE_REMOTE_COADDR 8
287
288 int (*init_state)(struct xfrm_state *x);
289 void (*destructor)(struct xfrm_state *);
290 int (*input)(struct xfrm_state *, struct sk_buff *skb);
291 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
292 int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
293 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
294 /* Estimate the maximal size of the result of transforming a datagram */
295 u32 (*get_mtu)(struct xfrm_state *, int size);
296 };
297
298 extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
299 extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family);
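
struct xfrm_type, together with xfrm_register_type(), is the contract a transform protocol (AH, ESP, IPComp) fulfils. Below is a hedged sketch of a minimal registration; the example_* names are invented, the stubs do no real processing, and a real module would also set .owner = THIS_MODULE and fill in the remaining callbacks.

	/* Hypothetical stubs: a real type performs the actual transform here. */
	static int example_init_state(struct xfrm_state *x)
	{
		return 0;
	}

	static int example_input(struct xfrm_state *x, struct sk_buff *skb)
	{
		return -EOPNOTSUPP;
	}

	static int example_output(struct xfrm_state *x, struct sk_buff *skb)
	{
		return -EOPNOTSUPP;
	}

	static struct xfrm_type example_type = {
		.description	= "EXAMPLE",
		.proto		= XFRM_PROTO_ESP,
		.flags		= XFRM_TYPE_REPLAY_PROT,
		.init_state	= example_init_state,
		.input		= example_input,
		.output		= example_output,
	};

	/* Registration is per address family; 0 is returned on success. */
	static int example_register_type(void)
	{
		return xfrm_register_type(&example_type, AF_INET);
	}
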
300
301 struct xfrm_mode {
302 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
303
304 /*
305 * Add encapsulation header.
306 *
307 * On exit, the transport header will be set to the start of the
308 * encapsulation header to be filled in by x->type->output and
309 * the mac header will be set to the nextheader (protocol for
310 * IPv4) field of the extension header directly preceding the
311 * encapsulation header, or in its absence, that of the top IP
312 * header. The value of the network header will always point
313 * to the top IP header while skb->data will point to the payload.
314 */
315 int (*output)(struct xfrm_state *x,struct sk_buff *skb);
316
317 struct xfrm_state_afinfo *afinfo;
318 struct module *owner;
319 unsigned int encap;
320 int flags;
321 };
322
323 /* Flags for xfrm_mode. */
324 enum {
325 XFRM_MODE_FLAG_TUNNEL = 1,
326 };
327
328 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
329 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
330
331 struct xfrm_tmpl
332 {
333 /* id in template is interpreted as:
334 * daddr - destination of the tunnel; may be zero for transport mode.
335 * spi - zero to acquire an SPI; non-zero for a static SPI, in which
336 * case daddr must be fixed too.
337 * proto - AH/ESP/IPCOMP
338 */
339 struct xfrm_id id;
340
341 /* Source address of the tunnel. Ignored if it is not a tunnel. */
342 xfrm_address_t saddr;
343
344 unsigned short encap_family;
345
346 __u32 reqid;
347
348 /* Mode: transport, tunnel etc. */
349 __u8 mode;
350
351 /* Sharing mode: unique, this session only, this user only etc. */
352 __u8 share;
353
354 /* May skip this transformation if no SA is found */
355 __u8 optional;
356
357 /* Bit mask of algos allowed for acquisition */
358 __u32 aalgos;
359 __u32 ealgos;
360 __u32 calgos;
361 };
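
As an illustration of the field semantics documented in the comment above, here is a hypothetical template asking for tunnel-mode ESP between two IPv4 gateways; the helper name, addresses (TEST-NET values) and reqid are example values only.

	/* Hypothetical: fill a template for tunnel-mode ESP over IPv4. */
	static inline void example_fill_esp_tunnel_tmpl(struct xfrm_tmpl *t)
	{
		*t = (struct xfrm_tmpl) {
			.id.proto	= XFRM_PROTO_ESP,
			.id.daddr.a4	= htonl(0xc0000201),	/* 192.0.2.1: remote tunnel endpoint */
			.id.spi		= 0,			/* zero: acquire an SPI dynamically */
			.saddr.a4	= htonl(0xc0000202),	/* 192.0.2.2: local tunnel endpoint */
			.encap_family	= AF_INET,
			.reqid		= 1,
			.mode		= XFRM_MODE_TUNNEL,
			.share		= XFRM_SHARE_ANY,
			.optional	= 0,			/* an SA must be found or acquired */
			.aalgos		= ~0,			/* allow any algorithms */
			.ealgos		= ~0,
			.calgos		= ~0,
		};
	}
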
362
363 #define XFRM_MAX_DEPTH 6
364
365 struct xfrm_policy
366 {
367 struct xfrm_policy *next;
368 struct hlist_node bydst;
369 struct hlist_node byidx;
370
371 /* This lock protects all elements except the list entries. */
372 rwlock_t lock;
373 atomic_t refcnt;
374 struct timer_list timer;
375
376 u32 priority;
377 u32 index;
378 struct xfrm_selector selector;
379 struct xfrm_lifetime_cfg lft;
380 struct xfrm_lifetime_cur curlft;
381 struct dst_entry *bundles;
382 u16 family;
383 u8 type;
384 u8 action;
385 u8 flags;
386 u8 dead;
387 u8 xfrm_nr;
388 /* XXX 1 byte hole, try to pack */
389 struct xfrm_sec_ctx *security;
390 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
391 };
392
393 struct xfrm_migrate {
394 xfrm_address_t old_daddr;
395 xfrm_address_t old_saddr;
396 xfrm_address_t new_daddr;
397 xfrm_address_t new_saddr;
398 u8 proto;
399 u8 mode;
400 u16 reserved;
401 u32 reqid;
402 u16 old_family;
403 u16 new_family;
404 };
405
406 #define XFRM_KM_TIMEOUT 30
407 /* which seqno */
408 #define XFRM_REPLAY_SEQ 1
409 #define XFRM_REPLAY_OSEQ 2
410 #define XFRM_REPLAY_SEQ_MASK 3
411 /* what happened */
412 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
413 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
414
415 /* default aevent timeout in units of 100ms */
416 #define XFRM_AE_ETIME 10
417 /* Async Event timer multiplier */
418 #define XFRM_AE_ETH_M 10
419 /* default seq threshold size */
420 #define XFRM_AE_SEQT_SIZE 2
421
422 struct xfrm_mgr
423 {
424 struct list_head list;
425 char *id;
426 int (*notify)(struct xfrm_state *x, struct km_event *c);
427 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
428 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
429 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
430 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
431 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
432 int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
433 };
434
435 extern int xfrm_register_km(struct xfrm_mgr *km);
436 extern int xfrm_unregister_km(struct xfrm_mgr *km);
437
438 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
439
440 /*
441 * This structure is used while packets are being transformed by
442 * IPsec. As soon as the packet leaves IPsec, the area beyond the
443 * generic IP part may be overwritten.
444 */
445 struct xfrm_skb_cb {
446 union {
447 struct inet_skb_parm h4;
448 struct inet6_skb_parm h6;
449 } header;
450
451 /* Sequence number for replay protection. */
452 u64 seq;
453 };
454
455 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
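
A small hypothetical usage sketch of the control block macro above: a transform's output path could stash the replay sequence number in the per-packet cb area and read it back later, as long as the packet has not yet left IPsec.

	/* Hypothetical helpers built on XFRM_SKB_CB(). */
	static inline void example_store_seq(struct sk_buff *skb, u64 seq)
	{
		XFRM_SKB_CB(skb)->seq = seq;
	}

	static inline u64 example_load_seq(struct sk_buff *skb)
	{
		return XFRM_SKB_CB(skb)->seq;
	}
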
456
457 /* Audit Information */
458 struct xfrm_audit
459 {
460 u32 loginuid;
461 u32 secid;
462 };
463
464 #ifdef CONFIG_AUDITSYSCALL
465 static inline struct audit_buffer *xfrm_audit_start(u32 auid, u32 sid)
466 {
467 struct audit_buffer *audit_buf = NULL;
468 char *secctx;
469 u32 secctx_len;
470
471 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
472 AUDIT_MAC_IPSEC_EVENT);
473 if (audit_buf == NULL)
474 return NULL;
475
476 audit_log_format(audit_buf, "auid=%u", auid);
477
478 if (sid != 0 &&
479 security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) {
480 audit_log_format(audit_buf, " subj=%s", secctx);
481 security_release_secctx(secctx, secctx_len);
482 } else
483 audit_log_task_context(audit_buf);
484 return audit_buf;
485 }
486
487 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
488 u32 auid, u32 sid);
489 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
490 u32 auid, u32 sid);
491 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
492 u32 auid, u32 sid);
493 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
494 u32 auid, u32 sid);
495 #else
496 #define xfrm_audit_policy_add(x, r, a, s) do { ; } while (0)
497 #define xfrm_audit_policy_delete(x, r, a, s) do { ; } while (0)
498 #define xfrm_audit_state_add(x, r, a, s) do { ; } while (0)
499 #define xfrm_audit_state_delete(x, r, a, s) do { ; } while (0)
500 #endif /* CONFIG_AUDITSYSCALL */
501
502 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
503 {
504 if (likely(policy != NULL))
505 atomic_inc(&policy->refcnt);
506 }
507
508 extern void __xfrm_policy_destroy(struct xfrm_policy *policy);
509
510 static inline void xfrm_pol_put(struct xfrm_policy *policy)
511 {
512 if (atomic_dec_and_test(&policy->refcnt))
513 __xfrm_policy_destroy(policy);
514 }
515
516 #ifdef CONFIG_XFRM_SUB_POLICY
517 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
518 {
519 int i;
520 for (i = npols - 1; i >= 0; --i)
521 xfrm_pol_put(pols[i]);
522 }
523 #else
524 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
525 {
526 xfrm_pol_put(pols[0]);
527 }
528 #endif
529
530 extern void __xfrm_state_destroy(struct xfrm_state *);
531
532 static inline void __xfrm_state_put(struct xfrm_state *x)
533 {
534 atomic_dec(&x->refcnt);
535 }
536
537 static inline void xfrm_state_put(struct xfrm_state *x)
538 {
539 if (atomic_dec_and_test(&x->refcnt))
540 __xfrm_state_destroy(x);
541 }
542
543 static inline void xfrm_state_hold(struct xfrm_state *x)
544 {
545 atomic_inc(&x->refcnt);
546 }
547
548 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
549 {
550 __be32 *a1 = token1;
551 __be32 *a2 = token2;
552 int pdw;
553 int pbi;
554
555 pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
556 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
557
558 if (pdw)
559 if (memcmp(a1, a2, pdw << 2))
560 return 0;
561
562 if (pbi) {
563 __be32 mask;
564
565 mask = htonl((0xffffffff) << (32 - pbi));
566
567 if ((a1[pdw] ^ a2[pdw]) & mask)
568 return 0;
569 }
570
571 return 1;
572 }
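
A hypothetical usage note for addr_match(): the prefixlen argument counts leading bits, so 0 matches any pair of addresses while 32 (IPv4) or 128 (IPv6) requires full equality. For example, comparing two IPv6 addresses under a /64 prefix:

	/* Hypothetical: true if both addresses share the same /64 prefix. */
	static inline int example_same_v6_subnet(struct in6_addr *a,
						 struct in6_addr *b)
	{
		return addr_match(a, b, 64);	/* compare the top 64 bits only */
	}
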
573
574 static __inline__
575 __be16 xfrm_flowi_sport(struct flowi *fl)
576 {
577 __be16 port;
578 switch(fl->proto) {
579 case IPPROTO_TCP:
580 case IPPROTO_UDP:
581 case IPPROTO_UDPLITE:
582 case IPPROTO_SCTP:
583 port = fl->fl_ip_sport;
584 break;
585 case IPPROTO_ICMP:
586 case IPPROTO_ICMPV6:
587 port = htons(fl->fl_icmp_type);
588 break;
589 case IPPROTO_MH:
590 port = htons(fl->fl_mh_type);
591 break;
592 default:
593 port = 0; /*XXX*/
594 }
595 return port;
596 }
597
598 static __inline__
599 __be16 xfrm_flowi_dport(struct flowi *fl)
600 {
601 __be16 port;
602 switch(fl->proto) {
603 case IPPROTO_TCP:
604 case IPPROTO_UDP:
605 case IPPROTO_UDPLITE:
606 case IPPROTO_SCTP:
607 port = fl->fl_ip_dport;
608 break;
609 case IPPROTO_ICMP:
610 case IPPROTO_ICMPV6:
611 port = htons(fl->fl_icmp_code);
612 break;
613 default:
614 port = 0; /*XXX*/
615 }
616 return port;
617 }
618
619 extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
620 unsigned short family);
621
622 #ifdef CONFIG_SECURITY_NETWORK_XFRM
623 /* If neither has a context --> match
624 * Otherwise, both must have a context and the SIDs, DOI and algorithm must match
625 */
626 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
627 {
628 return ((!s1 && !s2) ||
629 (s1 && s2 &&
630 (s1->ctx_sid == s2->ctx_sid) &&
631 (s1->ctx_doi == s2->ctx_doi) &&
632 (s1->ctx_alg == s2->ctx_alg)));
633 }
634 #else
635 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
636 {
637 return 1;
638 }
639 #endif
640
641 /* A struct encoding a bundle of transformations to apply to some set of flows.
642 *
643 * dst->child points to the next element of the bundle.
644 * dst->xfrm points to an instance of a transformer.
645 *
646 * Due to unfortunate limitations of the current routing cache, which we
647 * have no time to fix, it mirrors struct rtable and is bound to the same
648 * routing key, including saddr and daddr. However, we can have many
649 * bundles differing by session id. All the bundles grow from a parent
650 * policy rule.
651 */
652 struct xfrm_dst
653 {
654 union {
655 struct dst_entry dst;
656 struct rtable rt;
657 struct rt6_info rt6;
658 } u;
659 struct dst_entry *route;
660 #ifdef CONFIG_XFRM_SUB_POLICY
661 struct flowi *origin;
662 struct xfrm_selector *partner;
663 #endif
664 u32 genid;
665 u32 route_mtu_cached;
666 u32 child_mtu_cached;
667 u32 route_cookie;
668 u32 path_cookie;
669 };
670
671 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
672 {
673 dst_release(xdst->route);
674 if (likely(xdst->u.dst.xfrm))
675 xfrm_state_put(xdst->u.dst.xfrm);
676 #ifdef CONFIG_XFRM_SUB_POLICY
677 kfree(xdst->origin);
678 xdst->origin = NULL;
679 kfree(xdst->partner);
680 xdst->partner = NULL;
681 #endif
682 }
683
684 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
685
686 struct sec_path
687 {
688 atomic_t refcnt;
689 int len;
690 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
691 };
692
693 static inline struct sec_path *
694 secpath_get(struct sec_path *sp)
695 {
696 if (sp)
697 atomic_inc(&sp->refcnt);
698 return sp;
699 }
700
701 extern void __secpath_destroy(struct sec_path *sp);
702
703 static inline void
704 secpath_put(struct sec_path *sp)
705 {
706 if (sp && atomic_dec_and_test(&sp->refcnt))
707 __secpath_destroy(sp);
708 }
709
710 extern struct sec_path *secpath_dup(struct sec_path *src);
711
712 static inline void
713 secpath_reset(struct sk_buff *skb)
714 {
715 #ifdef CONFIG_XFRM
716 secpath_put(skb->sp);
717 skb->sp = NULL;
718 #endif
719 }
720
721 static inline int
722 xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
723 {
724 switch (family) {
725 case AF_INET:
726 return addr->a4 == 0;
727 case AF_INET6:
728 return ipv6_addr_any((struct in6_addr *)&addr->a6);
729 }
730 return 0;
731 }
732
733 static inline int
734 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
735 {
736 return (tmpl->saddr.a4 &&
737 tmpl->saddr.a4 != x->props.saddr.a4);
738 }
739
740 static inline int
741 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
742 {
743 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
744 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
745 }
746
747 static inline int
748 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
749 {
750 switch (family) {
751 case AF_INET:
752 return __xfrm4_state_addr_cmp(tmpl, x);
753 case AF_INET6:
754 return __xfrm6_state_addr_cmp(tmpl, x);
755 }
756 return !0;
757 }
758
759 #ifdef CONFIG_XFRM
760
761 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
762
763 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
764 {
765 if (sk && sk->sk_policy[XFRM_POLICY_IN])
766 return __xfrm_policy_check(sk, dir, skb, family);
767
768 return (!xfrm_policy_count[dir] && !skb->sp) ||
769 (skb->dst->flags & DST_NOPOLICY) ||
770 __xfrm_policy_check(sk, dir, skb, family);
771 }
772
773 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
774 {
775 return xfrm_policy_check(sk, dir, skb, AF_INET);
776 }
777
778 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
779 {
780 return xfrm_policy_check(sk, dir, skb, AF_INET6);
781 }
782
783 extern int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family);
784 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
785
786 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
787 {
788 return !xfrm_policy_count[XFRM_POLICY_OUT] ||
789 (skb->dst->flags & DST_NOXFRM) ||
790 __xfrm_route_forward(skb, family);
791 }
792
793 static inline int xfrm4_route_forward(struct sk_buff *skb)
794 {
795 return xfrm_route_forward(skb, AF_INET);
796 }
797
798 static inline int xfrm6_route_forward(struct sk_buff *skb)
799 {
800 return xfrm_route_forward(skb, AF_INET6);
801 }
802
803 extern int __xfrm_sk_clone_policy(struct sock *sk);
804
805 static inline int xfrm_sk_clone_policy(struct sock *sk)
806 {
807 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
808 return __xfrm_sk_clone_policy(sk);
809 return 0;
810 }
811
812 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
813
814 static inline void xfrm_sk_free_policy(struct sock *sk)
815 {
816 if (unlikely(sk->sk_policy[0] != NULL)) {
817 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
818 sk->sk_policy[0] = NULL;
819 }
820 if (unlikely(sk->sk_policy[1] != NULL)) {
821 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
822 sk->sk_policy[1] = NULL;
823 }
824 }
825
826 #else
827
828 static inline void xfrm_sk_free_policy(struct sock *sk) {}
829 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
830 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
831 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
832 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
833 {
834 return 1;
835 }
836 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
837 {
838 return 1;
839 }
840 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
841 {
842 return 1;
843 }
844 #endif
845
846 static __inline__
847 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
848 {
849 switch (family){
850 case AF_INET:
851 return (xfrm_address_t *)&fl->fl4_dst;
852 case AF_INET6:
853 return (xfrm_address_t *)&fl->fl6_dst;
854 }
855 return NULL;
856 }
857
858 static __inline__
859 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
860 {
861 switch (family){
862 case AF_INET:
863 return (xfrm_address_t *)&fl->fl4_src;
864 case AF_INET6:
865 return (xfrm_address_t *)&fl->fl6_src;
866 }
867 return NULL;
868 }
869
870 static __inline__ int
871 __xfrm4_state_addr_check(struct xfrm_state *x,
872 xfrm_address_t *daddr, xfrm_address_t *saddr)
873 {
874 if (daddr->a4 == x->id.daddr.a4 &&
875 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
876 return 1;
877 return 0;
878 }
879
880 static __inline__ int
881 __xfrm6_state_addr_check(struct xfrm_state *x,
882 xfrm_address_t *daddr, xfrm_address_t *saddr)
883 {
884 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
885 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
886 ipv6_addr_any((struct in6_addr *)saddr) ||
887 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
888 return 1;
889 return 0;
890 }
891
892 static __inline__ int
893 xfrm_state_addr_check(struct xfrm_state *x,
894 xfrm_address_t *daddr, xfrm_address_t *saddr,
895 unsigned short family)
896 {
897 switch (family) {
898 case AF_INET:
899 return __xfrm4_state_addr_check(x, daddr, saddr);
900 case AF_INET6:
901 return __xfrm6_state_addr_check(x, daddr, saddr);
902 }
903 return 0;
904 }
905
906 static __inline__ int
907 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
908 unsigned short family)
909 {
910 switch (family) {
911 case AF_INET:
912 return __xfrm4_state_addr_check(x,
913 (xfrm_address_t *)&fl->fl4_dst,
914 (xfrm_address_t *)&fl->fl4_src);
915 case AF_INET6:
916 return __xfrm6_state_addr_check(x,
917 (xfrm_address_t *)&fl->fl6_dst,
918 (xfrm_address_t *)&fl->fl6_src);
919 }
920 return 0;
921 }
922
923 static inline int xfrm_state_kern(struct xfrm_state *x)
924 {
925 return atomic_read(&x->tunnel_users);
926 }
927
928 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
929 {
930 return (!userproto || proto == userproto ||
931 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
932 proto == IPPROTO_ESP ||
933 proto == IPPROTO_COMP)));
934 }
935
936 /*
937 * xfrm algorithm information
938 */
939 struct xfrm_algo_auth_info {
940 u16 icv_truncbits;
941 u16 icv_fullbits;
942 };
943
944 struct xfrm_algo_encr_info {
945 u16 blockbits;
946 u16 defkeybits;
947 };
948
949 struct xfrm_algo_comp_info {
950 u16 threshold;
951 };
952
953 struct xfrm_algo_desc {
954 char *name;
955 char *compat;
956 u8 available:1;
957 union {
958 struct xfrm_algo_auth_info auth;
959 struct xfrm_algo_encr_info encr;
960 struct xfrm_algo_comp_info comp;
961 } uinfo;
962 struct sadb_alg desc;
963 };
964
965 /* XFRM tunnel handlers. */
966 struct xfrm_tunnel {
967 int (*handler)(struct sk_buff *skb);
968 int (*err_handler)(struct sk_buff *skb, __u32 info);
969
970 struct xfrm_tunnel *next;
971 int priority;
972 };
973
974 struct xfrm6_tunnel {
975 int (*handler)(struct sk_buff *skb);
976 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
977 int type, int code, int offset, __be32 info);
978 struct xfrm6_tunnel *next;
979 int priority;
980 };
981
982 extern void xfrm_init(void);
983 extern void xfrm4_init(void);
984 extern void xfrm6_init(void);
985 extern void xfrm6_fini(void);
986 extern void xfrm_state_init(void);
987 extern void xfrm4_state_init(void);
988 extern void xfrm6_state_init(void);
989 extern void xfrm6_state_fini(void);
990
991 extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
992 extern struct xfrm_state *xfrm_state_alloc(void);
993 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
994 struct flowi *fl, struct xfrm_tmpl *tmpl,
995 struct xfrm_policy *pol, int *err,
996 unsigned short family);
997 extern struct xfrm_state * xfrm_stateonly_find(xfrm_address_t *daddr,
998 xfrm_address_t *saddr,
999 unsigned short family,
1000 u8 mode, u8 proto, u32 reqid);
1001 extern int xfrm_state_check_expire(struct xfrm_state *x);
1002 extern void xfrm_state_insert(struct xfrm_state *x);
1003 extern int xfrm_state_add(struct xfrm_state *x);
1004 extern int xfrm_state_update(struct xfrm_state *x);
1005 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family);
1006 extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
1007 #ifdef CONFIG_XFRM_SUB_POLICY
1008 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1009 int n, unsigned short family);
1010 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1011 int n, unsigned short family);
1012 #else
1013 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1014 int n, unsigned short family)
1015 {
1016 return -ENOSYS;
1017 }
1018
1019 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1020 int n, unsigned short family)
1021 {
1022 return -ENOSYS;
1023 }
1024 #endif
1025
1026 struct xfrmk_sadinfo {
1027 u32 sadhcnt; /* current hash bkts */
1028 u32 sadhmcnt; /* max allowed hash bkts */
1029 u32 sadcnt; /* current running count */
1030 };
1031
1032 struct xfrmk_spdinfo {
1033 u32 incnt;
1034 u32 outcnt;
1035 u32 fwdcnt;
1036 u32 inscnt;
1037 u32 outscnt;
1038 u32 fwdscnt;
1039 u32 spdhcnt;
1040 u32 spdhmcnt;
1041 };
1042
1043 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
1044 extern int xfrm_state_delete(struct xfrm_state *x);
1045 extern int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
1046 extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
1047 extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
1048 extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
1049 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1050 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1051 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1052 extern int xfrm_init_state(struct xfrm_state *x);
1053 extern int xfrm_output(struct sk_buff *skb);
1054 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1055 int encap_type);
1056 extern int xfrm4_rcv(struct sk_buff *skb);
1057
1058 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1059 {
1060 return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1061 }
1062
1063 extern int xfrm4_output(struct sk_buff *skb);
1064 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1065 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1066 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1067 extern int xfrm6_rcv(struct sk_buff *skb);
1068 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1069 xfrm_address_t *saddr, u8 proto);
1070 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1071 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1072 extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
1073 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
1074 extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
1075 extern int xfrm6_output(struct sk_buff *skb);
1076 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1077 u8 **prevhdr);
1078
1079 #ifdef CONFIG_XFRM
1080 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1081 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1082 extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family);
1083 #else
1084 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1085 {
1086 return -ENOPROTOOPT;
1087 }
1088
1089 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1090 {
1091 /* should not happen */
1092 kfree_skb(skb);
1093 return 0;
1094 }
1095
1096 static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family)
1097 {
1098 return -EINVAL;
1099 }
1100 #endif
1101
1102 struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
1103 extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *);
1104 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1105 struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
1106 struct xfrm_selector *sel,
1107 struct xfrm_sec_ctx *ctx, int delete,
1108 int *err);
1109 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err);
1110 int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
1111 u32 xfrm_get_acqseq(void);
1112 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1113 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1114 xfrm_address_t *daddr, xfrm_address_t *saddr,
1115 int create, unsigned short family);
1116 extern int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
1117 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1118 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
1119 struct flowi *fl, int family, int strict);
1120 extern void xfrm_init_pmtu(struct dst_entry *dst);
1121
1122 #ifdef CONFIG_XFRM_MIGRATE
1123 extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1124 struct xfrm_migrate *m, int num_bundles);
1125 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1126 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1127 struct xfrm_migrate *m);
1128 extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1129 struct xfrm_migrate *m, int num_bundles);
1130 #endif
1131
1132 extern wait_queue_head_t km_waitq;
1133 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1134 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1135 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1136
1137 extern void xfrm_input_init(void);
1138 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1139
1140 extern void xfrm_probe_algs(void);
1141 extern int xfrm_count_auth_supported(void);
1142 extern int xfrm_count_enc_supported(void);
1143 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1144 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1145 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1146 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1147 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1148 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1149 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1150 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1151
1152 struct hash_desc;
1153 struct scatterlist;
1154 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1155 unsigned int);
1156
1157 extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
1158 int offset, int len, icv_update_fn_t icv_update);
1159
1160 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1161 int family)
1162 {
1163 switch (family) {
1164 default:
1165 case AF_INET:
1166 return (__force __u32)a->a4 - (__force __u32)b->a4;
1167 case AF_INET6:
1168 return ipv6_addr_cmp((struct in6_addr *)a,
1169 (struct in6_addr *)b);
1170 }
1171 }
1172
1173 static inline int xfrm_policy_id2dir(u32 index)
1174 {
1175 return index & 7;
1176 }
1177
1178 static inline int xfrm_aevent_is_on(void)
1179 {
1180 struct sock *nlsk;
1181 int ret = 0;
1182
1183 rcu_read_lock();
1184 nlsk = rcu_dereference(xfrm_nl);
1185 if (nlsk)
1186 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1187 rcu_read_unlock();
1188 return ret;
1189 }
1190
1191 static inline int xfrm_alg_len(struct xfrm_algo *alg)
1192 {
1193 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1194 }
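
As a worked example of the calculation above (illustrative only): alg_key_len is given in bits, so an xfrm_algo carrying a 160-bit authentication key occupies sizeof(struct xfrm_algo) + (160 + 7) / 8 = sizeof(struct xfrm_algo) + 20 bytes.
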
1195
1196 #ifdef CONFIG_XFRM_MIGRATE
1197 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1198 {
1199 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1200 }
1201
1202 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1203 {
1204 int i;
1205 for (i = 0; i < n; i++)
1206 xfrm_state_put(*(states + i));
1207 }
1208
1209 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1210 {
1211 int i;
1212 for (i = 0; i < n; i++)
1213 xfrm_state_delete(*(states + i));
1214 }
1215 #endif
1216
1217 #endif /* _NET_XFRM_H */