xfrm: Mark flowi arg to xfrm_type->reject() const.
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 #include <linux/slab.h>
16
17 #include <net/sock.h>
18 #include <net/dst.h>
19 #include <net/ip.h>
20 #include <net/route.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_fib.h>
23 #include <net/flow.h>
24
25 #include <linux/interrupt.h>
26
27 #ifdef CONFIG_XFRM_STATISTICS
28 #include <net/snmp.h>
29 #endif
30
31 #define XFRM_PROTO_ESP 50
32 #define XFRM_PROTO_AH 51
33 #define XFRM_PROTO_COMP 108
34 #define XFRM_PROTO_IPIP 4
35 #define XFRM_PROTO_IPV6 41
36 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
37 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
38
39 #define XFRM_ALIGN4(len) (((len) + 3) & ~3)
40 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
41 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
42 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
43 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
44 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
45
46 #ifdef CONFIG_XFRM_STATISTICS
47 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
48 #define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
49 #define XFRM_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)->mib.xfrm_statistics, field)
50 #else
51 #define XFRM_INC_STATS(net, field) ((void)(net))
52 #define XFRM_INC_STATS_BH(net, field) ((void)(net))
53 #define XFRM_INC_STATS_USER(net, field) ((void)(net))
54 #endif
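/*
 * Illustrative sketch only (hypothetical helper name): bumping one of
 * the xfrm MIB counters defined in <linux/snmp.h> through the macro
 * above.  With CONFIG_XFRM_STATISTICS disabled this compiles away to
 * ((void)(net)).
 */
static inline void example_count_xfrm_in_error(struct net *net)
{
        XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
}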
55
56 extern struct mutex xfrm_cfg_mutex;
57
58 /* Organization of SPD aka "XFRM rules"
59 ------------------------------------
60
61 Basic objects:
62 - policy rule, struct xfrm_policy (=SPD entry)
63 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
64 - instance of a transformer, struct xfrm_state (=SA)
65 - template to clone xfrm_state, struct xfrm_tmpl
66
67 SPD is plain linear list of xfrm_policy rules, ordered by priority.
68 (To be compatible with existing pfkeyv2 implementations,
69 many rules with priority of 0x7fffffff are allowed to exist and
70 such rules are ordered in an unpredictable way, thanks to bsd folks.)
71
72 Lookup is plain linear search until the first match with selector.
73
74 If "action" is "block", then we prohibit the flow, otherwise:
75 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
76 policy entry has list of up to XFRM_MAX_DEPTH transformations,
77 described by templates xfrm_tmpl. Each template is resolved
78 to a complete xfrm_state (see below) and we pack bundle of transformations
79 to a dst_entry returned to requestor.
80
81 dst -. xfrm .-> xfrm_state #1
82 |---. child .-> dst -. xfrm .-> xfrm_state #2
83 |---. child .-> dst -. xfrm .-> xfrm_state #3
84 |---. child .-> NULL
85
86    Bundles are cached in the xfrm_policy struct (field ->bundles).
87
88
89    Resolution of xfrm_tmpl
90 -----------------------
91 Template contains:
92 1. ->mode Mode: transport or tunnel
93 2. ->id.proto Protocol: AH/ESP/IPCOMP
94 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
95 Q: allow to resolve security gateway?
96 4. ->id.spi If not zero, static SPI.
97 5. ->saddr Local tunnel endpoint, ignored for transport mode.
98 6. ->algos List of allowed algos. Plain bitmask now.
99 Q: ealgos, aalgos, calgos. What a mess...
100 7. ->share Sharing mode.
101 Q: how to implement private sharing mode? To add struct sock* to
102 flow id?
103
104    Having this template, we search the SAD for entries with the
105    appropriate mode/proto/algo that are permitted by the selector.
106    If no appropriate entry is found, one is requested from the key manager.
107
108 PROBLEMS:
109 Q: How to find all the bundles referring to a physical path for
110    PMTU discovery? It seems dst would have to contain a list of all parents...
111    and we would descend into an infinite locking-hierarchy disaster.
112    No! It is easier: we will not search for them, let them find us.
113 We add genid to each dst plus pointer to genid of raw IP route,
114 pmtu disc will update pmtu on raw IP route and increase its genid.
115 dst_check() will see this for top level and trigger resyncing
116 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
117 */
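/*
 * Conceptual sketch of the lookup described above (not the in-kernel
 * implementation; the helper name and the flat policy array are
 * hypothetical).  Given policies already ordered by ->priority, the SPD
 * search is a linear scan that returns the first entry whose selector
 * matches the flow:
 *
 *	static struct xfrm_policy *spd_lookup(struct xfrm_policy **spd, int n,
 *					      struct flowi *fl,
 *					      unsigned short family)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			if (xfrm_selector_match(&spd[i]->selector, fl, family))
 *				return spd[i];
 *		return NULL;
 *	}
 */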
118
119 struct xfrm_state_walk {
120 struct list_head all;
121 u8 state;
122 union {
123 u8 dying;
124 u8 proto;
125 };
126 u32 seq;
127 };
128
129 /* Full description of state of transformer. */
130 struct xfrm_state {
131 #ifdef CONFIG_NET_NS
132 struct net *xs_net;
133 #endif
134 union {
135 struct hlist_node gclist;
136 struct hlist_node bydst;
137 };
138 struct hlist_node bysrc;
139 struct hlist_node byspi;
140
141 atomic_t refcnt;
142 spinlock_t lock;
143
144 struct xfrm_id id;
145 struct xfrm_selector sel;
146 struct xfrm_mark mark;
147 u32 tfcpad;
148
149 u32 genid;
150
151 /* Key manager bits */
152 struct xfrm_state_walk km;
153
154 /* Parameters of this state. */
155 struct {
156 u32 reqid;
157 u8 mode;
158 u8 replay_window;
159 u8 aalgo, ealgo, calgo;
160 u8 flags;
161 u16 family;
162 xfrm_address_t saddr;
163 int header_len;
164 int trailer_len;
165 } props;
166
167 struct xfrm_lifetime_cfg lft;
168
169 /* Data for transformer */
170 struct xfrm_algo_auth *aalg;
171 struct xfrm_algo *ealg;
172 struct xfrm_algo *calg;
173 struct xfrm_algo_aead *aead;
174
175 /* Data for encapsulator */
176 struct xfrm_encap_tmpl *encap;
177
178 /* Data for care-of address */
179 xfrm_address_t *coaddr;
180
181 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
182 struct xfrm_state *tunnel;
183
184 /* If a tunnel, number of users + 1 */
185 atomic_t tunnel_users;
186
187 /* State for replay detection */
188 struct xfrm_replay_state replay;
189
190 /* Replay detection state at the time we sent the last notification */
191 struct xfrm_replay_state preplay;
192
193 /* internal flag that only holds state for delayed aevent at the
194 * moment
195 */
196 u32 xflags;
197
198 /* Replay detection notification settings */
199 u32 replay_maxage;
200 u32 replay_maxdiff;
201
202 /* Replay detection notification timer */
203 struct timer_list rtimer;
204
205 /* Statistics */
206 struct xfrm_stats stats;
207
208 struct xfrm_lifetime_cur curlft;
209 struct tasklet_hrtimer mtimer;
210
211 /* Last used time */
212 unsigned long lastused;
213
214 /* Reference to data common to all the instances of this
215 * transformer. */
216 const struct xfrm_type *type;
217 struct xfrm_mode *inner_mode;
218 struct xfrm_mode *inner_mode_iaf;
219 struct xfrm_mode *outer_mode;
220
221 /* Security context */
222 struct xfrm_sec_ctx *security;
223
224 /* Private data of this transformer, format is opaque,
225 * interpreted by xfrm_type methods. */
226 void *data;
227 };
228
229 static inline struct net *xs_net(struct xfrm_state *x)
230 {
231 return read_pnet(&x->xs_net);
232 }
233
234 /* xflags - make enum if more show up */
235 #define XFRM_TIME_DEFER 1
236
237 enum {
238 XFRM_STATE_VOID,
239 XFRM_STATE_ACQ,
240 XFRM_STATE_VALID,
241 XFRM_STATE_ERROR,
242 XFRM_STATE_EXPIRED,
243 XFRM_STATE_DEAD
244 };
245
246 /* callback structure passed from either netlink or pfkey */
247 struct km_event {
248 union {
249 u32 hard;
250 u32 proto;
251 u32 byid;
252 u32 aevent;
253 u32 type;
254 } data;
255
256 u32 seq;
257 u32 pid;
258 u32 event;
259 struct net *net;
260 };
261
262 struct net_device;
263 struct xfrm_type;
264 struct xfrm_dst;
265 struct xfrm_policy_afinfo {
266 unsigned short family;
267 struct dst_ops *dst_ops;
268 void (*garbage_collect)(struct net *net);
269 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
270 xfrm_address_t *saddr,
271 xfrm_address_t *daddr);
272 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
273 void (*decode_session)(struct sk_buff *skb,
274 struct flowi *fl,
275 int reverse);
276 int (*get_tos)(const struct flowi *fl);
277 int (*init_path)(struct xfrm_dst *path,
278 struct dst_entry *dst,
279 int nfheader_len);
280 int (*fill_dst)(struct xfrm_dst *xdst,
281 struct net_device *dev,
282 const struct flowi *fl);
283 };
284
285 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
286 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
287 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
288 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
289
290 struct xfrm_tmpl;
291 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
292 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
293 extern int __xfrm_state_delete(struct xfrm_state *x);
294
295 struct xfrm_state_afinfo {
296 unsigned int family;
297 unsigned int proto;
298 __be16 eth_proto;
299 struct module *owner;
300 const struct xfrm_type *type_map[IPPROTO_MAX];
301 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
302 int (*init_flags)(struct xfrm_state *x);
303 void (*init_tempsel)(struct xfrm_selector *sel,
304 const struct flowi *fl);
305 void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
306 xfrm_address_t *daddr, xfrm_address_t *saddr);
307 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
308 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
309 int (*output)(struct sk_buff *skb);
310 int (*extract_input)(struct xfrm_state *x,
311 struct sk_buff *skb);
312 int (*extract_output)(struct xfrm_state *x,
313 struct sk_buff *skb);
314 int (*transport_finish)(struct sk_buff *skb,
315 int async);
316 };
317
318 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
319 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
320
321 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
322
323 struct xfrm_type {
324 char *description;
325 struct module *owner;
326 u8 proto;
327 u8 flags;
328 #define XFRM_TYPE_NON_FRAGMENT 1
329 #define XFRM_TYPE_REPLAY_PROT 2
330 #define XFRM_TYPE_LOCAL_COADDR 4
331 #define XFRM_TYPE_REMOTE_COADDR 8
332
333 int (*init_state)(struct xfrm_state *x);
334 void (*destructor)(struct xfrm_state *);
335 int (*input)(struct xfrm_state *, struct sk_buff *skb);
336 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
337 int (*reject)(struct xfrm_state *, struct sk_buff *,
338 const struct flowi *);
339 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
340 	/* Estimate the maximal size of the result of transforming a datagram */
341 u32 (*get_mtu)(struct xfrm_state *, int size);
342 };
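/*
 * Illustrative sketch only (hypothetical names and protocol number): a
 * minimal xfrm_type whose ->reject() callback takes the flowi as a const
 * pointer, matching the prototype above.  A real implementation would
 * also fill in ->init_state, ->input and ->output, and register itself
 * with xfrm_register_type() declared below.
 */
static int example_type_reject(struct xfrm_state *x, struct sk_buff *skb,
			       const struct flowi *fl)
{
	/* The flow may be inspected here, but not modified through fl. */
	return -EINVAL;
}

static const struct xfrm_type example_xfrm_type = {
	.description	= "EXAMPLE",
	.proto		= 253,		/* hypothetical protocol number */
	.reject		= example_type_reject,
};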
343
344 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
345 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
346
347 struct xfrm_mode {
348 /*
349 * Remove encapsulation header.
350 *
351 * The IP header will be moved over the top of the encapsulation
352 * header.
353 *
354 * On entry, the transport header shall point to where the IP header
355 * should be and the network header shall be set to where the IP
356 * header currently is. skb->data shall point to the start of the
357 * payload.
358 */
359 int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
360
361 /*
362 * This is the actual input entry point.
363 *
364 * For transport mode and equivalent this would be identical to
365 	 * input2 (which does not need to be set), while tunnel mode and
366 	 * equivalent would set this to the tunnel encapsulation function
367 	 * xfrm4_prepare_input, which would in turn call input2.
368 */
369 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
370
371 /*
372 * Add encapsulation header.
373 *
374 * On exit, the transport header will be set to the start of the
375 * encapsulation header to be filled in by x->type->output and
376 * the mac header will be set to the nextheader (protocol for
377 * IPv4) field of the extension header directly preceding the
378 * encapsulation header, or in its absence, that of the top IP
379 * header. The value of the network header will always point
380 * to the top IP header while skb->data will point to the payload.
381 */
382 int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
383
384 /*
385 * This is the actual output entry point.
386 *
387 * For transport mode and equivalent this would be identical to
388 	 * output2 (which does not need to be set), while tunnel mode and
389 	 * equivalent would set this to a tunnel encapsulation function
390 	 * (xfrm4_prepare_output or xfrm6_prepare_output), which would in
391 	 * turn call output2.
392 */
393 int (*output)(struct xfrm_state *x, struct sk_buff *skb);
394
395 struct xfrm_state_afinfo *afinfo;
396 struct module *owner;
397 unsigned int encap;
398 int flags;
399 };
400
401 /* Flags for xfrm_mode. */
402 enum {
403 XFRM_MODE_FLAG_TUNNEL = 1,
404 };
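/*
 * Illustrative helper (hypothetical name): testing whether a mode is
 * tunnel-like, e.g. before deciding that an outer IP header has to be
 * built for the packet.
 */
static inline int example_mode_is_tunnel(const struct xfrm_mode *mode)
{
	return mode->flags & XFRM_MODE_FLAG_TUNNEL;
}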
405
406 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
407 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
408
409 static inline int xfrm_af2proto(unsigned int family)
410 {
411 switch(family) {
412 case AF_INET:
413 return IPPROTO_IPIP;
414 case AF_INET6:
415 return IPPROTO_IPV6;
416 default:
417 return 0;
418 }
419 }
420
421 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
422 {
423 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
424 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
425 return x->inner_mode;
426 else
427 return x->inner_mode_iaf;
428 }
429
430 struct xfrm_tmpl {
431 /* id in template is interpreted as:
432 * daddr - destination of tunnel, may be zero for transport mode.
433 * spi - zero to acquire spi. Not zero if spi is static, then
434 * daddr must be fixed too.
435 * proto - AH/ESP/IPCOMP
436 */
437 struct xfrm_id id;
438
439 	/* Source address of the tunnel. Ignored if it is not a tunnel. */
440 xfrm_address_t saddr;
441
442 unsigned short encap_family;
443
444 u32 reqid;
445
446 /* Mode: transport, tunnel etc. */
447 u8 mode;
448
449 /* Sharing mode: unique, this session only, this user only etc. */
450 u8 share;
451
452 	/* May skip this transformation if no SA is found */
453 u8 optional;
454
455 /* Skip aalgos/ealgos/calgos checks. */
456 u8 allalgs;
457
458 /* Bit mask of algos allowed for acquisition */
459 u32 aalgos;
460 u32 ealgos;
461 u32 calgos;
462 };
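/*
 * Illustrative sketch only (hypothetical helper and example values):
 * filling a template that asks for ESP in tunnel mode towards a given
 * IPv4 endpoint.  The reqid and the "accept any algorithm" masks are
 * made-up example choices.
 */
static inline void example_fill_esp_tmpl(struct xfrm_tmpl *t, __be32 peer)
{
	memset(t, 0, sizeof(*t));
	t->id.proto	= IPPROTO_ESP;
	t->id.daddr.a4	= peer;			/* remote tunnel endpoint */
	t->encap_family	= AF_INET;
	t->reqid	= 1;			/* hypothetical reqid */
	t->mode		= XFRM_MODE_TUNNEL;
	t->optional	= 0;			/* an SA is mandatory */
	t->aalgos = t->ealgos = t->calgos = ~(u32)0;	/* any algorithm */
}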
463
464 #define XFRM_MAX_DEPTH 6
465
466 struct xfrm_policy_walk_entry {
467 struct list_head all;
468 u8 dead;
469 };
470
471 struct xfrm_policy_walk {
472 struct xfrm_policy_walk_entry walk;
473 u8 type;
474 u32 seq;
475 };
476
477 struct xfrm_policy {
478 #ifdef CONFIG_NET_NS
479 struct net *xp_net;
480 #endif
481 struct hlist_node bydst;
482 struct hlist_node byidx;
483
484 	/* This lock protects all elements except the list entries. */
485 rwlock_t lock;
486 atomic_t refcnt;
487 struct timer_list timer;
488
489 struct flow_cache_object flo;
490 atomic_t genid;
491 u32 priority;
492 u32 index;
493 struct xfrm_mark mark;
494 struct xfrm_selector selector;
495 struct xfrm_lifetime_cfg lft;
496 struct xfrm_lifetime_cur curlft;
497 struct xfrm_policy_walk_entry walk;
498 u8 type;
499 u8 action;
500 u8 flags;
501 u8 xfrm_nr;
502 u16 family;
503 struct xfrm_sec_ctx *security;
504 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
505 };
506
507 static inline struct net *xp_net(struct xfrm_policy *xp)
508 {
509 return read_pnet(&xp->xp_net);
510 }
511
512 struct xfrm_kmaddress {
513 xfrm_address_t local;
514 xfrm_address_t remote;
515 u32 reserved;
516 u16 family;
517 };
518
519 struct xfrm_migrate {
520 xfrm_address_t old_daddr;
521 xfrm_address_t old_saddr;
522 xfrm_address_t new_daddr;
523 xfrm_address_t new_saddr;
524 u8 proto;
525 u8 mode;
526 u16 reserved;
527 u32 reqid;
528 u16 old_family;
529 u16 new_family;
530 };
531
532 #define XFRM_KM_TIMEOUT 30
533 /* which seqno */
534 #define XFRM_REPLAY_SEQ 1
535 #define XFRM_REPLAY_OSEQ 2
536 #define XFRM_REPLAY_SEQ_MASK 3
537 /* what happened */
538 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
539 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
540
541 /* default aevent timeout in units of 100ms */
542 #define XFRM_AE_ETIME 10
543 /* Async Event timer multiplier */
544 #define XFRM_AE_ETH_M 10
545 /* default seq threshold size */
546 #define XFRM_AE_SEQT_SIZE 2
547
548 struct xfrm_mgr {
549 struct list_head list;
550 char *id;
551 int (*notify)(struct xfrm_state *x, struct km_event *c);
552 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
553 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
554 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
555 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
556 int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
557 int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
558 };
559
560 extern int xfrm_register_km(struct xfrm_mgr *km);
561 extern int xfrm_unregister_km(struct xfrm_mgr *km);
562
563 /*
564  * This structure is used while packets are being transformed by
565  * IPsec. As soon as the packet leaves IPsec, the
566 * area beyond the generic IP part may be overwritten.
567 */
568 struct xfrm_skb_cb {
569 union {
570 struct inet_skb_parm h4;
571 struct inet6_skb_parm h6;
572 } header;
573
574 /* Sequence number for replay protection. */
575 union {
576 u64 output;
577 __be32 input;
578 } seq;
579 };
580
581 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
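/*
 * Illustrative use (hypothetical helper name): while a packet is inside
 * the IPsec output path, the 64-bit output sequence number stashed in
 * the cb area above can be read back like this.
 */
static inline u64 example_xfrm_output_seq(struct sk_buff *skb)
{
	return XFRM_SKB_CB(skb)->seq.output;
}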
582
583 /*
584 * This structure is used by the afinfo prepare_input/prepare_output functions
585 * to transmit header information to the mode input/output functions.
586 */
587 struct xfrm_mode_skb_cb {
588 union {
589 struct inet_skb_parm h4;
590 struct inet6_skb_parm h6;
591 } header;
592
593 /* Copied from header for IPv4, always set to zero and DF for IPv6. */
594 __be16 id;
595 __be16 frag_off;
596
597 /* IP header length (excluding options or extension headers). */
598 u8 ihl;
599
600 /* TOS for IPv4, class for IPv6. */
601 u8 tos;
602
603 	/* TTL for IPv4, hop limit for IPv6. */
604 u8 ttl;
605
606 /* Protocol for IPv4, NH for IPv6. */
607 u8 protocol;
608
609 /* Option length for IPv4, zero for IPv6. */
610 u8 optlen;
611
612 /* Used by IPv6 only, zero for IPv4. */
613 u8 flow_lbl[3];
614 };
615
616 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
617
618 /*
619 * This structure is used by the input processing to locate the SPI and
620 * related information.
621 */
622 struct xfrm_spi_skb_cb {
623 union {
624 struct inet_skb_parm h4;
625 struct inet6_skb_parm h6;
626 } header;
627
628 unsigned int daddroff;
629 unsigned int family;
630 };
631
632 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
633
634 /* Audit Information */
635 struct xfrm_audit {
636 u32 secid;
637 uid_t loginuid;
638 u32 sessionid;
639 };
640
641 #ifdef CONFIG_AUDITSYSCALL
642 static inline struct audit_buffer *xfrm_audit_start(const char *op)
643 {
644 struct audit_buffer *audit_buf = NULL;
645
646 if (audit_enabled == 0)
647 return NULL;
648 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
649 AUDIT_MAC_IPSEC_EVENT);
650 if (audit_buf == NULL)
651 return NULL;
652 audit_log_format(audit_buf, "op=%s", op);
653 return audit_buf;
654 }
655
656 static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
657 struct audit_buffer *audit_buf)
658 {
659 char *secctx;
660 u32 secctx_len;
661
662 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
663 if (secid != 0 &&
664 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
665 audit_log_format(audit_buf, " subj=%s", secctx);
666 security_release_secctx(secctx, secctx_len);
667 } else
668 audit_log_task_context(audit_buf);
669 }
670
671 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
672 u32 auid, u32 ses, u32 secid);
673 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
674 u32 auid, u32 ses, u32 secid);
675 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
676 u32 auid, u32 ses, u32 secid);
677 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
678 u32 auid, u32 ses, u32 secid);
679 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
680 struct sk_buff *skb);
681 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
682 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
683 __be32 net_spi, __be32 net_seq);
684 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
685 struct sk_buff *skb, u8 proto);
686 #else
687
688 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
689 u32 auid, u32 ses, u32 secid)
690 {
691 }
692
693 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
694 u32 auid, u32 ses, u32 secid)
695 {
696 }
697
698 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
699 u32 auid, u32 ses, u32 secid)
700 {
701 }
702
703 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
704 u32 auid, u32 ses, u32 secid)
705 {
706 }
707
708 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
709 struct sk_buff *skb)
710 {
711 }
712
713 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
714 u16 family)
715 {
716 }
717
718 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
719 __be32 net_spi, __be32 net_seq)
720 {
721 }
722
723 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
724 struct sk_buff *skb, u8 proto)
725 {
726 }
727 #endif /* CONFIG_AUDITSYSCALL */
728
729 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
730 {
731 if (likely(policy != NULL))
732 atomic_inc(&policy->refcnt);
733 }
734
735 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
736
737 static inline void xfrm_pol_put(struct xfrm_policy *policy)
738 {
739 if (atomic_dec_and_test(&policy->refcnt))
740 xfrm_policy_destroy(policy);
741 }
742
743 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
744 {
745 int i;
746 for (i = npols - 1; i >= 0; --i)
747 xfrm_pol_put(pols[i]);
748 }
749
750 extern void __xfrm_state_destroy(struct xfrm_state *);
751
752 static inline void __xfrm_state_put(struct xfrm_state *x)
753 {
754 atomic_dec(&x->refcnt);
755 }
756
757 static inline void xfrm_state_put(struct xfrm_state *x)
758 {
759 if (atomic_dec_and_test(&x->refcnt))
760 __xfrm_state_destroy(x);
761 }
762
763 static inline void xfrm_state_hold(struct xfrm_state *x)
764 {
765 atomic_inc(&x->refcnt);
766 }
767
768 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
769 {
770 __be32 *a1 = token1;
771 __be32 *a2 = token2;
772 int pdw;
773 int pbi;
774
775 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
776 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
777
778 if (pdw)
779 if (memcmp(a1, a2, pdw << 2))
780 return 0;
781
782 if (pbi) {
783 __be32 mask;
784
785 mask = htonl((0xffffffff) << (32 - pbi));
786
787 if ((a1[pdw] ^ a2[pdw]) & mask)
788 return 0;
789 }
790
791 return 1;
792 }
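/*
 * Worked example (illustrative, hypothetical helper name): with a /24
 * prefix, 192.168.1.10 and 192.168.1.200 match, while an address
 * outside 192.168.1.0/24 would not.
 */
static inline int example_addr_match_slash24(void)
{
	__be32 a = htonl(0xc0a8010a);	/* 192.168.1.10 */
	__be32 b = htonl(0xc0a801c8);	/* 192.168.1.200 */

	return addr_match(&a, &b, 24);	/* returns 1 */
}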
793
794 static __inline__
795 __be16 xfrm_flowi_sport(const struct flowi *fl)
796 {
797 __be16 port;
798 switch(fl->proto) {
799 case IPPROTO_TCP:
800 case IPPROTO_UDP:
801 case IPPROTO_UDPLITE:
802 case IPPROTO_SCTP:
803 port = fl->fl_ip_sport;
804 break;
805 case IPPROTO_ICMP:
806 case IPPROTO_ICMPV6:
807 port = htons(fl->fl_icmp_type);
808 break;
809 case IPPROTO_MH:
810 port = htons(fl->fl_mh_type);
811 break;
812 case IPPROTO_GRE:
813 port = htons(ntohl(fl->fl_gre_key) >> 16);
814 break;
815 default:
816 port = 0; /*XXX*/
817 }
818 return port;
819 }
820
821 static __inline__
822 __be16 xfrm_flowi_dport(const struct flowi *fl)
823 {
824 __be16 port;
825 switch(fl->proto) {
826 case IPPROTO_TCP:
827 case IPPROTO_UDP:
828 case IPPROTO_UDPLITE:
829 case IPPROTO_SCTP:
830 port = fl->fl_ip_dport;
831 break;
832 case IPPROTO_ICMP:
833 case IPPROTO_ICMPV6:
834 port = htons(fl->fl_icmp_code);
835 break;
836 case IPPROTO_GRE:
837 port = htons(ntohl(fl->fl_gre_key) & 0xffff);
838 break;
839 default:
840 port = 0; /*XXX*/
841 }
842 return port;
843 }
844
845 extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
846 unsigned short family);
847
848 #ifdef CONFIG_SECURITY_NETWORK_XFRM
849 /* If neither has a context --> match
850 * Otherwise, both must have a context and the sids, doi, alg must match
851 */
852 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
853 {
854 return ((!s1 && !s2) ||
855 (s1 && s2 &&
856 (s1->ctx_sid == s2->ctx_sid) &&
857 (s1->ctx_doi == s2->ctx_doi) &&
858 (s1->ctx_alg == s2->ctx_alg)));
859 }
860 #else
861 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
862 {
863 return 1;
864 }
865 #endif
866
867 /* A struct encoding a bundle of transformations to apply to some flow.
868  *
869  * dst->child points to the next element of the bundle.
870  * dst->xfrm points to an instance of a transformer.
871  *
872  * Due to unfortunate limitations of the current routing cache, which we
873  * have no time to fix, it mirrors struct rtable and is bound to the same
874  * routing key, including saddr and daddr. However, we can have many
875  * bundles differing by session id. All the bundles grow from a parent
876  * policy rule.
877 */
878 struct xfrm_dst {
879 union {
880 struct dst_entry dst;
881 struct rtable rt;
882 struct rt6_info rt6;
883 } u;
884 struct dst_entry *route;
885 struct flow_cache_object flo;
886 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
887 int num_pols, num_xfrms;
888 #ifdef CONFIG_XFRM_SUB_POLICY
889 struct flowi *origin;
890 struct xfrm_selector *partner;
891 #endif
892 u32 xfrm_genid;
893 u32 policy_genid;
894 u32 route_mtu_cached;
895 u32 child_mtu_cached;
896 u32 route_cookie;
897 u32 path_cookie;
898 };
899
900 #ifdef CONFIG_XFRM
901 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
902 {
903 xfrm_pols_put(xdst->pols, xdst->num_pols);
904 dst_release(xdst->route);
905 if (likely(xdst->u.dst.xfrm))
906 xfrm_state_put(xdst->u.dst.xfrm);
907 #ifdef CONFIG_XFRM_SUB_POLICY
908 kfree(xdst->origin);
909 xdst->origin = NULL;
910 kfree(xdst->partner);
911 xdst->partner = NULL;
912 #endif
913 }
914 #endif
915
916 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
917
918 struct sec_path {
919 atomic_t refcnt;
920 int len;
921 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
922 };
923
924 static inline struct sec_path *
925 secpath_get(struct sec_path *sp)
926 {
927 if (sp)
928 atomic_inc(&sp->refcnt);
929 return sp;
930 }
931
932 extern void __secpath_destroy(struct sec_path *sp);
933
934 static inline void
935 secpath_put(struct sec_path *sp)
936 {
937 if (sp && atomic_dec_and_test(&sp->refcnt))
938 __secpath_destroy(sp);
939 }
940
941 extern struct sec_path *secpath_dup(struct sec_path *src);
942
943 static inline void
944 secpath_reset(struct sk_buff *skb)
945 {
946 #ifdef CONFIG_XFRM
947 secpath_put(skb->sp);
948 skb->sp = NULL;
949 #endif
950 }
951
952 static inline int
953 xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
954 {
955 switch (family) {
956 case AF_INET:
957 return addr->a4 == 0;
958 case AF_INET6:
959 return ipv6_addr_any((struct in6_addr *)&addr->a6);
960 }
961 return 0;
962 }
963
964 static inline int
965 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
966 {
967 return (tmpl->saddr.a4 &&
968 tmpl->saddr.a4 != x->props.saddr.a4);
969 }
970
971 static inline int
972 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
973 {
974 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
975 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
976 }
977
978 static inline int
979 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
980 {
981 switch (family) {
982 case AF_INET:
983 return __xfrm4_state_addr_cmp(tmpl, x);
984 case AF_INET6:
985 return __xfrm6_state_addr_cmp(tmpl, x);
986 }
987 return !0;
988 }
989
990 #ifdef CONFIG_XFRM
991 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
992
993 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
994 struct sk_buff *skb,
995 unsigned int family, int reverse)
996 {
997 struct net *net = dev_net(skb->dev);
998 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
999
1000 if (sk && sk->sk_policy[XFRM_POLICY_IN])
1001 return __xfrm_policy_check(sk, ndir, skb, family);
1002
1003 return (!net->xfrm.policy_count[dir] && !skb->sp) ||
1004 (skb_dst(skb)->flags & DST_NOPOLICY) ||
1005 __xfrm_policy_check(sk, ndir, skb, family);
1006 }
1007
1008 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1009 {
1010 return __xfrm_policy_check2(sk, dir, skb, family, 0);
1011 }
1012
1013 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1014 {
1015 return xfrm_policy_check(sk, dir, skb, AF_INET);
1016 }
1017
1018 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1019 {
1020 return xfrm_policy_check(sk, dir, skb, AF_INET6);
1021 }
1022
1023 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1024 struct sk_buff *skb)
1025 {
1026 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1027 }
1028
1029 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1030 struct sk_buff *skb)
1031 {
1032 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1033 }
1034
1035 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1036 unsigned int family, int reverse);
1037
1038 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1039 unsigned int family)
1040 {
1041 return __xfrm_decode_session(skb, fl, family, 0);
1042 }
1043
1044 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1045 struct flowi *fl,
1046 unsigned int family)
1047 {
1048 return __xfrm_decode_session(skb, fl, family, 1);
1049 }
1050
1051 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1052
1053 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1054 {
1055 struct net *net = dev_net(skb->dev);
1056
1057 return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1058 (skb_dst(skb)->flags & DST_NOXFRM) ||
1059 __xfrm_route_forward(skb, family);
1060 }
1061
1062 static inline int xfrm4_route_forward(struct sk_buff *skb)
1063 {
1064 return xfrm_route_forward(skb, AF_INET);
1065 }
1066
1067 static inline int xfrm6_route_forward(struct sk_buff *skb)
1068 {
1069 return xfrm_route_forward(skb, AF_INET6);
1070 }
1071
1072 extern int __xfrm_sk_clone_policy(struct sock *sk);
1073
1074 static inline int xfrm_sk_clone_policy(struct sock *sk)
1075 {
1076 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
1077 return __xfrm_sk_clone_policy(sk);
1078 return 0;
1079 }
1080
1081 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1082
1083 static inline void xfrm_sk_free_policy(struct sock *sk)
1084 {
1085 if (unlikely(sk->sk_policy[0] != NULL)) {
1086 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
1087 sk->sk_policy[0] = NULL;
1088 }
1089 if (unlikely(sk->sk_policy[1] != NULL)) {
1090 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
1091 sk->sk_policy[1] = NULL;
1092 }
1093 }
1094
1095 #else
1096
1097 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1098 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
1099 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1100 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1101 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1102 {
1103 return 1;
1104 }
1105 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1106 {
1107 return 1;
1108 }
1109 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1110 {
1111 return 1;
1112 }
1113 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1114 struct flowi *fl,
1115 unsigned int family)
1116 {
1117 return -ENOSYS;
1118 }
1119 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1120 struct sk_buff *skb)
1121 {
1122 return 1;
1123 }
1124 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1125 struct sk_buff *skb)
1126 {
1127 return 1;
1128 }
1129 #endif
1130
1131 static __inline__
1132 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1133 {
1134 switch (family){
1135 case AF_INET:
1136 return (xfrm_address_t *)&fl->fl4_dst;
1137 case AF_INET6:
1138 return (xfrm_address_t *)&fl->fl6_dst;
1139 }
1140 return NULL;
1141 }
1142
1143 static __inline__
1144 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1145 {
1146 switch (family){
1147 case AF_INET:
1148 return (xfrm_address_t *)&fl->fl4_src;
1149 case AF_INET6:
1150 return (xfrm_address_t *)&fl->fl6_src;
1151 }
1152 return NULL;
1153 }
1154
1155 static __inline__
1156 void xfrm_flowi_addr_get(const struct flowi *fl,
1157 xfrm_address_t *saddr, xfrm_address_t *daddr,
1158 unsigned short family)
1159 {
1160 switch(family) {
1161 case AF_INET:
1162 memcpy(&saddr->a4, &fl->fl4_src, sizeof(saddr->a4));
1163 memcpy(&daddr->a4, &fl->fl4_dst, sizeof(daddr->a4));
1164 break;
1165 case AF_INET6:
1166 ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->fl6_src);
1167 ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->fl6_dst);
1168 break;
1169 }
1170 }
1171
1172 static __inline__ int
1173 __xfrm4_state_addr_check(struct xfrm_state *x,
1174 xfrm_address_t *daddr, xfrm_address_t *saddr)
1175 {
1176 if (daddr->a4 == x->id.daddr.a4 &&
1177 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1178 return 1;
1179 return 0;
1180 }
1181
1182 static __inline__ int
1183 __xfrm6_state_addr_check(struct xfrm_state *x,
1184 xfrm_address_t *daddr, xfrm_address_t *saddr)
1185 {
1186 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1187 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1188 ipv6_addr_any((struct in6_addr *)saddr) ||
1189 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1190 return 1;
1191 return 0;
1192 }
1193
1194 static __inline__ int
1195 xfrm_state_addr_check(struct xfrm_state *x,
1196 xfrm_address_t *daddr, xfrm_address_t *saddr,
1197 unsigned short family)
1198 {
1199 switch (family) {
1200 case AF_INET:
1201 return __xfrm4_state_addr_check(x, daddr, saddr);
1202 case AF_INET6:
1203 return __xfrm6_state_addr_check(x, daddr, saddr);
1204 }
1205 return 0;
1206 }
1207
1208 static __inline__ int
1209 xfrm_state_addr_flow_check(struct xfrm_state *x, const struct flowi *fl,
1210 unsigned short family)
1211 {
1212 switch (family) {
1213 case AF_INET:
1214 return __xfrm4_state_addr_check(x,
1215 (xfrm_address_t *)&fl->fl4_dst,
1216 (xfrm_address_t *)&fl->fl4_src);
1217 case AF_INET6:
1218 return __xfrm6_state_addr_check(x,
1219 (xfrm_address_t *)&fl->fl6_dst,
1220 (xfrm_address_t *)&fl->fl6_src);
1221 }
1222 return 0;
1223 }
1224
1225 static inline int xfrm_state_kern(struct xfrm_state *x)
1226 {
1227 return atomic_read(&x->tunnel_users);
1228 }
1229
1230 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1231 {
1232 return (!userproto || proto == userproto ||
1233 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1234 proto == IPPROTO_ESP ||
1235 proto == IPPROTO_COMP)));
1236 }
1237
1238 /*
1239 * xfrm algorithm information
1240 */
1241 struct xfrm_algo_aead_info {
1242 u16 icv_truncbits;
1243 };
1244
1245 struct xfrm_algo_auth_info {
1246 u16 icv_truncbits;
1247 u16 icv_fullbits;
1248 };
1249
1250 struct xfrm_algo_encr_info {
1251 u16 blockbits;
1252 u16 defkeybits;
1253 };
1254
1255 struct xfrm_algo_comp_info {
1256 u16 threshold;
1257 };
1258
1259 struct xfrm_algo_desc {
1260 char *name;
1261 char *compat;
1262 u8 available:1;
1263 union {
1264 struct xfrm_algo_aead_info aead;
1265 struct xfrm_algo_auth_info auth;
1266 struct xfrm_algo_encr_info encr;
1267 struct xfrm_algo_comp_info comp;
1268 } uinfo;
1269 struct sadb_alg desc;
1270 };
1271
1272 /* XFRM tunnel handlers. */
1273 struct xfrm_tunnel {
1274 int (*handler)(struct sk_buff *skb);
1275 int (*err_handler)(struct sk_buff *skb, u32 info);
1276
1277 struct xfrm_tunnel __rcu *next;
1278 int priority;
1279 };
1280
1281 struct xfrm6_tunnel {
1282 int (*handler)(struct sk_buff *skb);
1283 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1284 u8 type, u8 code, int offset, __be32 info);
1285 struct xfrm6_tunnel __rcu *next;
1286 int priority;
1287 };
1288
1289 extern void xfrm_init(void);
1290 extern void xfrm4_init(int rt_hash_size);
1291 extern int xfrm_state_init(struct net *net);
1292 extern void xfrm_state_fini(struct net *net);
1293 extern void xfrm4_state_init(void);
1294 #ifdef CONFIG_XFRM
1295 extern int xfrm6_init(void);
1296 extern void xfrm6_fini(void);
1297 extern int xfrm6_state_init(void);
1298 extern void xfrm6_state_fini(void);
1299 #else
1300 static inline int xfrm6_init(void)
1301 {
1302 return 0;
1303 }
1304 static inline void xfrm6_fini(void)
1305 {
1306 ;
1307 }
1308 #endif
1309
1310 #ifdef CONFIG_XFRM_STATISTICS
1311 extern int xfrm_proc_init(struct net *net);
1312 extern void xfrm_proc_fini(struct net *net);
1313 #endif
1314
1315 extern int xfrm_sysctl_init(struct net *net);
1316 #ifdef CONFIG_SYSCTL
1317 extern void xfrm_sysctl_fini(struct net *net);
1318 #else
1319 static inline void xfrm_sysctl_fini(struct net *net)
1320 {
1321 }
1322 #endif
1323
1324 extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1325 extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1326 int (*func)(struct xfrm_state *, int, void*), void *);
1327 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
1328 extern struct xfrm_state *xfrm_state_alloc(struct net *net);
1329 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1330 struct flowi *fl, struct xfrm_tmpl *tmpl,
1331 struct xfrm_policy *pol, int *err,
1332 unsigned short family);
1333 extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
1334 xfrm_address_t *daddr,
1335 xfrm_address_t *saddr,
1336 unsigned short family,
1337 u8 mode, u8 proto, u32 reqid);
1338 extern int xfrm_state_check_expire(struct xfrm_state *x);
1339 extern void xfrm_state_insert(struct xfrm_state *x);
1340 extern int xfrm_state_add(struct xfrm_state *x);
1341 extern int xfrm_state_update(struct xfrm_state *x);
1342 extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1343 xfrm_address_t *daddr, __be32 spi,
1344 u8 proto, unsigned short family);
1345 extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1346 xfrm_address_t *daddr,
1347 xfrm_address_t *saddr,
1348 u8 proto,
1349 unsigned short family);
1350 #ifdef CONFIG_XFRM_SUB_POLICY
1351 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1352 int n, unsigned short family);
1353 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1354 int n, unsigned short family);
1355 #else
1356 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1357 int n, unsigned short family)
1358 {
1359 return -ENOSYS;
1360 }
1361
1362 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1363 int n, unsigned short family)
1364 {
1365 return -ENOSYS;
1366 }
1367 #endif
1368
1369 struct xfrmk_sadinfo {
1370 u32 sadhcnt; /* current hash bkts */
1371 u32 sadhmcnt; /* max allowed hash bkts */
1372 u32 sadcnt; /* current running count */
1373 };
1374
1375 struct xfrmk_spdinfo {
1376 u32 incnt;
1377 u32 outcnt;
1378 u32 fwdcnt;
1379 u32 inscnt;
1380 u32 outscnt;
1381 u32 fwdscnt;
1382 u32 spdhcnt;
1383 u32 spdhmcnt;
1384 };
1385
1386 extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
1387 u32 seq);
1388 extern int xfrm_state_delete(struct xfrm_state *x);
1389 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
1390 extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1391 extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1392 extern int xfrm_replay_check(struct xfrm_state *x,
1393 struct sk_buff *skb, __be32 seq);
1394 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1395 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1396 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1397 extern int xfrm_init_state(struct xfrm_state *x);
1398 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1399 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1400 int encap_type);
1401 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1402 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1403 extern int xfrm_output(struct sk_buff *skb);
1404 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1405 extern int xfrm4_extract_header(struct sk_buff *skb);
1406 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1407 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1408 int encap_type);
1409 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1410 extern int xfrm4_rcv(struct sk_buff *skb);
1411
1412 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1413 {
1414 return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1415 }
1416
1417 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1418 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1419 extern int xfrm4_output(struct sk_buff *skb);
1420 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1421 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1422 extern int xfrm6_extract_header(struct sk_buff *skb);
1423 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1424 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1425 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1426 extern int xfrm6_rcv(struct sk_buff *skb);
1427 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1428 xfrm_address_t *saddr, u8 proto);
1429 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1430 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1431 extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1432 extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
1433 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1434 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1435 extern int xfrm6_output(struct sk_buff *skb);
1436 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1437 u8 **prevhdr);
1438
1439 #ifdef CONFIG_XFRM
1440 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1441 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1442 #else
1443 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1444 {
1445 return -ENOPROTOOPT;
1446 }
1447
1448 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1449 {
1450 /* should not happen */
1451 kfree_skb(skb);
1452 return 0;
1453 }
1454 #endif
1455
1456 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1457
1458 extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1459 extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1460 int (*func)(struct xfrm_policy *, int, int, void*), void *);
1461 extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
1462 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1463 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1464 u8 type, int dir,
1465 struct xfrm_selector *sel,
1466 struct xfrm_sec_ctx *ctx, int delete,
1467 int *err);
1468 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
1469 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
1470 u32 xfrm_get_acqseq(void);
1471 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1472 struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
1473 u8 mode, u32 reqid, u8 proto,
1474 xfrm_address_t *daddr,
1475 xfrm_address_t *saddr, int create,
1476 unsigned short family);
1477 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1478
1479 #ifdef CONFIG_XFRM_MIGRATE
1480 extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1481 struct xfrm_migrate *m, int num_bundles,
1482 struct xfrm_kmaddress *k);
1483 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1484 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1485 struct xfrm_migrate *m);
1486 extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1487 struct xfrm_migrate *m, int num_bundles,
1488 struct xfrm_kmaddress *k);
1489 #endif
1490
1491 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1492 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1493 extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1494
1495 extern void xfrm_input_init(void);
1496 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1497
1498 extern void xfrm_probe_algs(void);
1499 extern int xfrm_count_auth_supported(void);
1500 extern int xfrm_count_enc_supported(void);
1501 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1502 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1503 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1504 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1505 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1506 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1507 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1508 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1509 extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
1510 int probe);
1511
1512 struct hash_desc;
1513 struct scatterlist;
1514 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1515 unsigned int);
1516
1517 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1518 int family)
1519 {
1520 switch (family) {
1521 default:
1522 case AF_INET:
1523 return (__force u32)a->a4 - (__force u32)b->a4;
1524 case AF_INET6:
1525 return ipv6_addr_cmp((struct in6_addr *)a,
1526 (struct in6_addr *)b);
1527 }
1528 }
1529
1530 static inline int xfrm_policy_id2dir(u32 index)
1531 {
1532 return index & 7;
1533 }
1534
1535 #ifdef CONFIG_XFRM
1536 static inline int xfrm_aevent_is_on(struct net *net)
1537 {
1538 struct sock *nlsk;
1539 int ret = 0;
1540
1541 rcu_read_lock();
1542 nlsk = rcu_dereference(net->xfrm.nlsk);
1543 if (nlsk)
1544 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1545 rcu_read_unlock();
1546 return ret;
1547 }
1548 #endif
1549
1550 static inline int xfrm_alg_len(struct xfrm_algo *alg)
1551 {
1552 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1553 }
1554
1555 static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
1556 {
1557 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1558 }
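/*
 * Worked example (illustrative, hypothetical helper name): for a
 * 160-bit authentication key, alg_key_len is 160 and the flat length
 * used when copying the algorithm (see xfrm_algo_clone() below, under
 * CONFIG_XFRM_MIGRATE) is
 * sizeof(struct xfrm_algo) + (160 + 7) / 8 == sizeof(struct xfrm_algo) + 20.
 */
static inline int example_alg_len_160(void)
{
	struct xfrm_algo alg = { .alg_key_len = 160 };

	return xfrm_alg_len(&alg);
}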
1559
1560 #ifdef CONFIG_XFRM_MIGRATE
1561 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1562 {
1563 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1564 }
1565
1566 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1567 {
1568 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1569 }
1570
1571 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1572 {
1573 int i;
1574 for (i = 0; i < n; i++)
1575 xfrm_state_put(*(states + i));
1576 }
1577
1578 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1579 {
1580 int i;
1581 for (i = 0; i < n; i++)
1582 xfrm_state_delete(*(states + i));
1583 }
1584 #endif
1585
1586 #ifdef CONFIG_XFRM
1587 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1588 {
1589 return skb->sp->xvec[skb->sp->len - 1];
1590 }
1591 #endif
1592
1593 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1594 {
1595 if (attrs[XFRMA_MARK])
1596 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1597 else
1598 m->v = m->m = 0;
1599
1600 return m->v & m->m;
1601 }
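/*
 * Illustrative sketch only (hypothetical helper name): how a mark
 * filter obtained via xfrm_mark_get() would typically be applied to the
 * mark value carried by an skb or state.
 */
static inline int example_xfrm_mark_matches(u32 obj_mark,
					    const struct xfrm_mark *m)
{
	return (obj_mark & m->m) == (m->v & m->m);
}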
1602
1603 static inline int xfrm_mark_put(struct sk_buff *skb, struct xfrm_mark *m)
1604 {
1605 if (m->m | m->v)
1606 NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1607 return 0;
1608
1609 nla_put_failure:
1610 return -1;
1611 }
1612
1613 #endif /* _NET_XFRM_H */