Merge branch 'for-2.6.26' of master.kernel.org:/pub/scm/linux/kernel/git/olof/pasemi
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used for xfrm event delivery; shared with the
 * netlink key manager (xfrm_user) via this export. */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Default async-event (replay/aging notification) timer interval,
 * in XFRM_AE_ETIME units; tunable via sysctl. */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

/* Replay sequence-number delta that triggers an async event. */
u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Hard add-lifetime (seconds) given to larval ACQUIRE states. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;
38 /* Each xfrm_state may be linked to two tables:
39
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
43 */
44
/* Protects the three hash tables below plus xfrm_state_num and
 * xfrm_state_genid. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;	/* bucket count - 1 */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;	/* states currently installed */
static unsigned int xfrm_state_genid;	/* bumped on each insert */

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq);
#else
/* Compiled out when syscall auditing is disabled. */
#define xfrm_audit_state_replay(x, s, sq)	do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */
70
71 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
72 xfrm_address_t *saddr,
73 u32 reqid,
74 unsigned short family)
75 {
76 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
77 }
78
79 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
80 xfrm_address_t *saddr,
81 unsigned short family)
82 {
83 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
84 }
85
86 static inline unsigned int
87 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
88 {
89 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
90 }
91
/* Re-bucket every state found on @list (one bydst chain of the old
 * table) into the new tables, recomputing each hash with the new mask
 * @nhashmask.  Called under xfrm_state_lock from xfrm_hash_resize().
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			/* Only states that already own an SPI are
			 * present in the byspi table. */
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
122
123 static unsigned long xfrm_hash_new_size(void)
124 {
125 return ((xfrm_state_hmask + 1) << 1) *
126 sizeof(struct hlist_head);
127 }
128
/* Serializes concurrent resize work items against each other. */
static DEFINE_MUTEX(hash_resize_mutex);

/* Workqueue handler that doubles all three state hash tables.
 * The new tables are allocated before xfrm_state_lock is taken, so the
 * atomic section covers only the re-bucketing and pointer swap; the old
 * tables are freed after the lock is dropped.
 */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	/* Walking bydst alone is sufficient: every state is linked on
	 * bydst, and xfrm_hash_transfer() re-links all three nodes. */
	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
183
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Woken whenever the SAD changes (insert, flush, GC completion);
 * exported for key-manager use. */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction: dead states are queued on xfrm_state_gc_list
 * (under xfrm_state_gc_lock) and reaped by xfrm_state_gc_work. */
static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
200
/* Look up the per-family afinfo and return it with
 * xfrm_state_afinfo_lock held for writing.  On failure (bad family or
 * nothing registered) the lock is NOT held and NULL is returned, so
 * callers only pair a successful return with
 * xfrm_state_unlock_afinfo().
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
212
/* Release the write lock taken by a successful
 * xfrm_state_lock_afinfo(); @afinfo itself is unused. */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
218
219 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
220 {
221 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
222 const struct xfrm_type **typemap;
223 int err = 0;
224
225 if (unlikely(afinfo == NULL))
226 return -EAFNOSUPPORT;
227 typemap = afinfo->type_map;
228
229 if (likely(typemap[type->proto] == NULL))
230 typemap[type->proto] = type;
231 else
232 err = -EEXIST;
233 xfrm_state_unlock_afinfo(afinfo);
234 return err;
235 }
236 EXPORT_SYMBOL(xfrm_register_type);
237
238 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
239 {
240 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
241 const struct xfrm_type **typemap;
242 int err = 0;
243
244 if (unlikely(afinfo == NULL))
245 return -EAFNOSUPPORT;
246 typemap = afinfo->type_map;
247
248 if (unlikely(typemap[type->proto] != type))
249 err = -ENOENT;
250 else
251 typemap[type->proto] = NULL;
252 xfrm_state_unlock_afinfo(afinfo);
253 return err;
254 }
255 EXPORT_SYMBOL(xfrm_unregister_type);
256
/* Resolve the transform type for (proto, family) and take a module
 * reference on it.  If the type is absent, try once to autoload the
 * "xfrm-type-<family>-<proto>" module and retry.  Returns NULL when
 * unresolvable; the caller releases with xfrm_put_type().
 */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* A type whose module is unloading counts as absent. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
283
/* Drop the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
288
/* Register an encapsulation mode handler (indexed by mode->encap) for
 * @family.  Pins the owning afinfo module for as long as the mode is
 * registered.  Returns 0, -EINVAL for an out-of-range encap value,
 * -EAFNOSUPPORT when the family has no afinfo, -EEXIST if the slot is
 * taken, or -ENOENT if the afinfo module reference cannot be taken.
 */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
320
/* Unregister a mode previously installed by xfrm_register_mode(),
 * releasing the afinfo module reference it held.  Returns 0, -EINVAL
 * for an out-of-range encap value, -EAFNOSUPPORT when the family has
 * no afinfo, or -ENOENT if a different mode occupies the slot.
 */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
346
/* Resolve the mode handler for (encap, family) and take a module
 * reference on it; tries a one-shot "xfrm-mode-<family>-<encap>"
 * module autoload if absent.  Returns NULL when unresolvable; the
 * caller releases with xfrm_put_mode().
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* A mode whose module is unloading counts as absent. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
374
/* Drop the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
379
/* Final destruction of a dead state, run from the GC work item in
 * process context (del_timer_sync may sleep/spin).  Timers are stopped
 * first, then owned buffers are freed, mode/type module references are
 * dropped, and the type destructor runs before its module is released.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		/* Destructor may touch type state, so run it before
		 * dropping the type module reference. */
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
402
/* GC work handler: splice the pending list away under the GC lock,
 * then destroy each state without holding any lock.  Dead states are
 * chained through their bydst node (see __xfrm_state_destroy()).
 * Waiters on km_waitq are woken once the batch is reaped.
 */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
419
420 static inline unsigned long make_jiffies(long secs)
421 {
422 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
423 return MAX_SCHEDULE_TIMEOUT-1;
424 else
425 return secs*HZ;
426 }
427
/* Per-state lifetime timer.  Evaluates hard and soft add/use expiry,
 * notifies the key manager of soft expiry (warning) or hard expiry,
 * and deletes the state on hard expiry.  Runs with x->lock held; the
 * timer is re-armed for the nearest future deadline.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest deadline */
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	/* Hard limits: crossing either one expires the state. */
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* If never used yet, measure from "now" (zero delta). */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already signalled; don't warn again. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);	/* soft expiry notice */
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* An expired larval (ACQ) state without an SPI is polled again
	 * shortly instead of being deleted immediately. */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);	/* hard expiry notice */

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current), 0);

out:
	spin_unlock(&x->lock);
}
503
static void xfrm_replay_timer_handler(unsigned long data);

/* Allocate a new state: refcount 1, hash nodes and timers initialised
 * (timers not armed), byte/packet lifetimes unlimited.  GFP_ATOMIC
 * because callers such as xfrm_state_find() allocate under
 * xfrm_state_lock.  Returns NULL on allocation failure.
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
				(unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
535
/* Queue a dead (already unlinked) state for deferred destruction by
 * the GC work item.  The bydst node is reused as the GC list linkage.
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
546
/* Mark @x DEAD and unlink it from all hash tables, dropping the
 * reference that xfrm_state_alloc() gave.  Returns 0, or -ESRCH if
 * the state was already dead.  Callers in this file hold x->lock
 * (see xfrm_state_delete() and xfrm_timer_handler()); xfrm_state_lock
 * is taken here only around the unlink.
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
572
573 int xfrm_state_delete(struct xfrm_state *x)
574 {
575 int err;
576
577 spin_lock_bh(&x->lock);
578 err = __xfrm_state_delete(x);
579 spin_unlock_bh(&x->lock);
580
581 return err;
582 }
583 EXPORT_SYMBOL(xfrm_state_delete);
584
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight check for xfrm_state_flush(): ask the LSM whether every
 * state matching @proto may be deleted.  On the first refusal, audit
 * the failed deletion and return the LSM error so the flush aborts
 * before removing anything.  Called with xfrm_state_lock held.
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* No LSM support compiled in: flushing is always permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
615
/* Delete every non-kernel-owned state whose protocol matches @proto,
 * auditing each deletion.  xfrm_state_lock must be dropped around
 * xfrm_state_delete() (it takes x->lock and may notify), so each
 * bucket walk restarts from its head after re-acquiring the lock.
 * Returns 0, or an LSM error if the security pre-check refuses.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold x across the unlocked section so
				 * it cannot be freed under us. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
654
655 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
656 {
657 spin_lock_bh(&xfrm_state_lock);
658 si->sadcnt = xfrm_state_num;
659 si->sadhcnt = xfrm_state_hmask;
660 si->sadhmcnt = xfrm_state_hashmax;
661 spin_unlock_bh(&xfrm_state_lock);
662 }
663 EXPORT_SYMBOL(xfrm_sad_getinfo);
664
/* Fill @x with a temporary selector derived from the flow/template via
 * the per-family init_tempsel hook.  Returns 0, or -1 if @family has
 * no registered afinfo.
 */
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
678
/* Find a state by (daddr, spi, proto, family) in the byspi table.
 * Returns a held state or NULL.  Called with xfrm_state_lock held
 * (see xfrm_state_find() / xfrm_state_add()).
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
710
/* Find a state by (daddr, saddr, proto, family) in the bysrc table —
 * used when the state has no SPI to key on.  Returns a held state or
 * NULL.  Called with xfrm_state_lock held.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
745
746 static inline struct xfrm_state *
747 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
748 {
749 if (use_spi)
750 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
751 x->id.proto, family);
752 else
753 return __xfrm_state_lookup_byaddr(&x->id.daddr,
754 &x->props.saddr,
755 x->id.proto, family);
756 }
757
758 static void xfrm_hash_grow_check(int have_hash_collision)
759 {
760 if (have_hash_collision &&
761 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
762 xfrm_state_num > xfrm_state_hmask)
763 schedule_work(&xfrm_hash_work);
764 }
765
/* Main SA resolution routine for output: find a state matching the
 * flow/template/policy, preferring non-dying, most recently added
 * valid states.  If nothing usable exists, allocate a larval
 * (XFRM_STATE_ACQ) state and ask the key manager via km_query().
 *
 * Returns a held state, or NULL with *err set to:
 *   -EAGAIN  an acquire is already in progress
 *   -EEXIST  the template SPI maps to an existing non-matching state
 *   -ENOMEM  allocation failure
 *   -ESRCH   matching state is in ERROR/EXPIRED, or km_query() failed
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer states not yet dying; among
				 * equals, prefer the newest. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A fixed template SPI that resolves to a state which
		 * did not match above means a conflicting SA exists. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Install the larval state so concurrent
			 * lookups see the acquire in progress, and arm
			 * its expiry timer. */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
877
/* Like xfrm_state_find() but lookup-only: return the first VALID state
 * matching the given tuple, held, or NULL.  Never allocates and never
 * queries the key manager.
 *
 * NOTE(review): takes xfrm_state_lock with plain spin_lock(), not the
 * _bh variant used elsewhere in this file — presumably all callers run
 * in a BH-safe context; confirm before reuse.
 */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
909
/* Link @x into all applicable hash tables, stamp it with a fresh
 * generation id, arm its timers, and bump the SAD count.  Caller holds
 * xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	/* Only states that already own an SPI enter the byspi table. */
	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Fire the lifetime timer soon to (re)compute the deadlines. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
940
/* xfrm_state_lock is held */
/* Refresh the generation id of every existing state sharing @xnew's
 * (family, reqid, daddr, saddr) tuple, so cached bundles built on the
 * old states are invalidated when @xnew is inserted. */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
959
/* Unconditionally insert @x into the SAD, bumping generation ids of
 * states it supersedes.  Unlike xfrm_state_add(), no duplicate check
 * is performed. */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
968
/* xfrm_state_lock is held */
/* Find an existing larval (ACQ, no-SPI) state for the given tuple and
 * return it held; when none exists and @create is set, allocate and
 * install a new larval state (two references: one for the table, one
 * returned) with its expiry timer armed.  Returns NULL when not found
 * and @create is zero, or on allocation failure. */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* Temporary selector matches exactly the one host pair. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra reference for the hash tables; the alloc
		 * reference is handed to the caller. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1054
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Add @x to the SAD.  Fails with -EEXIST if an equivalent state is
 * already installed.  If a matching larval (ACQ) state exists — found
 * by the key-manager sequence number or by tuple — it is deleted after
 * the insert so the new state replaces it.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* The seq-matched larval must agree on proto and daddr
		 * to be considered the acquire we are resolving. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Retire the larval state outside xfrm_state_lock. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1105
#ifdef CONFIG_XFRM_MIGRATE
/* Duplicate @orig for migration: copies id/selector/lifetime/props,
 * deep-copies algorithm, encap and care-of-address buffers, then runs
 * xfrm_init_state() on the copy.  Returns the new state, or NULL with
 * *errp set (when @errp is non-NULL) on failure.
 *
 * NOTE(review): the error path frees the copy with plain kfree() after
 * releasing its buffers — the timers set up by xfrm_state_alloc() were
 * never armed, but this bypasses the normal GC teardown; confirm
 * against later upstream revisions of this function.
 */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
1182
/* xfrm_state_lock is held */
/* Find the state that migration entry @m describes: by
 * (old_daddr, old_saddr, reqid) in the bydst table when a reqid is
 * given, otherwise by address pair in the bysrc table.  Returns a held
 * state or NULL. */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1227
1228 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1229 struct xfrm_migrate *m)
1230 {
1231 struct xfrm_state *xc;
1232 int err;
1233
1234 xc = xfrm_state_clone(x, &err);
1235 if (!xc)
1236 return NULL;
1237
1238 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1239 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1240
1241 /* add state */
1242 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1243 /* a care is needed when the destination address of the
1244 state is to be updated as it is a part of triplet */
1245 xfrm_state_insert(xc);
1246 } else {
1247 if ((err = xfrm_state_add(xc)) < 0)
1248 goto error;
1249 }
1250
1251 return xc;
1252 error:
1253 kfree(xc);
1254 return NULL;
1255 }
1256 EXPORT_SYMBOL(xfrm_state_migrate);
1257 #endif
1258
/*
 * xfrm_state_update - refresh an existing SA from a userland-supplied one.
 * @x: new state carrying the updated parameters
 *
 * Phase 1 (under xfrm_state_lock): find the installed twin of @x.  If the
 * twin is only a larval ACQ entry, @x itself is inserted and the larva is
 * deleted afterwards.  Phase 2 (under the twin's own lock): copy the
 * updatable fields (encap, coaddr, selector, lifetimes) into the twin.
 *
 * Returns 0 on success, -ESRCH if no matching state exists, -EEXIST for
 * kernel-owned states, -EINVAL if the twin is no longer VALID.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	/* states created by the kernel itself may not be replaced */
	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	/* larval state: promote @x to a full entry; x = NULL flags that
	 * ownership of @x has passed to the tables */
	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* @x was inserted above; retire the larval entry */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* re-arm the lifetime timer and re-evaluate expiry against
		 * the freshly copied limits */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1322
1323 int xfrm_state_check_expire(struct xfrm_state *x)
1324 {
1325 if (!x->curlft.use_time)
1326 x->curlft.use_time = get_seconds();
1327
1328 if (x->km.state != XFRM_STATE_VALID)
1329 return -EINVAL;
1330
1331 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1332 x->curlft.packets >= x->lft.hard_packet_limit) {
1333 x->km.state = XFRM_STATE_EXPIRED;
1334 mod_timer(&x->timer, jiffies);
1335 return -EINVAL;
1336 }
1337
1338 if (!x->km.dying &&
1339 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1340 x->curlft.packets >= x->lft.soft_packet_limit)) {
1341 x->km.dying = 1;
1342 km_state_expired(x, 0, 0);
1343 }
1344 return 0;
1345 }
1346 EXPORT_SYMBOL(xfrm_state_check_expire);
1347
1348 struct xfrm_state *
1349 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1350 unsigned short family)
1351 {
1352 struct xfrm_state *x;
1353
1354 spin_lock_bh(&xfrm_state_lock);
1355 x = __xfrm_state_lookup(daddr, spi, proto, family);
1356 spin_unlock_bh(&xfrm_state_lock);
1357 return x;
1358 }
1359 EXPORT_SYMBOL(xfrm_state_lookup);
1360
1361 struct xfrm_state *
1362 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1363 u8 proto, unsigned short family)
1364 {
1365 struct xfrm_state *x;
1366
1367 spin_lock_bh(&xfrm_state_lock);
1368 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1369 spin_unlock_bh(&xfrm_state_lock);
1370 return x;
1371 }
1372 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1373
1374 struct xfrm_state *
1375 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1376 xfrm_address_t *daddr, xfrm_address_t *saddr,
1377 int create, unsigned short family)
1378 {
1379 struct xfrm_state *x;
1380
1381 spin_lock_bh(&xfrm_state_lock);
1382 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1383 spin_unlock_bh(&xfrm_state_lock);
1384
1385 return x;
1386 }
1387 EXPORT_SYMBOL(xfrm_find_acq);
1388
1389 #ifdef CONFIG_XFRM_SUB_POLICY
1390 int
1391 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1392 unsigned short family)
1393 {
1394 int err = 0;
1395 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1396 if (!afinfo)
1397 return -EAFNOSUPPORT;
1398
1399 spin_lock_bh(&xfrm_state_lock);
1400 if (afinfo->tmpl_sort)
1401 err = afinfo->tmpl_sort(dst, src, n);
1402 spin_unlock_bh(&xfrm_state_lock);
1403 xfrm_state_put_afinfo(afinfo);
1404 return err;
1405 }
1406 EXPORT_SYMBOL(xfrm_tmpl_sort);
1407
1408 int
1409 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1410 unsigned short family)
1411 {
1412 int err = 0;
1413 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1414 if (!afinfo)
1415 return -EAFNOSUPPORT;
1416
1417 spin_lock_bh(&xfrm_state_lock);
1418 if (afinfo->state_sort)
1419 err = afinfo->state_sort(dst, src, n);
1420 spin_unlock_bh(&xfrm_state_lock);
1421 xfrm_state_put_afinfo(afinfo);
1422 return err;
1423 }
1424 EXPORT_SYMBOL(xfrm_state_sort);
1425 #endif
1426
1427 /* Silly enough, but I'm lazy to build resolution list */
1428
1429 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1430 {
1431 int i;
1432
1433 for (i = 0; i <= xfrm_state_hmask; i++) {
1434 struct hlist_node *entry;
1435 struct xfrm_state *x;
1436
1437 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1438 if (x->km.seq == seq &&
1439 x->km.state == XFRM_STATE_ACQ) {
1440 xfrm_state_hold(x);
1441 return x;
1442 }
1443 }
1444 }
1445 return NULL;
1446 }
1447
1448 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1449 {
1450 struct xfrm_state *x;
1451
1452 spin_lock_bh(&xfrm_state_lock);
1453 x = __xfrm_find_acq_byseq(seq);
1454 spin_unlock_bh(&xfrm_state_lock);
1455 return x;
1456 }
1457 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1458
1459 u32 xfrm_get_acqseq(void)
1460 {
1461 u32 res;
1462 static u32 acqseq;
1463 static DEFINE_SPINLOCK(acqseq_lock);
1464
1465 spin_lock_bh(&acqseq_lock);
1466 res = (++acqseq ? : ++acqseq);
1467 spin_unlock_bh(&acqseq_lock);
1468 return res;
1469 }
1470 EXPORT_SYMBOL(xfrm_get_acqseq);
1471
/*
 * xfrm_alloc_spi - assign an SPI to @x from the [low, high] range.
 * @x:    state to receive the SPI (must not already have one)
 * @low:  lowest acceptable SPI, host byte order
 * @high: highest acceptable SPI, host byte order
 *
 * With low == high the single requested SPI is taken iff it is free;
 * otherwise up to (high - low + 1) random probes are made within the
 * range.  On success the state is hashed into the by-SPI table.
 *
 * Returns 0 on success (also when an SPI was already set), -ENOENT when
 * no free SPI could be found or the state is already dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* already has an SPI: nothing to do */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			/* the one requested SPI is taken */
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* random probing; give up after range-size attempts */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* make the state findable by its new SPI */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1524
/*
 * xfrm_state_walk - invoke @func on every state matching @proto.
 * @proto: protocol filter (IPSEC_PROTO_ANY matches all)
 * @func:  callback; second argument is a running 1-based count, except
 *         that the final entry is delivered with count == 0 so the
 *         callback can tell it is seeing the last state
 * @data:  opaque cookie passed through to @func
 *
 * The callback for each entry is deferred until the next match is found
 * ("last" lags one entry behind), which is how the final entry can be
 * flagged.  The whole walk runs under xfrm_state_lock, so @func must not
 * sleep or re-enter xfrm state code.
 *
 * Returns 0, the first non-zero value returned by @func, or -ENOENT when
 * no state matched at all.
 */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	/* count == 0 marks the final entry for the callback */
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1558
1559
/*
 * xfrm_replay_notify - push replay counter updates to the key managers.
 * @x:     state whose counters changed (caller holds x->lock)
 * @event: XFRM_REPLAY_UPDATE or XFRM_REPLAY_TIMEOUT
 */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated on of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* below-threshold delta: suppress the notification unless a
		 * timeout was deferred earlier, in which case treat this as
		 * the pending timeout */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* nothing changed since the last notification: defer */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* snapshot the counters we are about to report */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1606
1607 static void xfrm_replay_timer_handler(unsigned long data)
1608 {
1609 struct xfrm_state *x = (struct xfrm_state*)data;
1610
1611 spin_lock(&x->lock);
1612
1613 if (x->km.state == XFRM_STATE_VALID) {
1614 if (xfrm_aevent_is_on())
1615 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1616 else
1617 x->xflags |= XFRM_TIME_DEFER;
1618 }
1619
1620 spin_unlock(&x->lock);
1621 }
1622
1623 int xfrm_replay_check(struct xfrm_state *x,
1624 struct sk_buff *skb, __be32 net_seq)
1625 {
1626 u32 diff;
1627 u32 seq = ntohl(net_seq);
1628
1629 if (unlikely(seq == 0))
1630 goto err;
1631
1632 if (likely(seq > x->replay.seq))
1633 return 0;
1634
1635 diff = x->replay.seq - seq;
1636 if (diff >= min_t(unsigned int, x->props.replay_window,
1637 sizeof(x->replay.bitmap) * 8)) {
1638 x->stats.replay_window++;
1639 goto err;
1640 }
1641
1642 if (x->replay.bitmap & (1U << diff)) {
1643 x->stats.replay++;
1644 goto err;
1645 }
1646 return 0;
1647
1648 err:
1649 xfrm_audit_state_replay(x, skb, net_seq);
1650 return -EINVAL;
1651 }
1652
1653 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1654 {
1655 u32 diff;
1656 u32 seq = ntohl(net_seq);
1657
1658 if (seq > x->replay.seq) {
1659 diff = seq - x->replay.seq;
1660 if (diff < x->props.replay_window)
1661 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1662 else
1663 x->replay.bitmap = 1;
1664 x->replay.seq = seq;
1665 } else {
1666 diff = x->replay.seq - seq;
1667 x->replay.bitmap |= (1U << diff);
1668 }
1669
1670 if (xfrm_aevent_is_on())
1671 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1672 }
1673
1674 static LIST_HEAD(xfrm_km_list);
1675 static DEFINE_RWLOCK(xfrm_km_lock);
1676
1677 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1678 {
1679 struct xfrm_mgr *km;
1680
1681 read_lock(&xfrm_km_lock);
1682 list_for_each_entry(km, &xfrm_km_list, list)
1683 if (km->notify_policy)
1684 km->notify_policy(xp, dir, c);
1685 read_unlock(&xfrm_km_lock);
1686 }
1687
1688 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1689 {
1690 struct xfrm_mgr *km;
1691 read_lock(&xfrm_km_lock);
1692 list_for_each_entry(km, &xfrm_km_list, list)
1693 if (km->notify)
1694 km->notify(x, c);
1695 read_unlock(&xfrm_km_lock);
1696 }
1697
1698 EXPORT_SYMBOL(km_policy_notify);
1699 EXPORT_SYMBOL(km_state_notify);
1700
1701 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1702 {
1703 struct km_event c;
1704
1705 c.data.hard = hard;
1706 c.pid = pid;
1707 c.event = XFRM_MSG_EXPIRE;
1708 km_state_notify(x, &c);
1709
1710 if (hard)
1711 wake_up(&km_waitq);
1712 }
1713
1714 EXPORT_SYMBOL(km_state_expired);
1715 /*
1716 * We send to all registered managers regardless of failure
1717 * We are happy with one success
1718 */
1719 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1720 {
1721 int err = -EINVAL, acqret;
1722 struct xfrm_mgr *km;
1723
1724 read_lock(&xfrm_km_lock);
1725 list_for_each_entry(km, &xfrm_km_list, list) {
1726 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1727 if (!acqret)
1728 err = acqret;
1729 }
1730 read_unlock(&xfrm_km_lock);
1731 return err;
1732 }
1733 EXPORT_SYMBOL(km_query);
1734
1735 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1736 {
1737 int err = -EINVAL;
1738 struct xfrm_mgr *km;
1739
1740 read_lock(&xfrm_km_lock);
1741 list_for_each_entry(km, &xfrm_km_list, list) {
1742 if (km->new_mapping)
1743 err = km->new_mapping(x, ipaddr, sport);
1744 if (!err)
1745 break;
1746 }
1747 read_unlock(&xfrm_km_lock);
1748 return err;
1749 }
1750 EXPORT_SYMBOL(km_new_mapping);
1751
1752 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1753 {
1754 struct km_event c;
1755
1756 c.data.hard = hard;
1757 c.pid = pid;
1758 c.event = XFRM_MSG_POLEXPIRE;
1759 km_policy_notify(pol, dir, &c);
1760
1761 if (hard)
1762 wake_up(&km_waitq);
1763 }
1764 EXPORT_SYMBOL(km_policy_expired);
1765
#ifdef CONFIG_XFRM_MIGRATE
/*
 * Forward a MIGRATE request to every key manager implementing it;
 * succeeds if at least one manager accepted it.
 */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_mgr *km;
	int err = -EINVAL;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (!km->migrate)
			continue;
		if (km->migrate(sel, dir, type, m, num_migrate) == 0)
			err = 0;
	}
	read_unlock(&xfrm_km_lock);

	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
1787
1788 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1789 {
1790 int err = -EINVAL;
1791 int ret;
1792 struct xfrm_mgr *km;
1793
1794 read_lock(&xfrm_km_lock);
1795 list_for_each_entry(km, &xfrm_km_list, list) {
1796 if (km->report) {
1797 ret = km->report(proto, sel, addr);
1798 if (!ret)
1799 err = ret;
1800 }
1801 }
1802 read_unlock(&xfrm_km_lock);
1803 return err;
1804 }
1805 EXPORT_SYMBOL(km_report);
1806
/*
 * xfrm_user_policy - install a per-socket policy supplied via setsockopt.
 * @sk:      socket the policy attaches to
 * @optname: IP_XFRM_POLICY / IPV6_XFRM_POLICY selector
 * @optval:  userspace buffer holding the encoded policy
 * @optlen:  its length; must be in (0, PAGE_SIZE]
 *
 * Each registered key manager is asked in turn to compile the opaque
 * blob; the first one that understands it (err >= 0, where err encodes
 * the direction) wins and the resulting policy is inserted on the
 * socket.  Returns 0 on success or a negative errno.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		/* err >= 0 means this manager compiled it; err is the
		 * policy direction for the insert below */
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1846
/*
 * xfrm_register_km - add a key manager (e.g. af_key, xfrm_user) to the
 * broadcast list.  Always succeeds.
 */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1855
/*
 * xfrm_unregister_km - remove a previously registered key manager.
 * Always succeeds.
 */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1864
1865 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1866 {
1867 int err = 0;
1868 if (unlikely(afinfo == NULL))
1869 return -EINVAL;
1870 if (unlikely(afinfo->family >= NPROTO))
1871 return -EAFNOSUPPORT;
1872 write_lock_bh(&xfrm_state_afinfo_lock);
1873 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1874 err = -ENOBUFS;
1875 else
1876 xfrm_state_afinfo[afinfo->family] = afinfo;
1877 write_unlock_bh(&xfrm_state_afinfo_lock);
1878 return err;
1879 }
1880 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1881
1882 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1883 {
1884 int err = 0;
1885 if (unlikely(afinfo == NULL))
1886 return -EINVAL;
1887 if (unlikely(afinfo->family >= NPROTO))
1888 return -EAFNOSUPPORT;
1889 write_lock_bh(&xfrm_state_afinfo_lock);
1890 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1891 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1892 err = -EINVAL;
1893 else
1894 xfrm_state_afinfo[afinfo->family] = NULL;
1895 }
1896 write_unlock_bh(&xfrm_state_afinfo_lock);
1897 return err;
1898 }
1899 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1900
/*
 * Look up the per-family state ops.  On success the afinfo read lock is
 * LEFT HELD and must be dropped with xfrm_state_put_afinfo(); on failure
 * (family out of range or no handler registered) the lock is released
 * here and NULL is returned.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1912
/*
 * Release the read lock taken by a successful xfrm_state_get_afinfo().
 * The afinfo argument is unused; it documents what is being released.
 */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1918
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/*
 * Detach @x from its inner tunnel state: when only two users remain on
 * the tunnel state (presumably @x and the tunnel's own owner — the
 * tunnel_users convention is defined by its callers), the tunnel state
 * is deleted; in any case one user count and one reference are dropped.
 */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1933
1934 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1935 {
1936 int res;
1937
1938 spin_lock_bh(&x->lock);
1939 if (x->km.state == XFRM_STATE_VALID &&
1940 x->type && x->type->get_mtu)
1941 res = x->type->get_mtu(x, mtu);
1942 else
1943 res = mtu - x->props.header_len;
1944 spin_unlock_bh(&x->lock);
1945 return res;
1946 }
1947
1948 int xfrm_init_state(struct xfrm_state *x)
1949 {
1950 struct xfrm_state_afinfo *afinfo;
1951 struct xfrm_mode *inner_mode;
1952 int family = x->props.family;
1953 int err;
1954
1955 err = -EAFNOSUPPORT;
1956 afinfo = xfrm_state_get_afinfo(family);
1957 if (!afinfo)
1958 goto error;
1959
1960 err = 0;
1961 if (afinfo->init_flags)
1962 err = afinfo->init_flags(x);
1963
1964 xfrm_state_put_afinfo(afinfo);
1965
1966 if (err)
1967 goto error;
1968
1969 err = -EPROTONOSUPPORT;
1970
1971 if (x->sel.family != AF_UNSPEC) {
1972 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
1973 if (inner_mode == NULL)
1974 goto error;
1975
1976 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
1977 family != x->sel.family) {
1978 xfrm_put_mode(inner_mode);
1979 goto error;
1980 }
1981
1982 x->inner_mode = inner_mode;
1983 } else {
1984 struct xfrm_mode *inner_mode_iaf;
1985
1986 inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
1987 if (inner_mode == NULL)
1988 goto error;
1989
1990 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
1991 xfrm_put_mode(inner_mode);
1992 goto error;
1993 }
1994
1995 inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
1996 if (inner_mode_iaf == NULL)
1997 goto error;
1998
1999 if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
2000 xfrm_put_mode(inner_mode_iaf);
2001 goto error;
2002 }
2003
2004 if (x->props.family == AF_INET) {
2005 x->inner_mode = inner_mode;
2006 x->inner_mode_iaf = inner_mode_iaf;
2007 } else {
2008 x->inner_mode = inner_mode_iaf;
2009 x->inner_mode_iaf = inner_mode;
2010 }
2011 }
2012
2013 x->type = xfrm_get_type(x->id.proto, family);
2014 if (x->type == NULL)
2015 goto error;
2016
2017 err = x->type->init_state(x);
2018 if (err)
2019 goto error;
2020
2021 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2022 if (x->outer_mode == NULL)
2023 goto error;
2024
2025 x->km.state = XFRM_STATE_VALID;
2026
2027 error:
2028 return err;
2029 }
2030
2031 EXPORT_SYMBOL(xfrm_init_state);
2032
2033 void __init xfrm_state_init(void)
2034 {
2035 unsigned int sz;
2036
2037 sz = sizeof(struct hlist_head) * 8;
2038
2039 xfrm_state_bydst = xfrm_hash_alloc(sz);
2040 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2041 xfrm_state_byspi = xfrm_hash_alloc(sz);
2042 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2043 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2044 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2045
2046 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2047 }
2048
2049 #ifdef CONFIG_AUDITSYSCALL
2050 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2051 struct audit_buffer *audit_buf)
2052 {
2053 struct xfrm_sec_ctx *ctx = x->security;
2054 u32 spi = ntohl(x->id.spi);
2055
2056 if (ctx)
2057 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2058 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2059
2060 switch(x->props.family) {
2061 case AF_INET:
2062 audit_log_format(audit_buf,
2063 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2064 NIPQUAD(x->props.saddr.a4),
2065 NIPQUAD(x->id.daddr.a4));
2066 break;
2067 case AF_INET6:
2068 audit_log_format(audit_buf,
2069 " src=" NIP6_FMT " dst=" NIP6_FMT,
2070 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2071 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2072 break;
2073 }
2074
2075 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2076 }
2077
2078 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2079 struct audit_buffer *audit_buf)
2080 {
2081 struct iphdr *iph4;
2082 struct ipv6hdr *iph6;
2083
2084 switch (family) {
2085 case AF_INET:
2086 iph4 = ip_hdr(skb);
2087 audit_log_format(audit_buf,
2088 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2089 NIPQUAD(iph4->saddr),
2090 NIPQUAD(iph4->daddr));
2091 break;
2092 case AF_INET6:
2093 iph6 = ipv6_hdr(skb);
2094 audit_log_format(audit_buf,
2095 " src=" NIP6_FMT " dst=" NIP6_FMT
2096 " flowlbl=0x%x%x%x",
2097 NIP6(iph6->saddr),
2098 NIP6(iph6->daddr),
2099 iph6->flow_lbl[0] & 0x0f,
2100 iph6->flow_lbl[1],
2101 iph6->flow_lbl[2]);
2102 break;
2103 }
2104 }
2105
2106 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2107 u32 auid, u32 secid)
2108 {
2109 struct audit_buffer *audit_buf;
2110
2111 audit_buf = xfrm_audit_start("SAD-add");
2112 if (audit_buf == NULL)
2113 return;
2114 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2115 xfrm_audit_helper_sainfo(x, audit_buf);
2116 audit_log_format(audit_buf, " res=%u", result);
2117 audit_log_end(audit_buf);
2118 }
2119 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2120
2121 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2122 u32 auid, u32 secid)
2123 {
2124 struct audit_buffer *audit_buf;
2125
2126 audit_buf = xfrm_audit_start("SAD-delete");
2127 if (audit_buf == NULL)
2128 return;
2129 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2130 xfrm_audit_helper_sainfo(x, audit_buf);
2131 audit_log_format(audit_buf, " res=%u", result);
2132 audit_log_end(audit_buf);
2133 }
2134 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2135
2136 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2137 struct sk_buff *skb)
2138 {
2139 struct audit_buffer *audit_buf;
2140 u32 spi;
2141
2142 audit_buf = xfrm_audit_start("SA-replay-overflow");
2143 if (audit_buf == NULL)
2144 return;
2145 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2146 /* don't record the sequence number because it's inherent in this kind
2147 * of audit message */
2148 spi = ntohl(x->id.spi);
2149 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2150 audit_log_end(audit_buf);
2151 }
2152 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2153
2154 static void xfrm_audit_state_replay(struct xfrm_state *x,
2155 struct sk_buff *skb, __be32 net_seq)
2156 {
2157 struct audit_buffer *audit_buf;
2158 u32 spi;
2159
2160 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2161 if (audit_buf == NULL)
2162 return;
2163 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2164 spi = ntohl(x->id.spi);
2165 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2166 spi, spi, ntohl(net_seq));
2167 audit_log_end(audit_buf);
2168 }
2169
2170 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2171 {
2172 struct audit_buffer *audit_buf;
2173
2174 audit_buf = xfrm_audit_start("SA-notfound");
2175 if (audit_buf == NULL)
2176 return;
2177 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2178 audit_log_end(audit_buf);
2179 }
2180 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2181
2182 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2183 __be32 net_spi, __be32 net_seq)
2184 {
2185 struct audit_buffer *audit_buf;
2186 u32 spi;
2187
2188 audit_buf = xfrm_audit_start("SA-notfound");
2189 if (audit_buf == NULL)
2190 return;
2191 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2192 spi = ntohl(net_spi);
2193 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2194 spi, spi, ntohl(net_seq));
2195 audit_log_end(audit_buf);
2196 }
2197 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2198
2199 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2200 struct sk_buff *skb, u8 proto)
2201 {
2202 struct audit_buffer *audit_buf;
2203 __be32 net_spi;
2204 __be32 net_seq;
2205
2206 audit_buf = xfrm_audit_start("SA-icv-failure");
2207 if (audit_buf == NULL)
2208 return;
2209 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2210 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2211 u32 spi = ntohl(net_spi);
2212 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2213 spi, spi, ntohl(net_seq));
2214 }
2215 audit_log_end(audit_buf);
2216 }
2217 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2218 #endif /* CONFIG_AUDITSYSCALL */
This page took 0.073654 seconds and 6 git commands to generate.