Merge branch 'master' of ../net-2.6/
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used by the key-manager netlink interface (xfrm_user). */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Async-event (AEVENT) rate-limiting knobs, tunable via sysctl. */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Lifetime (seconds) granted to larval ACQUIRE states. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects all of the hash tables, xfrm_state_all and the counters below. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static LIST_HEAD(xfrm_state_all);
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;	/* buckets - 1 */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;	/* total number of hashed states */
static unsigned int xfrm_state_genid;	/* bumped on insert; invalidates cached bundles */

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq);
#else
#define xfrm_audit_state_replay(x, s, sq)	do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */
71
72 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
73 xfrm_address_t *saddr,
74 u32 reqid,
75 unsigned short family)
76 {
77 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
78 }
79
80 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
81 xfrm_address_t *saddr,
82 unsigned short family)
83 {
84 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
85 }
86
87 static inline unsigned int
88 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
89 {
90 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
91 }
92
/* Rehash every state chained on @list (a bydst bucket) into the new
 * tables using @nhashmask.  Each state's bysrc and byspi links are
 * re-chained as well, so walking only the bydst buckets covers all
 * three tables.  Caller holds xfrm_state_lock.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* Larval (ACQ) states have no SPI yet and are not
		 * present in the by-SPI table.
		 */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
123
124 static unsigned long xfrm_hash_new_size(void)
125 {
126 return ((xfrm_state_hmask + 1) << 1) *
127 sizeof(struct hlist_head);
128 }
129
/* Serializes concurrent resize requests queued via xfrm_hash_work. */
static DEFINE_MUTEX(hash_resize_mutex);

/* Workqueue handler that doubles all three state hash tables.
 * New tables are allocated outside xfrm_state_lock; the lock is held
 * only while entries are transferred and the table pointers and mask
 * are swapped.  The old tables are freed after the lock is dropped.
 */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	/* Walking bydst alone is sufficient: xfrm_hash_transfer()
	 * re-chains each state's bysrc/byspi entries too.
	 */
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
184
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Key managers sleep here waiting for SAD changes (acquire/expire events). */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Protects the per-family afinfo registration table below. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction: dead states are queued on the gc list and
 * freed from process context by xfrm_state_gc_task().
 */
static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
201
/* Look up the afinfo for @family and return it with
 * xfrm_state_afinfo_lock held for writing.  Returns NULL with the
 * lock already released if the family is out of range or not
 * registered; otherwise the caller must drop the lock via
 * xfrm_state_unlock_afinfo().
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
213
/* Drop the write lock taken by a successful xfrm_state_lock_afinfo().
 * @afinfo is unused; it documents the pairing for callers and sparse.
 */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
219
220 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
221 {
222 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
223 const struct xfrm_type **typemap;
224 int err = 0;
225
226 if (unlikely(afinfo == NULL))
227 return -EAFNOSUPPORT;
228 typemap = afinfo->type_map;
229
230 if (likely(typemap[type->proto] == NULL))
231 typemap[type->proto] = type;
232 else
233 err = -EEXIST;
234 xfrm_state_unlock_afinfo(afinfo);
235 return err;
236 }
237 EXPORT_SYMBOL(xfrm_register_type);
238
239 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
240 {
241 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
242 const struct xfrm_type **typemap;
243 int err = 0;
244
245 if (unlikely(afinfo == NULL))
246 return -EAFNOSUPPORT;
247 typemap = afinfo->type_map;
248
249 if (unlikely(typemap[type->proto] != type))
250 err = -ENOENT;
251 else
252 typemap[type->proto] = NULL;
253 xfrm_state_unlock_afinfo(afinfo);
254 return err;
255 }
256 EXPORT_SYMBOL(xfrm_unregister_type);
257
/* Look up the transform for @proto on @family and pin its module.
 * On a miss, try once to load "xfrm-type-<family>-<proto>" and retry.
 * Returns NULL if no type could be found or pinned; release with
 * xfrm_put_type().
 */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* The type's module may be unloading; treat that as a miss. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
284
/* Release the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
289
/* Register encapsulation @mode for @family, pinning the owning
 * afinfo module for as long as the mode stays registered.
 * Returns 0, -EINVAL for a bad encap id, -EAFNOSUPPORT if the family
 * has no afinfo, -EEXIST if the slot is taken, or -ENOENT if the
 * afinfo module is going away.
 */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
321
/* Unregister encapsulation @mode from @family and drop the afinfo
 * module reference taken at registration.  Returns 0, -EINVAL for a
 * bad encap id, -EAFNOSUPPORT, or -ENOENT if the slot does not hold
 * @mode.
 */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
347
/* Look up the mode handler for @encap on @family and pin its module.
 * On a miss, try once to load "xfrm-mode-<family>-<encap>" and retry.
 * Returns NULL on failure; release with xfrm_put_mode().
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* The mode's module may be unloading; treat that as a miss. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
375
/* Release the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
380
/* Final teardown of a dead state: stop its timers, free all attached
 * allocations, and drop the type/mode module references.  Runs from
 * the gc work item in process context, which makes the sleeping
 * del_timer_sync() calls safe.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
401
/* Work item: splice the pending gc list under the lock, then destroy
 * each queued state outside it.  The bydst node doubles as the
 * gc-list link (see __xfrm_state_destroy()).  Wakes km_waitq so
 * waiters can observe the SAD shrinking.
 */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
418
419 static inline unsigned long make_jiffies(long secs)
420 {
421 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
422 return MAX_SCHEDULE_TIMEOUT-1;
423 else
424 return secs*HZ;
425 }
426
/* Per-state lifetime timer.  Soft expiry marks the state dying and
 * notifies key managers (km_state_expired with hard=0); hard expiry
 * deletes the state.  Re-arms itself for the nearest remaining
 * deadline.  Runs in timer (softirq) context, hence spin_lock.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the closest deadline */
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* A never-used state counts its use-lifetime from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft limits fire only once; skip them once already dying. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* Larval (ACQ, no SPI) states just flip to EXPIRED and poll
	 * briefly so a racing negotiation can still complete.
	 */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current), 0);

out:
	spin_unlock(&x->lock);
}
502
static void xfrm_replay_timer_handler(unsigned long data);

/* Allocate a zeroed xfrm_state holding one reference, with infinite
 * byte/packet lifetimes and its lifetime/replay timers initialized
 * but not armed.  Uses GFP_ATOMIC because callers may hold spinlocks.
 * Returns NULL on allocation failure.
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
				(unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
533
/* Called when the last reference is dropped: unlink from the global
 * list and queue the state for deferred destruction.  The state is
 * already unhashed (it must be DEAD), so its bydst node is reused as
 * the gc-list link.
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_lock);
	list_del(&x->all);
	spin_unlock_bh(&xfrm_state_lock);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
548
/* Mark @x dead and unhash it.  Caller holds x->lock.  Returns 0 on
 * success or -ESRCH if the state was already dead.  Drops the
 * creation reference; the caller's own reference keeps @x alive
 * until it is put.
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
574
/* Delete @x under its own lock; see __xfrm_state_delete() for the
 * return values.
 */
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
586
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for xfrm_state_flush(): verify the LSM permits deleting
 * every state matching @proto before any is actually deleted, so a
 * flush is all-or-nothing security-wise.  Caller holds
 * xfrm_state_lock.  Returns 0 or the first security error; the
 * refused deletion is audited as a failure.
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
617
/* Delete every state whose protocol matches @proto.  The state lock
 * must be dropped around each deletion (km notifiers and audit may
 * not run under it), so each bucket walk restarts from the head after
 * a deletion.  Returns 0, or a security error from the pre-check.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold @x across the unlocked region. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
656
657 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
658 {
659 spin_lock_bh(&xfrm_state_lock);
660 si->sadcnt = xfrm_state_num;
661 si->sadhcnt = xfrm_state_hmask;
662 si->sadhmcnt = xfrm_state_hashmax;
663 spin_unlock_bh(&xfrm_state_lock);
664 }
665 EXPORT_SYMBOL(xfrm_sad_getinfo);
666
/* Initialize the temporary selector of a larval state from the flow
 * and template by delegating to the per-family init_tempsel hook.
 * Returns 0, or -1 if the family has no afinfo registered.
 */
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
680
/* Find a state by (daddr, spi, proto) in the by-SPI table.  Caller
 * holds xfrm_state_lock.  Returns a referenced state or NULL.
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		/* The hash may collide across families; compare the
		 * destination address per family.
		 */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
712
/* Find a state by (daddr, saddr, proto) in the by-source table, for
 * states identified by address pair rather than SPI.  Caller holds
 * xfrm_state_lock.  Returns a referenced state or NULL.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
747
748 static inline struct xfrm_state *
749 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
750 {
751 if (use_spi)
752 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
753 x->id.proto, family);
754 else
755 return __xfrm_state_lookup_byaddr(&x->id.daddr,
756 &x->props.saddr,
757 x->id.proto, family);
758 }
759
760 static void xfrm_hash_grow_check(int have_hash_collision)
761 {
762 if (have_hash_collision &&
763 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
764 xfrm_state_num > xfrm_state_hmask)
765 schedule_work(&xfrm_hash_work);
766 }
767
/* Main output-path SA resolution.  Under xfrm_state_lock, scan the
 * destination bucket for a state matching the template and policy,
 * preferring a non-dying, most recently added VALID state.  If none
 * exists and no acquire is already pending, create a larval ACQ state
 * and ask the key managers to negotiate (km_query).  Returns a
 * referenced state, or NULL with *err set (-EAGAIN while an acquire
 * is in flight).
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying, then youngest. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A state with this exact SPI already exists under a
		 * different triple; refuse rather than collide.
		 */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Hash the larval state and start the acquire
			 * expiry timer.
			 */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
879
/* Read-only variant of xfrm_state_find(): return a referenced VALID
 * state matching (mode, proto, reqid) and the address pair, or NULL.
 * Never creates larval states or notifies key managers.
 */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
911
/* Link @x into the global list and all applicable hash tables, arm
 * its timers, and bump the generation counter so cached bundles get
 * revalidated.  Caller holds xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	list_add_tail(&x->all, &xfrm_state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Fire the lifetime timer soon so limits are evaluated. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
944
/* xfrm_state_lock is held */

/* Refresh the genid of every state sharing @xnew's (saddr, daddr,
 * reqid), forcing bundles built on those states to be rebuilt.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
963
/* Insert a fully configured state into the SAD, invalidating bundles
 * that share its (saddr, daddr, reqid) first.
 */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
972
/* xfrm_state_lock is held */

/* Find a larval (ACQ, zero-SPI) state matching the given triple and
 * address pair.  If none exists and @create is set, allocate one with
 * a host-wide selector, hash it (bydst/bysrc only — it has no SPI)
 * and arm the acquire timeout.  A created state is returned with an
 * extra reference (one for the hash tables, one for the caller).
 * Returns a referenced state or NULL.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* Selector covers exactly the requested host pair. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Second reference: the hash tables' own hold. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1058
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Add a new, fully negotiated state to the SAD.  Fails with -EEXIST
 * if an equivalent state is already installed.  Any matching larval
 * ACQ state (found by km sequence number or by triple) is deleted
 * after the insert so packets queued on it move to the new SA.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* The seq-matched larva must also match proto/daddr. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		/* Retire the superseded larval state. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1109
#ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy @orig: id, selector, lifetime limits, algorithm configs,
 * encapsulation and care-of address, then re-run xfrm_init_state()
 * on the copy.  Replay counters are not copied and start fresh.
 * Returns the clone, or NULL with *errp set; on failure the partial
 * copies are freed.
 */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	/* kfree(NULL) is a no-op, so this also covers alloc failure.
	 * NOTE(review): if xfrm_init_state() failed after grabbing
	 * type/mode module refs, those are not dropped here — confirm.
	 */
	kfree(x);
	return NULL;
}
1186
/* xfrm_state_lock is held */

/* Locate the state selected by migrate request @m: via the
 * destination hash when a reqid is given, otherwise via the source
 * hash.  Matches on mode, proto and the old address pair.  Returns a
 * referenced state or NULL.
 */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1231
1232 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1233 struct xfrm_migrate *m)
1234 {
1235 struct xfrm_state *xc;
1236 int err;
1237
1238 xc = xfrm_state_clone(x, &err);
1239 if (!xc)
1240 return NULL;
1241
1242 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1243 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1244
1245 /* add state */
1246 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1247 /* a care is needed when the destination address of the
1248 state is to be updated as it is a part of triplet */
1249 xfrm_state_insert(xc);
1250 } else {
1251 if ((err = xfrm_state_add(xc)) < 0)
1252 goto error;
1253 }
1254
1255 return xc;
1256 error:
1257 kfree(xc);
1258 return NULL;
1259 }
1260 EXPORT_SYMBOL(xfrm_state_migrate);
1261 #endif
1262
/*
 * Update an existing SA in the SAD with the updatable fields of @x
 * (encap, coaddr, selector, lifetimes), or insert @x itself when the
 * matching entry is still only an ACQ placeholder.
 *
 * Returns 0 on success, -ESRCH when no matching SA exists, -EEXIST when
 * the match is kernel-internal (not updatable from here), and -EINVAL
 * when the match is not in the VALID state.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Existing entry is only an acquire stub: link @x in
		 * directly; clearing the local pointer signals the
		 * replacement to the code after the lock is dropped. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the ACQ stub above; retire the stub. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		/* Copy only fields both states already carry. */
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the state timer so new lifetimes take effect. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1326
1327 int xfrm_state_check_expire(struct xfrm_state *x)
1328 {
1329 if (!x->curlft.use_time)
1330 x->curlft.use_time = get_seconds();
1331
1332 if (x->km.state != XFRM_STATE_VALID)
1333 return -EINVAL;
1334
1335 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1336 x->curlft.packets >= x->lft.hard_packet_limit) {
1337 x->km.state = XFRM_STATE_EXPIRED;
1338 mod_timer(&x->timer, jiffies);
1339 return -EINVAL;
1340 }
1341
1342 if (!x->km.dying &&
1343 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1344 x->curlft.packets >= x->lft.soft_packet_limit)) {
1345 x->km.dying = 1;
1346 km_state_expired(x, 0, 0);
1347 }
1348 return 0;
1349 }
1350 EXPORT_SYMBOL(xfrm_state_check_expire);
1351
1352 struct xfrm_state *
1353 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1354 unsigned short family)
1355 {
1356 struct xfrm_state *x;
1357
1358 spin_lock_bh(&xfrm_state_lock);
1359 x = __xfrm_state_lookup(daddr, spi, proto, family);
1360 spin_unlock_bh(&xfrm_state_lock);
1361 return x;
1362 }
1363 EXPORT_SYMBOL(xfrm_state_lookup);
1364
1365 struct xfrm_state *
1366 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1367 u8 proto, unsigned short family)
1368 {
1369 struct xfrm_state *x;
1370
1371 spin_lock_bh(&xfrm_state_lock);
1372 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1373 spin_unlock_bh(&xfrm_state_lock);
1374 return x;
1375 }
1376 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1377
1378 struct xfrm_state *
1379 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1380 xfrm_address_t *daddr, xfrm_address_t *saddr,
1381 int create, unsigned short family)
1382 {
1383 struct xfrm_state *x;
1384
1385 spin_lock_bh(&xfrm_state_lock);
1386 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1387 spin_unlock_bh(&xfrm_state_lock);
1388
1389 return x;
1390 }
1391 EXPORT_SYMBOL(xfrm_find_acq);
1392
1393 #ifdef CONFIG_XFRM_SUB_POLICY
1394 int
1395 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1396 unsigned short family)
1397 {
1398 int err = 0;
1399 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1400 if (!afinfo)
1401 return -EAFNOSUPPORT;
1402
1403 spin_lock_bh(&xfrm_state_lock);
1404 if (afinfo->tmpl_sort)
1405 err = afinfo->tmpl_sort(dst, src, n);
1406 spin_unlock_bh(&xfrm_state_lock);
1407 xfrm_state_put_afinfo(afinfo);
1408 return err;
1409 }
1410 EXPORT_SYMBOL(xfrm_tmpl_sort);
1411
1412 int
1413 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1414 unsigned short family)
1415 {
1416 int err = 0;
1417 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1418 if (!afinfo)
1419 return -EAFNOSUPPORT;
1420
1421 spin_lock_bh(&xfrm_state_lock);
1422 if (afinfo->state_sort)
1423 err = afinfo->state_sort(dst, src, n);
1424 spin_unlock_bh(&xfrm_state_lock);
1425 xfrm_state_put_afinfo(afinfo);
1426 return err;
1427 }
1428 EXPORT_SYMBOL(xfrm_state_sort);
1429 #endif
1430
1431 /* Silly enough, but I'm lazy to build resolution list */
1432
1433 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1434 {
1435 int i;
1436
1437 for (i = 0; i <= xfrm_state_hmask; i++) {
1438 struct hlist_node *entry;
1439 struct xfrm_state *x;
1440
1441 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1442 if (x->km.seq == seq &&
1443 x->km.state == XFRM_STATE_ACQ) {
1444 xfrm_state_hold(x);
1445 return x;
1446 }
1447 }
1448 }
1449 return NULL;
1450 }
1451
1452 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1453 {
1454 struct xfrm_state *x;
1455
1456 spin_lock_bh(&xfrm_state_lock);
1457 x = __xfrm_find_acq_byseq(seq);
1458 spin_unlock_bh(&xfrm_state_lock);
1459 return x;
1460 }
1461 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1462
1463 u32 xfrm_get_acqseq(void)
1464 {
1465 u32 res;
1466 static u32 acqseq;
1467 static DEFINE_SPINLOCK(acqseq_lock);
1468
1469 spin_lock_bh(&acqseq_lock);
1470 res = (++acqseq ? : ++acqseq);
1471 spin_unlock_bh(&acqseq_lock);
1472 return res;
1473 }
1474 EXPORT_SYMBOL(xfrm_get_acqseq);
1475
/*
 * Assign an SPI to @x -- the exact value when low == high, otherwise a
 * random unused value from [low, high] -- and hash the state into the
 * byspi table.  Returns 0 on success (or if an SPI is already set),
 * -ENOENT when no free SPI was found or the state is dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Nothing to do if an SPI is already assigned. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Caller requested one specific SPI; fail if taken. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		/* Probe random SPIs in [low, high]; at most high-low+1
		 * attempts, so a densely used range can still fail even
		 * when free values remain. */
		u32 spi = 0;
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Make the state findable via the byspi hash. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1528
/*
 * Iterate over xfrm_state_all, invoking @func for each live state whose
 * protocol matches walk->proto.  The walk is resumable: when @func
 * returns non-zero, a reference to the last visited state is stashed in
 * walk->state and the walk can be continued by calling again with the
 * same @walk.  Returns 0 when done, -ENOENT when nothing matched, or
 * the non-zero value from @func.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *old, *x, *last = NULL;
	int err = 0;

	/* A finished walk (no saved position but non-zero count)
	 * returns immediately. */
	if (walk->state == NULL && walk->count != 0)
		return 0;

	old = x = walk->state;
	walk->state = NULL;
	spin_lock_bh(&xfrm_state_lock);
	if (x == NULL)
		x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
	list_for_each_entry_from(x, &xfrm_state_all, all) {
		if (x->km.state == XFRM_STATE_DEAD)
			continue;
		if (!xfrm_id_proto_match(x->id.proto, walk->proto))
			continue;
		/* @func runs one entry behind the cursor so the final
		 * entry can be flagged with count == 0 below. */
		if (last) {
			err = func(last, walk->count, data);
			if (err) {
				/* Save the resume point with a reference
				 * held so it cannot be freed meanwhile. */
				xfrm_state_hold(last);
				walk->state = last;
				goto out;
			}
		}
		last = x;
		walk->count++;
	}
	if (walk->count == 0) {
		err = -ENOENT;
		goto out;
	}
	if (last)
		err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	/* Drop the reference taken when this walk was last suspended. */
	if (old != NULL)
		xfrm_state_put(old);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1573
1574
/*
 * Emit an XFRM_MSG_NEWAE aevent for @x and snapshot the replay counters
 * into preplay so the next comparison sees the just-notified values.
 */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated one of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Still below the notification threshold: defer, unless
		 * a timer-deferred notification is already pending. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Timer fired but nothing changed: remember to notify
		 * on the next counter update instead. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; if it was already pending, keep the
	 * deferred flag so the pending expiry still notifies. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1621
1622 static void xfrm_replay_timer_handler(unsigned long data)
1623 {
1624 struct xfrm_state *x = (struct xfrm_state*)data;
1625
1626 spin_lock(&x->lock);
1627
1628 if (x->km.state == XFRM_STATE_VALID) {
1629 if (xfrm_aevent_is_on())
1630 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1631 else
1632 x->xflags |= XFRM_TIME_DEFER;
1633 }
1634
1635 spin_unlock(&x->lock);
1636 }
1637
1638 int xfrm_replay_check(struct xfrm_state *x,
1639 struct sk_buff *skb, __be32 net_seq)
1640 {
1641 u32 diff;
1642 u32 seq = ntohl(net_seq);
1643
1644 if (unlikely(seq == 0))
1645 goto err;
1646
1647 if (likely(seq > x->replay.seq))
1648 return 0;
1649
1650 diff = x->replay.seq - seq;
1651 if (diff >= min_t(unsigned int, x->props.replay_window,
1652 sizeof(x->replay.bitmap) * 8)) {
1653 x->stats.replay_window++;
1654 goto err;
1655 }
1656
1657 if (x->replay.bitmap & (1U << diff)) {
1658 x->stats.replay++;
1659 goto err;
1660 }
1661 return 0;
1662
1663 err:
1664 xfrm_audit_state_replay(x, skb, net_seq);
1665 return -EINVAL;
1666 }
1667
/*
 * Record @net_seq in the anti-replay window after the packet has been
 * accepted: slide the window forward for a new high sequence number, or
 * set the corresponding bitmap bit for an in-window one.
 *
 * NOTE(review): the in-window shift (1U << diff) assumes diff is within
 * the bitmap width, i.e. that callers validated @net_seq with
 * xfrm_replay_check() first -- confirm at call sites.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
1688
1689 static LIST_HEAD(xfrm_km_list);
1690 static DEFINE_RWLOCK(xfrm_km_lock);
1691
1692 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1693 {
1694 struct xfrm_mgr *km;
1695
1696 read_lock(&xfrm_km_lock);
1697 list_for_each_entry(km, &xfrm_km_list, list)
1698 if (km->notify_policy)
1699 km->notify_policy(xp, dir, c);
1700 read_unlock(&xfrm_km_lock);
1701 }
1702
1703 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1704 {
1705 struct xfrm_mgr *km;
1706 read_lock(&xfrm_km_lock);
1707 list_for_each_entry(km, &xfrm_km_list, list)
1708 if (km->notify)
1709 km->notify(x, c);
1710 read_unlock(&xfrm_km_lock);
1711 }
1712
1713 EXPORT_SYMBOL(km_policy_notify);
1714 EXPORT_SYMBOL(km_state_notify);
1715
1716 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1717 {
1718 struct km_event c;
1719
1720 c.data.hard = hard;
1721 c.pid = pid;
1722 c.event = XFRM_MSG_EXPIRE;
1723 km_state_notify(x, &c);
1724
1725 if (hard)
1726 wake_up(&km_waitq);
1727 }
1728
1729 EXPORT_SYMBOL(km_state_expired);
1730 /*
1731 * We send to all registered managers regardless of failure
1732 * We are happy with one success
1733 */
1734 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1735 {
1736 int err = -EINVAL, acqret;
1737 struct xfrm_mgr *km;
1738
1739 read_lock(&xfrm_km_lock);
1740 list_for_each_entry(km, &xfrm_km_list, list) {
1741 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1742 if (!acqret)
1743 err = acqret;
1744 }
1745 read_unlock(&xfrm_km_lock);
1746 return err;
1747 }
1748 EXPORT_SYMBOL(km_query);
1749
1750 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1751 {
1752 int err = -EINVAL;
1753 struct xfrm_mgr *km;
1754
1755 read_lock(&xfrm_km_lock);
1756 list_for_each_entry(km, &xfrm_km_list, list) {
1757 if (km->new_mapping)
1758 err = km->new_mapping(x, ipaddr, sport);
1759 if (!err)
1760 break;
1761 }
1762 read_unlock(&xfrm_km_lock);
1763 return err;
1764 }
1765 EXPORT_SYMBOL(km_new_mapping);
1766
1767 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1768 {
1769 struct km_event c;
1770
1771 c.data.hard = hard;
1772 c.pid = pid;
1773 c.event = XFRM_MSG_POLEXPIRE;
1774 km_policy_notify(pol, dir, &c);
1775
1776 if (hard)
1777 wake_up(&km_waitq);
1778 }
1779 EXPORT_SYMBOL(km_policy_expired);
1780
1781 #ifdef CONFIG_XFRM_MIGRATE
1782 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1783 struct xfrm_migrate *m, int num_migrate)
1784 {
1785 int err = -EINVAL;
1786 int ret;
1787 struct xfrm_mgr *km;
1788
1789 read_lock(&xfrm_km_lock);
1790 list_for_each_entry(km, &xfrm_km_list, list) {
1791 if (km->migrate) {
1792 ret = km->migrate(sel, dir, type, m, num_migrate);
1793 if (!ret)
1794 err = ret;
1795 }
1796 }
1797 read_unlock(&xfrm_km_lock);
1798 return err;
1799 }
1800 EXPORT_SYMBOL(km_migrate);
1801 #endif
1802
1803 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1804 {
1805 int err = -EINVAL;
1806 int ret;
1807 struct xfrm_mgr *km;
1808
1809 read_lock(&xfrm_km_lock);
1810 list_for_each_entry(km, &xfrm_km_list, list) {
1811 if (km->report) {
1812 ret = km->report(proto, sel, addr);
1813 if (!ret)
1814 err = ret;
1815 }
1816 }
1817 read_unlock(&xfrm_km_lock);
1818 return err;
1819 }
1820 EXPORT_SYMBOL(km_report);
1821
/*
 * setsockopt() helper: let a registered key manager compile a
 * user-supplied policy blob (at most one page) into a socket policy and
 * attach it to @sk.  Returns 0 on success or a negative errno.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	/* First manager that understands the blob wins; on success
	 * compile_policy() sets err to a non-negative value that is
	 * then used as the policy direction below. */
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1861
1862 int xfrm_register_km(struct xfrm_mgr *km)
1863 {
1864 write_lock_bh(&xfrm_km_lock);
1865 list_add_tail(&km->list, &xfrm_km_list);
1866 write_unlock_bh(&xfrm_km_lock);
1867 return 0;
1868 }
1869 EXPORT_SYMBOL(xfrm_register_km);
1870
1871 int xfrm_unregister_km(struct xfrm_mgr *km)
1872 {
1873 write_lock_bh(&xfrm_km_lock);
1874 list_del(&km->list);
1875 write_unlock_bh(&xfrm_km_lock);
1876 return 0;
1877 }
1878 EXPORT_SYMBOL(xfrm_unregister_km);
1879
1880 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1881 {
1882 int err = 0;
1883 if (unlikely(afinfo == NULL))
1884 return -EINVAL;
1885 if (unlikely(afinfo->family >= NPROTO))
1886 return -EAFNOSUPPORT;
1887 write_lock_bh(&xfrm_state_afinfo_lock);
1888 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1889 err = -ENOBUFS;
1890 else
1891 xfrm_state_afinfo[afinfo->family] = afinfo;
1892 write_unlock_bh(&xfrm_state_afinfo_lock);
1893 return err;
1894 }
1895 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1896
1897 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1898 {
1899 int err = 0;
1900 if (unlikely(afinfo == NULL))
1901 return -EINVAL;
1902 if (unlikely(afinfo->family >= NPROTO))
1903 return -EAFNOSUPPORT;
1904 write_lock_bh(&xfrm_state_afinfo_lock);
1905 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1906 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1907 err = -EINVAL;
1908 else
1909 xfrm_state_afinfo[afinfo->family] = NULL;
1910 }
1911 write_unlock_bh(&xfrm_state_afinfo_lock);
1912 return err;
1913 }
1914 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1915
/* Look up the per-family state afinfo.  On success the afinfo read
 * lock is deliberately LEFT HELD and must be released via
 * xfrm_state_put_afinfo(); on failure the lock is dropped here and
 * NULL is returned. */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1927
/* Release the read lock taken by a successful xfrm_state_get_afinfo();
 * the afinfo argument is unused but keeps the get/put pairing obvious. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1933
1934 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop this state's hold on its inner tunnel state, deleting the
 * tunnel state when we were its last real user. */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		/* tunnel_users == 2 presumably means only us plus the
		 * tunnel's own base count remain -- confirm against
		 * the code that increments tunnel_users. */
		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1948
1949 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1950 {
1951 int res;
1952
1953 spin_lock_bh(&x->lock);
1954 if (x->km.state == XFRM_STATE_VALID &&
1955 x->type && x->type->get_mtu)
1956 res = x->type->get_mtu(x, mtu);
1957 else
1958 res = mtu - x->props.header_len;
1959 spin_unlock_bh(&x->lock);
1960 return res;
1961 }
1962
/*
 * Finish constructing @x: apply per-family init flags, resolve the
 * inner and outer modes and the transform type, and run the type's
 * init hook.  On success the state is marked VALID; on any failure a
 * negative errno is returned and the state stays unusable.
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	/* The inner mode is looked up by the selector family; non-tunnel
	 * modes additionally require it to match the outer family
	 * (checked just below). */
	x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
	if (x->inner_mode == NULL)
		goto error;

	if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
	    family != x->sel.family)
		goto error;

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2011
/* Boot-time setup: allocate the three SAD hash tables (initially 8
 * buckets each -- presumably grown later by resize logic not visible
 * in this file) and register the garbage-collection work item. */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
2027
2028 #ifdef CONFIG_AUDITSYSCALL
/* Append SA identification (security context if present, src/dst
 * addresses by family, and SPI) to an open audit record. */
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)x->props.saddr.a6),
				 NIP6(*(struct in6_addr *)x->id.daddr.a6));
		break;
	}

	/* SPI is logged in both decimal and hex. */
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
2056
/* Append the packet's src/dst addresses (and, for IPv6, its flow
 * label) to an open audit record. */
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	struct iphdr *iph4;
	struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(iph4->saddr),
				 NIPQUAD(iph4->daddr));
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT
				 " flowlbl=0x%x%x%x",
				 NIP6(iph6->saddr),
				 NIP6(iph6->daddr),
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}
2084
2085 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2086 u32 auid, u32 secid)
2087 {
2088 struct audit_buffer *audit_buf;
2089
2090 audit_buf = xfrm_audit_start("SAD-add");
2091 if (audit_buf == NULL)
2092 return;
2093 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2094 xfrm_audit_helper_sainfo(x, audit_buf);
2095 audit_log_format(audit_buf, " res=%u", result);
2096 audit_log_end(audit_buf);
2097 }
2098 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2099
2100 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2101 u32 auid, u32 secid)
2102 {
2103 struct audit_buffer *audit_buf;
2104
2105 audit_buf = xfrm_audit_start("SAD-delete");
2106 if (audit_buf == NULL)
2107 return;
2108 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2109 xfrm_audit_helper_sainfo(x, audit_buf);
2110 audit_log_format(audit_buf, " res=%u", result);
2111 audit_log_end(audit_buf);
2112 }
2113 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2114
/* Audit a replay-counter overflow on @x for the packet in @skb. */
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2132
/* Audit a packet rejected by the anti-replay check for @x; logs packet
 * addresses plus the SA's SPI and the offending sequence number. */
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
2148
2149 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2150 {
2151 struct audit_buffer *audit_buf;
2152
2153 audit_buf = xfrm_audit_start("SA-notfound");
2154 if (audit_buf == NULL)
2155 return;
2156 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2157 audit_log_end(audit_buf);
2158 }
2159 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2160
2161 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2162 __be32 net_spi, __be32 net_seq)
2163 {
2164 struct audit_buffer *audit_buf;
2165 u32 spi;
2166
2167 audit_buf = xfrm_audit_start("SA-notfound");
2168 if (audit_buf == NULL)
2169 return;
2170 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2171 spi = ntohl(net_spi);
2172 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2173 spi, spi, ntohl(net_seq));
2174 audit_log_end(audit_buf);
2175 }
2176 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2177
/* Audit an integrity-check (ICV) failure on an inbound packet for @x.
 * The SPI and sequence number are parsed out of the packet itself and
 * logged only when parsing succeeds. */
void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2197 #endif /* CONFIG_AUDITSYSCALL */
/* This page took 0.128098 seconds and 6 git commands to generate. */