Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used by the xfrm userspace/key-manager interface. */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Sysctl: async-event (AE) notification timer interval. */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

/* Sysctl: replay sequence-number delta that triggers an async event. */
u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Sysctl: hard lifetime (seconds) of ACQUIRE states awaiting resolution. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects all of the hash tables, counters and list below. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static LIST_HEAD(xfrm_state_all);	/* every inserted state, in add order */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;	/* bucket count - 1 */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;	/* states currently hashed */
static unsigned int xfrm_state_genid;	/* bumped to invalidate cached bundles */

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq);
#else
/* No-op stub when audit support is compiled out. */
#define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */
71
/* Bucket index in the by-destination table for (daddr, saddr, reqid,
 * family), masked to the current table size. */
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}
79
/* Bucket index in the by-source table for (daddr, saddr, family). */
static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}
86
/* Bucket index in the by-SPI table for (daddr, spi, proto, family). */
static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}
92
/* Move every state chained on @list into the three new tables, rehashing
 * with @nhashmask (new table size - 1).  A state only appears in the
 * by-SPI table if it has an SPI assigned.  Caller holds xfrm_state_lock
 * (see xfrm_hash_resize). */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	/* _safe variant: hlist_add_head() below unlinks x from @list. */
	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
123
124 static unsigned long xfrm_hash_new_size(void)
125 {
126 return ((xfrm_state_hmask + 1) << 1) *
127 sizeof(struct hlist_head);
128 }
129
/* Serializes concurrent resize work items. */
static DEFINE_MUTEX(hash_resize_mutex);

/* Work handler: allocate three tables of double the current size,
 * rehash everything into them under xfrm_state_lock, then free the old
 * tables.  Allocation and freeing happen outside the spinlock. */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	/* Walking the by-dst table alone suffices: every state is linked
	 * on all three tables, and xfrm_hash_transfer re-links all three
	 * node members. */
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
184
/* Deferred work that grows the hash tables (queued by
 * xfrm_hash_grow_check). */
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Woken whenever the state set changes so sleeping key managers can
 * re-examine it. */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Protects the per-family afinfo registry below. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Dead states are parked on xfrm_state_gc_list (reusing their ->bydst
 * node) and torn down from the gc work item. */
static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Return the afinfo for @family with xfrm_state_afinfo_lock write-held,
 * or NULL (lock already released) if the family is out of range or not
 * registered.  A non-NULL return must be paired with
 * xfrm_state_unlock_afinfo(). */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
213
/* Release the lock taken by a successful xfrm_state_lock_afinfo(). */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
219
220 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
221 {
222 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
223 const struct xfrm_type **typemap;
224 int err = 0;
225
226 if (unlikely(afinfo == NULL))
227 return -EAFNOSUPPORT;
228 typemap = afinfo->type_map;
229
230 if (likely(typemap[type->proto] == NULL))
231 typemap[type->proto] = type;
232 else
233 err = -EEXIST;
234 xfrm_state_unlock_afinfo(afinfo);
235 return err;
236 }
237 EXPORT_SYMBOL(xfrm_register_type);
238
239 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
240 {
241 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
242 const struct xfrm_type **typemap;
243 int err = 0;
244
245 if (unlikely(afinfo == NULL))
246 return -EAFNOSUPPORT;
247 typemap = afinfo->type_map;
248
249 if (unlikely(typemap[type->proto] != type))
250 err = -ENOENT;
251 else
252 typemap[type->proto] = NULL;
253 xfrm_state_unlock_afinfo(afinfo);
254 return err;
255 }
256 EXPORT_SYMBOL(xfrm_unregister_type);
257
/* Resolve the xfrm_type handler for @proto/@family, pinning its owner
 * module.  If absent, attempt one modprobe of "xfrm-type-<family>-<proto>"
 * and retry.  Returns NULL on failure; release with xfrm_put_type(). */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* A handler whose module is going away counts as absent. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
284
/* Drop the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
289
/* Register the encapsulation mode handler @mode for @family.
 * Returns 0 on success, -EINVAL for an out-of-range encap id,
 * -EAFNOSUPPORT for an unknown family, -EEXIST if the slot is taken, or
 * -ENOENT if the af module cannot be pinned.  On success the mode holds
 * a reference to the afinfo owner module. */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
321
/* Remove @mode from @family's mode map, dropping the afinfo module
 * reference taken at registration.  Returns 0 on success, -EINVAL for
 * an out-of-range encap id, -EAFNOSUPPORT for an unknown family, or
 * -ENOENT if the slot does not hold this mode. */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
347
/* Resolve the mode handler for @encap/@family, pinning its owner module.
 * If absent, attempt one modprobe of "xfrm-mode-<family>-<encap>" and
 * retry.  Returns NULL on failure; release with xfrm_put_mode(). */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* A mode whose module is unloading counts as absent. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
375
/* Drop the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
380
/* Final teardown of a dead state: stop both timers, free all per-state
 * allocations, drop mode/type module references and the security
 * context, then free the state itself.  Runs only from the gc work item
 * (process context), which del_timer_sync() requires. */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		/* Type-specific teardown before releasing the handler. */
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
403
/* Work handler: detach the whole pending gc list under the lock, then
 * destroy each parked state outside it (destruction may sleep). */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	/* States are parked via their ->bydst node (__xfrm_state_destroy). */
	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
420
421 static inline unsigned long make_jiffies(long secs)
422 {
423 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
424 return MAX_SCHEDULE_TIMEOUT-1;
425 else
426 return secs*HZ;
427 }
428
/* Per-state lifetime timer.  Checks hard and soft add/use expirations,
 * notifies the key manager on soft expiry (setting km.dying), deletes
 * the state on hard expiry, and re-arms itself for the nearest pending
 * deadline. */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest deadline */
	int warn = 0;		/* set when a soft limit has passed */
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* A never-used state counts as first used "now". */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already signalled; don't notify again. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* An unresolved ACQUIRE: mark expired and poll again shortly so
	 * waiters on km_waitq notice before the state is torn down. */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current), 0);

out:
	spin_unlock(&x->lock);
}
504
static void xfrm_replay_timer_handler(unsigned long data);

/* Allocate a zeroed state with one reference held, timers set up (not
 * armed) and all byte/packet lifetime limits set to "infinite".
 * GFP_ATOMIC because callers may hold xfrm_state_lock.  Returns NULL on
 * allocation failure. */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
				(unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
537
/* Called when the last reference is dropped: unlink from the all-states
 * list and park the state on the gc list (reusing its ->bydst node,
 * which is no longer hashed) for deferred teardown.  The state must
 * already be DEAD, i.e. removed from the hash tables by
 * __xfrm_state_delete(). */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_lock);
	list_del(&x->all);
	spin_unlock_bh(&xfrm_state_lock);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
552
/* Mark @x DEAD and unhash it from all tables, dropping the reference
 * that xfrm_state_alloc() created.  Caller holds x->lock.  Returns 0 on
 * success or -ESRCH if the state was already dead. */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
578
/* Locked wrapper around __xfrm_state_delete() for callers that do not
 * already hold x->lock. */
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
590
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for xfrm_state_flush(): ask the LSM whether every state
 * matching @proto may be deleted.  On the first refusal, audit the
 * failed attempt and return the LSM's error, aborting the flush.
 * Called with xfrm_state_lock held. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* Without LSM support every flush is permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
621
/* Delete (and audit) every non-kernel state whose protocol matches
 * @proto.  The table lock is dropped around each deletion, so the
 * bucket walk restarts from the head after every removal.  Returns 0,
 * or an LSM error if the security pre-check refused the flush. */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold x across the unlocked window so it
				 * cannot be freed under us. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
660
/* Snapshot SAD statistics (state count, current hash mask, hash max)
 * under the table lock. */
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
670
/* Fill @x's temporary selector from the flow/template via the
 * family-specific init_tempsel hook.  Returns 0 on success or -1 if the
 * family has no afinfo registered. */
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
684
/* Find a state by (daddr, spi, proto, family) in the by-SPI table and
 * return it with a reference held, or NULL.  Caller holds
 * xfrm_state_lock. */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Hash match is not enough; compare the full address. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
716
/* Find a state by (daddr, saddr, proto, family) in the by-source table
 * and return it with a reference held, or NULL.  Used for SPI-less
 * protocols.  Caller holds xfrm_state_lock. */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		/* Both endpoint addresses must match exactly. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
751
752 static inline struct xfrm_state *
753 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
754 {
755 if (use_spi)
756 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
757 x->id.proto, family);
758 else
759 return __xfrm_state_lookup_byaddr(&x->id.daddr,
760 &x->props.saddr,
761 x->id.proto, family);
762 }
763
/* Queue hash-table growth when an insert collided, the table is below
 * its configured maximum, and there are more states than buckets. */
static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
771
/* Resolve the SA to use for output flow @fl against template @tmpl and
 * policy @pol.  Prefers the freshest VALID state that matches selector
 * and LSM checks; if none exists (and no ACQUIRE is pending, and no
 * error state matched), creates an ACQUIRE state and asks the key
 * manager to negotiate.  Returns a held state, or NULL with *err set
 * (-EAGAIN while an ACQUIRE is outstanding). */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying, then most recently
				 * added among equals. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A template with a fixed SPI that already exists under a
		 * different reqid/mode must not be duplicated. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Hash the ACQUIRE state so retransmits of the
			 * same flow find it and return -EAGAIN. */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
883
/* Find a VALID state matching the exact (daddr, saddr, family, mode,
 * proto, reqid) tuple without any selector/policy checks or ACQUIRE
 * creation.  Returns a held state or NULL. */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
915
/* Link @x into the all-states list and all applicable hash tables,
 * stamp a fresh genid, arm its timers and wake key-manager waiters.
 * Caller holds xfrm_state_lock. */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	list_add_tail(&x->all, &xfrm_state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Fire the lifetime timer soon to pick up the nearest deadline. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
948
/* xfrm_state_lock is held.
 * Bump the genid of every existing state sharing @xnew's (family, reqid,
 * daddr, saddr) tuple so cached bundles built on them are invalidated in
 * favor of the new state. */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
967
/* Insert @x unconditionally, invalidating bundles of any states with the
 * same endpoint tuple first. */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
976
/* xfrm_state_lock is held.
 * Find an existing SPI-less ACQUIRE state matching the tuple, returning
 * it with a reference held.  If none exists and @create is set, build
 * and hash a new ACQUIRE state (two references: one for the table, one
 * for the caller) with the acq-expiry timer armed.  Returns NULL when
 * nothing is found and @create is clear, or on allocation failure. */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* Selector pins the exact host pair that triggered the
		 * acquire (full-length prefixes). */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra hold: the hash tables keep their own reference. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1062
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Add a fully specified state.  Fails with -EEXIST if an identical
 * state is already installed.  Any matching ACQUIRE placeholder (found
 * by key-manager sequence number or by tuple) is deleted after the new
 * state is inserted, outside the table lock. */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* The seq-matched ACQUIRE must also agree on proto and
		 * destination, otherwise it is unrelated. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		/* Retire the ACQUIRE placeholder now that the real state
		 * is in place. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1113
1114 #ifdef CONFIG_XFRM_MIGRATE
1115 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1116 {
1117 int err = -ENOMEM;
1118 struct xfrm_state *x = xfrm_state_alloc();
1119 if (!x)
1120 goto error;
1121
1122 memcpy(&x->id, &orig->id, sizeof(x->id));
1123 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1124 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1125 x->props.mode = orig->props.mode;
1126 x->props.replay_window = orig->props.replay_window;
1127 x->props.reqid = orig->props.reqid;
1128 x->props.family = orig->props.family;
1129 x->props.saddr = orig->props.saddr;
1130
1131 if (orig->aalg) {
1132 x->aalg = xfrm_algo_clone(orig->aalg);
1133 if (!x->aalg)
1134 goto error;
1135 }
1136 x->props.aalgo = orig->props.aalgo;
1137
1138 if (orig->ealg) {
1139 x->ealg = xfrm_algo_clone(orig->ealg);
1140 if (!x->ealg)
1141 goto error;
1142 }
1143 x->props.ealgo = orig->props.ealgo;
1144
1145 if (orig->calg) {
1146 x->calg = xfrm_algo_clone(orig->calg);
1147 if (!x->calg)
1148 goto error;
1149 }
1150 x->props.calgo = orig->props.calgo;
1151
1152 if (orig->encap) {
1153 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1154 if (!x->encap)
1155 goto error;
1156 }
1157
1158 if (orig->coaddr) {
1159 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1160 GFP_KERNEL);
1161 if (!x->coaddr)
1162 goto error;
1163 }
1164
1165 err = xfrm_init_state(x);
1166 if (err)
1167 goto error;
1168
1169 x->props.flags = orig->props.flags;
1170
1171 x->curlft.add_time = orig->curlft.add_time;
1172 x->km.state = orig->km.state;
1173 x->km.seq = orig->km.seq;
1174
1175 return x;
1176
1177 error:
1178 if (errp)
1179 *errp = err;
1180 if (x) {
1181 kfree(x->aalg);
1182 kfree(x->ealg);
1183 kfree(x->calg);
1184 kfree(x->encap);
1185 kfree(x->coaddr);
1186 }
1187 kfree(x);
1188 return NULL;
1189 }
1190
/* xfrm_state_lock is held.
 * Locate the state to migrate for @m: by (old_daddr, old_saddr, reqid)
 * in the destination table when a reqid is given, otherwise by address
 * pair in the source table.  Mode, proto and both old endpoint
 * addresses must match exactly.  Returns a held state or NULL. */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1235
1236 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1237 struct xfrm_migrate *m)
1238 {
1239 struct xfrm_state *xc;
1240 int err;
1241
1242 xc = xfrm_state_clone(x, &err);
1243 if (!xc)
1244 return NULL;
1245
1246 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1247 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1248
1249 /* add state */
1250 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1251 /* a care is needed when the destination address of the
1252 state is to be updated as it is a part of triplet */
1253 xfrm_state_insert(xc);
1254 } else {
1255 if ((err = xfrm_state_add(xc)) < 0)
1256 goto error;
1257 }
1258
1259 return xc;
1260 error:
1261 kfree(xc);
1262 return NULL;
1263 }
1264 EXPORT_SYMBOL(xfrm_state_migrate);
1265 #endif
1266
/* Replace the parameters of an installed SA with those carried in @x.
 *
 * Locates the installed state matching @x's identity.  If the match is
 * a larval (ACQ) state, @x itself is inserted in its place; otherwise
 * the match's encap/coaddr/selector/lifetime are updated in place.
 * Returns 0 on success, -ESRCH if no match exists, -EEXIST for
 * kernel-owned states, -EINVAL if the match is no longer valid.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* @x supersedes the larval state; x == NULL below marks
		 * that no field-wise update is needed. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the ACQ state: delete and release the old one. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer and re-check expiry against
		 * the freshly copied limits. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1330
/* Evaluate @x's byte/packet lifetime counters.
 *
 * Stamps the first-use time, hard-expires the state (firing its timer
 * immediately) when a hard limit is reached, and raises a one-shot soft
 * expiry notification to the key managers.  Returns 0 while the state
 * may still be used, -EINVAL otherwise.  NOTE(review): callers such as
 * xfrm_state_update() invoke this under x->lock — presumably required;
 * confirm.
 */
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		/* Let the state timer perform the teardown right away. */
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	/* km.dying guards against repeated soft-expire notifications. */
	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
1355
1356 struct xfrm_state *
1357 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1358 unsigned short family)
1359 {
1360 struct xfrm_state *x;
1361
1362 spin_lock_bh(&xfrm_state_lock);
1363 x = __xfrm_state_lookup(daddr, spi, proto, family);
1364 spin_unlock_bh(&xfrm_state_lock);
1365 return x;
1366 }
1367 EXPORT_SYMBOL(xfrm_state_lookup);
1368
1369 struct xfrm_state *
1370 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1371 u8 proto, unsigned short family)
1372 {
1373 struct xfrm_state *x;
1374
1375 spin_lock_bh(&xfrm_state_lock);
1376 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1377 spin_unlock_bh(&xfrm_state_lock);
1378 return x;
1379 }
1380 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1381
1382 struct xfrm_state *
1383 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1384 xfrm_address_t *daddr, xfrm_address_t *saddr,
1385 int create, unsigned short family)
1386 {
1387 struct xfrm_state *x;
1388
1389 spin_lock_bh(&xfrm_state_lock);
1390 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1391 spin_unlock_bh(&xfrm_state_lock);
1392
1393 return x;
1394 }
1395 EXPORT_SYMBOL(xfrm_find_acq);
1396
1397 #ifdef CONFIG_XFRM_SUB_POLICY
1398 int
1399 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1400 unsigned short family)
1401 {
1402 int err = 0;
1403 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1404 if (!afinfo)
1405 return -EAFNOSUPPORT;
1406
1407 spin_lock_bh(&xfrm_state_lock);
1408 if (afinfo->tmpl_sort)
1409 err = afinfo->tmpl_sort(dst, src, n);
1410 spin_unlock_bh(&xfrm_state_lock);
1411 xfrm_state_put_afinfo(afinfo);
1412 return err;
1413 }
1414 EXPORT_SYMBOL(xfrm_tmpl_sort);
1415
1416 int
1417 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1418 unsigned short family)
1419 {
1420 int err = 0;
1421 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1422 if (!afinfo)
1423 return -EAFNOSUPPORT;
1424
1425 spin_lock_bh(&xfrm_state_lock);
1426 if (afinfo->state_sort)
1427 err = afinfo->state_sort(dst, src, n);
1428 spin_unlock_bh(&xfrm_state_lock);
1429 xfrm_state_put_afinfo(afinfo);
1430 return err;
1431 }
1432 EXPORT_SYMBOL(xfrm_state_sort);
1433 #endif
1434
1435 /* Silly enough, but I'm lazy to build resolution list */
1436
1437 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1438 {
1439 int i;
1440
1441 for (i = 0; i <= xfrm_state_hmask; i++) {
1442 struct hlist_node *entry;
1443 struct xfrm_state *x;
1444
1445 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1446 if (x->km.seq == seq &&
1447 x->km.state == XFRM_STATE_ACQ) {
1448 xfrm_state_hold(x);
1449 return x;
1450 }
1451 }
1452 }
1453 return NULL;
1454 }
1455
1456 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1457 {
1458 struct xfrm_state *x;
1459
1460 spin_lock_bh(&xfrm_state_lock);
1461 x = __xfrm_find_acq_byseq(seq);
1462 spin_unlock_bh(&xfrm_state_lock);
1463 return x;
1464 }
1465 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1466
1467 u32 xfrm_get_acqseq(void)
1468 {
1469 u32 res;
1470 static u32 acqseq;
1471 static DEFINE_SPINLOCK(acqseq_lock);
1472
1473 spin_lock_bh(&acqseq_lock);
1474 res = (++acqseq ? : ++acqseq);
1475 spin_unlock_bh(&acqseq_lock);
1476 return res;
1477 }
1478 EXPORT_SYMBOL(xfrm_get_acqseq);
1479
/* Assign an SPI to @x from the host-byte-order range [@low, @high].
 *
 * For a single-value range the exact SPI is taken if free; otherwise up
 * to high-low+1 random probes are made.  On success the state is hashed
 * into the byspi table.  Returns 0 on success (also when an SPI is
 * already assigned), -ENOENT when no free SPI was found or the state
 * is dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Already has an SPI: nothing to do. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			/* Requested SPI is already in use. */
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Random probing: up to range-size attempts. */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Publish the state in the byspi hash under the table lock. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1532
/* Iterate over all SAs matching walk->proto, invoking @func for each.
 *
 * The walk is resumable: when @func returns non-zero the current
 * position is stored (with a reference held) in walk->state and the
 * error is returned; a subsequent call continues from there.  @func is
 * called with a running count for each entry and with count 0 for the
 * final entry, letting dump code mark its last message.  Returns 0 on
 * completion, -ENOENT if nothing matched, or @func's error.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *old, *x, *last = NULL;
	int err = 0;

	/* A finished walk (no saved position but entries already seen)
	 * returns immediately. */
	if (walk->state == NULL && walk->count != 0)
		return 0;

	old = x = walk->state;
	walk->state = NULL;
	spin_lock_bh(&xfrm_state_lock);
	if (x == NULL)
		x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
	list_for_each_entry_from(x, &xfrm_state_all, all) {
		if (x->km.state == XFRM_STATE_DEAD)
			continue;
		if (!xfrm_id_proto_match(x->id.proto, walk->proto))
			continue;
		/* Deliver the previous match only once its successor is
		 * known, so the final one can be flagged via count 0. */
		if (last) {
			err = func(last, walk->count, data);
			if (err) {
				xfrm_state_hold(last);
				walk->state = last;
				goto out;
			}
		}
		last = x;
		walk->count++;
	}
	if (walk->count == 0) {
		err = -ENOENT;
		goto out;
	}
	if (last)
		err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	/* Drop the reference taken when the walk was suspended. */
	if (old != NULL)
		xfrm_state_put(old);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1577
1578
/* Emit a rate-limited replay-counter notification (XFRM_MSG_NEWAE) to
 * the key managers; see the rules below. */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated on of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			/* Not enough movement since the last report; only
			 * proceed if a timer-deferred event is pending. */
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last report: defer again. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the counters being reported so the next call measures
	 * deltas from this notification. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; clear the defer flag only if the timer
	 * was not already pending. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1625
1626 static void xfrm_replay_timer_handler(unsigned long data)
1627 {
1628 struct xfrm_state *x = (struct xfrm_state*)data;
1629
1630 spin_lock(&x->lock);
1631
1632 if (x->km.state == XFRM_STATE_VALID) {
1633 if (xfrm_aevent_is_on())
1634 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1635 else
1636 x->xflags |= XFRM_TIME_DEFER;
1637 }
1638
1639 spin_unlock(&x->lock);
1640 }
1641
/* Validate inbound sequence number @net_seq against @x's anti-replay
 * window.  Returns 0 if acceptable; -EINVAL (after auditing) when the
 * number is zero, older than the window, or already seen.
 */
int xfrm_replay_check(struct xfrm_state *x,
		      struct sk_buff *skb, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	/* Sequence number 0 is never valid on the wire. */
	if (unlikely(seq == 0))
		goto err;

	/* Ahead of the window's right edge: always fresh. */
	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	/* The effective window is clamped to the 32 bits the bitmap
	 * can actually track. */
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		goto err;
	}

	/* Bit set: this sequence number was already accepted. */
	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		goto err;
	}
	return 0;

err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}
1671
/* Record inbound sequence number @net_seq as seen, sliding the replay
 * bitmap forward when it advances past the current right edge.
 * NOTE(review): unlike xfrm_replay_check(), diff here is bounded only
 * by props.replay_window, not by the 32-bit bitmap width — presumably
 * configuration limits the window to <= 32; confirm.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		/* New right edge: shift history and mark this number. */
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		/* Within the window: just set the corresponding bit. */
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
1692
1693 static LIST_HEAD(xfrm_km_list);
1694 static DEFINE_RWLOCK(xfrm_km_lock);
1695
1696 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1697 {
1698 struct xfrm_mgr *km;
1699
1700 read_lock(&xfrm_km_lock);
1701 list_for_each_entry(km, &xfrm_km_list, list)
1702 if (km->notify_policy)
1703 km->notify_policy(xp, dir, c);
1704 read_unlock(&xfrm_km_lock);
1705 }
1706
1707 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1708 {
1709 struct xfrm_mgr *km;
1710 read_lock(&xfrm_km_lock);
1711 list_for_each_entry(km, &xfrm_km_list, list)
1712 if (km->notify)
1713 km->notify(x, c);
1714 read_unlock(&xfrm_km_lock);
1715 }
1716
1717 EXPORT_SYMBOL(km_policy_notify);
1718 EXPORT_SYMBOL(km_state_notify);
1719
1720 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1721 {
1722 struct km_event c;
1723
1724 c.data.hard = hard;
1725 c.pid = pid;
1726 c.event = XFRM_MSG_EXPIRE;
1727 km_state_notify(x, &c);
1728
1729 if (hard)
1730 wake_up(&km_waitq);
1731 }
1732
1733 EXPORT_SYMBOL(km_state_expired);
1734 /*
1735 * We send to all registered managers regardless of failure
1736 * We are happy with one success
1737 */
1738 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1739 {
1740 int err = -EINVAL, acqret;
1741 struct xfrm_mgr *km;
1742
1743 read_lock(&xfrm_km_lock);
1744 list_for_each_entry(km, &xfrm_km_list, list) {
1745 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1746 if (!acqret)
1747 err = acqret;
1748 }
1749 read_unlock(&xfrm_km_lock);
1750 return err;
1751 }
1752 EXPORT_SYMBOL(km_query);
1753
1754 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1755 {
1756 int err = -EINVAL;
1757 struct xfrm_mgr *km;
1758
1759 read_lock(&xfrm_km_lock);
1760 list_for_each_entry(km, &xfrm_km_list, list) {
1761 if (km->new_mapping)
1762 err = km->new_mapping(x, ipaddr, sport);
1763 if (!err)
1764 break;
1765 }
1766 read_unlock(&xfrm_km_lock);
1767 return err;
1768 }
1769 EXPORT_SYMBOL(km_new_mapping);
1770
1771 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1772 {
1773 struct km_event c;
1774
1775 c.data.hard = hard;
1776 c.pid = pid;
1777 c.event = XFRM_MSG_POLEXPIRE;
1778 km_policy_notify(pol, dir, &c);
1779
1780 if (hard)
1781 wake_up(&km_waitq);
1782 }
1783 EXPORT_SYMBOL(km_policy_expired);
1784
1785 #ifdef CONFIG_XFRM_MIGRATE
1786 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1787 struct xfrm_migrate *m, int num_migrate)
1788 {
1789 int err = -EINVAL;
1790 int ret;
1791 struct xfrm_mgr *km;
1792
1793 read_lock(&xfrm_km_lock);
1794 list_for_each_entry(km, &xfrm_km_list, list) {
1795 if (km->migrate) {
1796 ret = km->migrate(sel, dir, type, m, num_migrate);
1797 if (!ret)
1798 err = ret;
1799 }
1800 }
1801 read_unlock(&xfrm_km_lock);
1802 return err;
1803 }
1804 EXPORT_SYMBOL(km_migrate);
1805 #endif
1806
1807 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1808 {
1809 int err = -EINVAL;
1810 int ret;
1811 struct xfrm_mgr *km;
1812
1813 read_lock(&xfrm_km_lock);
1814 list_for_each_entry(km, &xfrm_km_list, list) {
1815 if (km->report) {
1816 ret = km->report(proto, sel, addr);
1817 if (!ret)
1818 err = ret;
1819 }
1820 }
1821 read_unlock(&xfrm_km_lock);
1822 return err;
1823 }
1824 EXPORT_SYMBOL(km_report);
1825
/* setsockopt() helper: have the registered key managers compile a
 * userspace-supplied policy blob and attach the result to @sk as a
 * per-socket policy.
 *
 * Returns 0 on success, -EMSGSIZE for bad lengths, -ENOMEM, -EFAULT on
 * copy failure, or the key manager's compile error.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	/* First key manager that understands the format wins: on success
	 * compile_policy leaves err >= 0 and a policy in pol. */
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* The non-negative err selects the slot passed to
		 * xfrm_sk_policy_insert(); the insert takes its own
		 * reference, so drop ours. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1865
1866 int xfrm_register_km(struct xfrm_mgr *km)
1867 {
1868 write_lock_bh(&xfrm_km_lock);
1869 list_add_tail(&km->list, &xfrm_km_list);
1870 write_unlock_bh(&xfrm_km_lock);
1871 return 0;
1872 }
1873 EXPORT_SYMBOL(xfrm_register_km);
1874
1875 int xfrm_unregister_km(struct xfrm_mgr *km)
1876 {
1877 write_lock_bh(&xfrm_km_lock);
1878 list_del(&km->list);
1879 write_unlock_bh(&xfrm_km_lock);
1880 return 0;
1881 }
1882 EXPORT_SYMBOL(xfrm_unregister_km);
1883
1884 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1885 {
1886 int err = 0;
1887 if (unlikely(afinfo == NULL))
1888 return -EINVAL;
1889 if (unlikely(afinfo->family >= NPROTO))
1890 return -EAFNOSUPPORT;
1891 write_lock_bh(&xfrm_state_afinfo_lock);
1892 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1893 err = -ENOBUFS;
1894 else
1895 xfrm_state_afinfo[afinfo->family] = afinfo;
1896 write_unlock_bh(&xfrm_state_afinfo_lock);
1897 return err;
1898 }
1899 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1900
1901 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1902 {
1903 int err = 0;
1904 if (unlikely(afinfo == NULL))
1905 return -EINVAL;
1906 if (unlikely(afinfo->family >= NPROTO))
1907 return -EAFNOSUPPORT;
1908 write_lock_bh(&xfrm_state_afinfo_lock);
1909 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1910 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1911 err = -EINVAL;
1912 else
1913 xfrm_state_afinfo[afinfo->family] = NULL;
1914 }
1915 write_unlock_bh(&xfrm_state_afinfo_lock);
1916 return err;
1917 }
1918 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1919
/* Look up the per-family ops table.  On success the table is returned
 * with xfrm_state_afinfo_lock read-held; the caller must release it
 * via xfrm_state_put_afinfo().  Returns NULL — with the lock already
 * dropped — for out-of-range or unregistered families.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1931
/* Release the read lock taken by a successful xfrm_state_get_afinfo().
 * The afinfo argument itself is unused; the pairing exists purely for
 * the lock. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1937
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach @x from its inner tunnel state, dropping @x's user count and
 * reference on it.  NOTE(review): the tunnel_users == 2 threshold
 * appears to mean "x is the last user besides the tunnel's owner", in
 * which case the tunnel state itself is deleted — confirm. */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1952
1953 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1954 {
1955 int res;
1956
1957 spin_lock_bh(&x->lock);
1958 if (x->km.state == XFRM_STATE_VALID &&
1959 x->type && x->type->get_mtu)
1960 res = x->type->get_mtu(x, mtu);
1961 else
1962 res = mtu - x->props.header_len;
1963 spin_unlock_bh(&x->lock);
1964 return res;
1965 }
1966
/* Finish constructing state @x: run per-family flag init, resolve the
 * inner and outer modes and the transform type, then run the type's
 * init.
 *
 * For an AF_UNSPEC selector both IPv4 and IPv6 inner modes are
 * resolved (inter-address-family operation): x->inner_mode matches
 * x->props.family and x->inner_mode_iaf holds the other family.  On
 * success x->km.state becomes XFRM_STATE_VALID.  Returns 0 or a
 * negative errno; on failure the state is left partially initialized
 * for the caller to release.
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		/* A cross-family inner mode is only legal for tunnel
		 * modes. */
		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		struct xfrm_mode *inner_mode_iaf;

		inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
		if (inner_mode_iaf == NULL)
			goto error;

		if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode_iaf);
			goto error;
		}

		/* Primary mode follows the state's own family; the other
		 * family's mode goes in the inter-AF slot. */
		if (x->props.family == AF_INET) {
			x->inner_mode = inner_mode;
			x->inner_mode_iaf = inner_mode_iaf;
		} else {
			x->inner_mode = inner_mode_iaf;
			x->inner_mode_iaf = inner_mode;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2051
/* Boot-time initialization: allocate the three SA hash tables with an
 * initial 8 buckets each and set up the state garbage-collection work.
 * (Presumably the tables are grown later up to xfrm_state_hashmax —
 * the resize code is outside this view; confirm.) */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	/* Without these tables IPsec cannot function at all. */
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
2067
2068 #ifdef CONFIG_AUDITSYSCALL
/* Append SA identity — security context (if any), address pair and
 * SPI — to an audit record. */
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	/* Address formatting differs per family; other families log no
	 * addresses. */
	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)x->props.saddr.a6),
				 NIP6(*(struct in6_addr *)x->id.daddr.a6));
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
2096
/* Append the packet's IP header addresses (and, for IPv6, the flow
 * label) to an audit record.  Other families log nothing. */
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	struct iphdr *iph4;
	struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(iph4->saddr),
				 NIPQUAD(iph4->daddr));
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT
				 " flowlbl=0x%x%x%x",
				 NIP6(iph6->saddr),
				 NIP6(iph6->daddr),
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}
2124
2125 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2126 u32 auid, u32 secid)
2127 {
2128 struct audit_buffer *audit_buf;
2129
2130 audit_buf = xfrm_audit_start("SAD-add");
2131 if (audit_buf == NULL)
2132 return;
2133 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2134 xfrm_audit_helper_sainfo(x, audit_buf);
2135 audit_log_format(audit_buf, " res=%u", result);
2136 audit_log_end(audit_buf);
2137 }
2138 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2139
2140 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2141 u32 auid, u32 secid)
2142 {
2143 struct audit_buffer *audit_buf;
2144
2145 audit_buf = xfrm_audit_start("SAD-delete");
2146 if (audit_buf == NULL)
2147 return;
2148 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2149 xfrm_audit_helper_sainfo(x, audit_buf);
2150 audit_log_format(audit_buf, " res=%u", result);
2151 audit_log_end(audit_buf);
2152 }
2153 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2154
2155 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2156 struct sk_buff *skb)
2157 {
2158 struct audit_buffer *audit_buf;
2159 u32 spi;
2160
2161 audit_buf = xfrm_audit_start("SA-replay-overflow");
2162 if (audit_buf == NULL)
2163 return;
2164 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2165 /* don't record the sequence number because it's inherent in this kind
2166 * of audit message */
2167 spi = ntohl(x->id.spi);
2168 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2169 audit_log_end(audit_buf);
2170 }
2171 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2172
2173 static void xfrm_audit_state_replay(struct xfrm_state *x,
2174 struct sk_buff *skb, __be32 net_seq)
2175 {
2176 struct audit_buffer *audit_buf;
2177 u32 spi;
2178
2179 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2180 if (audit_buf == NULL)
2181 return;
2182 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2183 spi = ntohl(x->id.spi);
2184 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2185 spi, spi, ntohl(net_seq));
2186 audit_log_end(audit_buf);
2187 }
2188
2189 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2190 {
2191 struct audit_buffer *audit_buf;
2192
2193 audit_buf = xfrm_audit_start("SA-notfound");
2194 if (audit_buf == NULL)
2195 return;
2196 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2197 audit_log_end(audit_buf);
2198 }
2199 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2200
2201 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2202 __be32 net_spi, __be32 net_seq)
2203 {
2204 struct audit_buffer *audit_buf;
2205 u32 spi;
2206
2207 audit_buf = xfrm_audit_start("SA-notfound");
2208 if (audit_buf == NULL)
2209 return;
2210 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2211 spi = ntohl(net_spi);
2212 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2213 spi, spi, ntohl(net_seq));
2214 audit_log_end(audit_buf);
2215 }
2216 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2217
2218 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2219 struct sk_buff *skb, u8 proto)
2220 {
2221 struct audit_buffer *audit_buf;
2222 __be32 net_spi;
2223 __be32 net_seq;
2224
2225 audit_buf = xfrm_audit_start("SA-icv-failure");
2226 if (audit_buf == NULL)
2227 return;
2228 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2229 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2230 u32 spi = ntohl(net_spi);
2231 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2232 spi, spi, ntohl(net_seq));
2233 }
2234 audit_log_end(audit_buf);
2235 }
2236 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2237 #endif /* CONFIG_AUDITSYSCALL */
This page took 0.0754 seconds and 6 git commands to generate.