/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
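
/*
 * Illustrative usage (a sketch of the usual iproute2 syntax; the device
 * name, realm numbers, and class IDs here are made up for the example):
 * realms are assigned by the routing table, e.g.
 * "ip route add 10.0.0.0/8 dev eth0 realm 10", and matched by filters
 * such as:
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route from 10 classid 1:10
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route to 2 from 10 classid 1:2
 *
 * Per rule 3 above, a packet matching both filters is classified by the
 * second one, because specific "to" buckets are searched before the
 * wildcard "to" bucket.
 */
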
struct route4_fastmap {
        struct route4_filter *filter;
        u32 id;
        int iif;
};

struct route4_head {
        struct route4_fastmap fastmap[16];
        struct route4_bucket __rcu *table[256 + 1];
        struct rcu_head rcu;
};

struct route4_bucket {
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
        struct route4_filter __rcu *ht[16 + 16 + 1];
        struct rcu_head rcu;
};

struct route4_filter {
        struct route4_filter __rcu *next;
        u32 id;
        int iif;

        struct tcf_result res;
        struct tcf_exts exts;
        u32 handle;
        struct route4_bucket *bkt;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
        return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
        spin_lock_bh(&fastmap_lock);
        memset(head->fastmap, 0, sizeof(head->fastmap));
        spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
{
        int h = route4_fastmap_hash(id, iif);

        /* fastmap updates must look atomic to align id, iif, filter */
        spin_lock_bh(&fastmap_lock);
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
        spin_unlock_bh(&fastmap_lock);
}
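
/*
 * The fastmap is a small cache of recent classification results: one
 * entry per route4_fastmap_hash() slot, keyed on (id, iif).  A slot
 * holding ROUTE4_FAILURE caches a negative lookup.  Inserts and deletes
 * wipe the whole cache via route4_reset_fastmap().
 */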

static inline int route4_hash_to(u32 id)
{
        return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
        return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
        return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
        return 32;
}
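
/*
 * Chain layout inside a bucket, matching the ht[16 + 16 + 1] comment on
 * struct route4_bucket: slots 0-15 hold "from REALM" filters, slots
 * 16-31 hold "fromdev DEV" (iif) filters, and slot 32 holds wildcards.
 */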

#define ROUTE4_APPLY_RESULT()                                   \
{                                                               \
        *res = f->res;                                          \
        if (tcf_exts_is_available(&f->exts)) {                  \
                int r = tcf_exts_exec(skb, &f->exts, res);      \
                if (r < 0) {                                    \
                        dont_cache = 1;                         \
                        continue;                               \
                }                                               \
                return r;                                       \
        } else if (!dont_cache)                                 \
                route4_set_fastmap(head, id, iif, f);           \
        return 0;                                               \
}

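/*
 * Classification order: the fastmap is consulted first; on a miss, the
 * bucket for the destination realm is searched (from-chain, iif-chain,
 * then wildcard chain), and finally the wildcard "to" bucket at index
 * 256 is tried with the destination realm cleared.  A complete miss is
 * cached in the fastmap as ROUTE4_FAILURE.
 */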
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                           struct tcf_result *res)
{
        struct route4_head *head = rcu_dereference_bh(tp->root);
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;

        dst = skb_dst(skb);
        if (!dst)
                goto failure;

        id = dst->tclassid;
        if (head == NULL)
                goto old_method;

        iif = inet_iif(skb);

        h = route4_fastmap_hash(id, iif);

        spin_lock(&fastmap_lock);
        if (id == head->fastmap[h].id &&
            iif == head->fastmap[h].iif &&
            (f = head->fastmap[h].filter) != NULL) {
                if (f == ROUTE4_FAILURE) {
                        spin_unlock(&fastmap_lock);
                        goto failure;
                }

                *res = f->res;
                spin_unlock(&fastmap_lock);
                return 0;
        }
        spin_unlock(&fastmap_lock);

        h = route4_hash_to(id);

restart:
        b = rcu_dereference_bh(head->table[h]);
        if (b) {
                for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();

                for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        if (f->iif == iif)
                                ROUTE4_APPLY_RESULT();

                for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        ROUTE4_APPLY_RESULT();
        }
        if (h < 256) {
                h = 256;
                id &= ~0xFFFF;
                goto restart;
        }

        if (!dont_cache)
                route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
        return -1;

old_method:
        if (id && (TC_H_MAJ(id) == 0 ||
                   !(TC_H_MAJ(id ^ tp->q->handle)))) {
                res->classid = id;
                res->class = 0;
                return 0;
        }
        return -1;
}

static inline u32 to_hash(u32 id)
{
        u32 h = id & 0xFF;

        if (id & 0x8000)
                h += 256;
        return h;
}

static inline u32 from_hash(u32 id)
{
        id &= 0xFFFF;
        if (id == 0xFFFF)
                return 32;
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
                return id & 0xF;
        }
        return 16 + (id & 0xF);
}
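
/*
 * Worked example of the handle encoding these helpers invert (as built
 * by route4_set_parms() below): "to 1 from 2" yields handle 0x00020001,
 * so to_hash() selects bucket 1 and from_hash(handle >> 16) selects
 * chain 2.  "from 2" alone yields 0x00028000 (bit 15 set marks a
 * wildcard "to"), mapping to bucket 256, chain 2.  "to 5" with iif 3
 * yields 0x80030005 (bit 31 marks an iif match), bucket 5, chain 19.
 */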

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_bucket *b;
        struct route4_filter *f;
        unsigned int h1, h2;

        if (!head)
                return 0;

        h1 = to_hash(handle);
        if (h1 > 256)
                return 0;

        h2 = from_hash(handle >> 16);
        if (h2 > 32)
                return 0;

        b = rtnl_dereference(head->table[h1]);
        if (b) {
                for (f = rtnl_dereference(b->ht[h2]);
                     f;
                     f = rtnl_dereference(f->next))
                        if (f->handle == handle)
                                return (unsigned long)f;
        }
        return 0;
}

static int route4_init(struct tcf_proto *tp)
{
        struct route4_head *head;

        head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        rcu_assign_pointer(tp->root, head);
        return 0;
}

static void
route4_delete_filter(struct rcu_head *head)
{
        struct route4_filter *f = container_of(head, struct route4_filter, rcu);

        tcf_exts_destroy(&f->exts);
        kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        int h1, h2;

        if (head == NULL)
                return;

        for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;

                b = rtnl_dereference(head->table[h1]);
                if (b) {
                        for (h2 = 0; h2 <= 32; h2++) {
                                struct route4_filter *f;

                                while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
                                        struct route4_filter *next;

                                        next = rtnl_dereference(f->next);
                                        RCU_INIT_POINTER(b->ht[h2], next);
                                        tcf_unbind_filter(tp, &f->res);
                                        call_rcu(&f->rcu, route4_delete_filter);
                                }
                        }
                        RCU_INIT_POINTER(head->table[h1], NULL);
                        kfree_rcu(b, rcu);
                }
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter *f = (struct route4_filter *)arg;
        struct route4_filter __rcu **fp;
        struct route4_filter *nf;
        struct route4_bucket *b;
        unsigned int h = 0;
        int i;

        if (!head || !f)
                return -EINVAL;

        h = f->handle;
        b = f->bkt;

        fp = &b->ht[from_hash(h >> 16)];
        for (nf = rtnl_dereference(*fp); nf;
             fp = &nf->next, nf = rtnl_dereference(*fp)) {
                if (nf == f) {
                        /* unlink it */
                        RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

                        /* Remove any fastmap lookups that might reference the
                         * filter; we unlinked it above, so it cannot get back
                         * into the fastmap.
                         */
                        route4_reset_fastmap(head);

                        /* Delete it */
                        tcf_unbind_filter(tp, &f->res);
                        call_rcu(&f->rcu, route4_delete_filter);

                        /* Strip RTNL protected tree */
                        for (i = 0; i <= 32; i++) {
                                struct route4_filter *rt;

                                rt = rtnl_dereference(b->ht[i]);
                                if (rt)
                                        return 0;
                        }

                        /* OK, session has no flows */
                        RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
                        kfree_rcu(b, rcu);

                        return 0;
                }
        }
        return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
        [TCA_ROUTE4_CLASSID]    = { .type = NLA_U32 },
        [TCA_ROUTE4_TO]         = { .type = NLA_U32 },
        [TCA_ROUTE4_FROM]       = { .type = NLA_U32 },
        [TCA_ROUTE4_IIF]        = { .type = NLA_U32 },
};

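/*
 * Beyond the NLA_U32 type checks above, route4_set_parms() enforces the
 * value ranges: TO and FROM must fit a realm (<= 0xFF) and IIF must be
 * below 0x8000, since bit 15 of the upper handle half flags an iif match.
 */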
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                            unsigned long base, struct route4_filter *f,
                            u32 handle, struct route4_head *head,
                            struct nlattr **tb, struct nlattr *est, int new,
                            bool ovr)
{
        int err;
        u32 id = 0, to = 0, nhandle = 0x8000;
        struct route4_filter *fp;
        unsigned int h1;
        struct route4_bucket *b;
        struct tcf_exts e;

        tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_ROUTE4_TO]) {
                if (new && handle & 0x8000)
                        goto errout;
                to = nla_get_u32(tb[TCA_ROUTE4_TO]);
                if (to > 0xFF)
                        goto errout;
                nhandle = to;
        }

        if (tb[TCA_ROUTE4_FROM]) {
                if (tb[TCA_ROUTE4_IIF])
                        goto errout;
                id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
                if (id > 0xFF)
                        goto errout;
                nhandle |= id << 16;
        } else if (tb[TCA_ROUTE4_IIF]) {
                id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
                if (id > 0x7FFF)
                        goto errout;
                nhandle |= (id | 0x8000) << 16;
        } else
                nhandle |= 0xFFFF << 16;

        if (handle && new) {
                nhandle |= handle & 0x7F00;
                if (nhandle != handle)
                        goto errout;
        }

        h1 = to_hash(nhandle);
        b = rtnl_dereference(head->table[h1]);
        if (!b) {
                err = -ENOBUFS;
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
                        goto errout;

                rcu_assign_pointer(head->table[h1], b);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);

                err = -EEXIST;
                for (fp = rtnl_dereference(b->ht[h2]);
                     fp;
                     fp = rtnl_dereference(fp->next))
                        if (fp->handle == f->handle)
                                goto errout;
        }

        if (tb[TCA_ROUTE4_TO])
                f->id = to;

        if (tb[TCA_ROUTE4_FROM])
                f->id = to | id << 16;
        else if (tb[TCA_ROUTE4_IIF])
                f->iif = id;

        f->handle = nhandle;
        f->bkt = b;
        f->tp = tp;

        if (tb[TCA_ROUTE4_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
        }

        tcf_exts_change(tp, &f->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(&e);
        return err;
}

static int route4_change(struct net *net, struct sk_buff *in_skb,
                         struct tcf_proto *tp, unsigned long base,
                         u32 handle,
                         struct nlattr **tca,
                         unsigned long *arg, bool ovr)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter __rcu **fp;
        struct route4_filter *fold, *f1, *pfp, *f = NULL;
        struct route4_bucket *b;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_ROUTE4_MAX + 1];
        unsigned int h, th;
        int err;
        bool new = true;

        if (opt == NULL)
                return handle ? -EINVAL : 0;

        err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
        if (err < 0)
                return err;

        fold = (struct route4_filter *)*arg;
        if (fold && handle && fold->handle != handle)
                return -EINVAL;

        err = -ENOBUFS;
        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (!f)
                goto errout;

        tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        if (fold) {
                f->id = fold->id;
                f->iif = fold->iif;
                f->res = fold->res;
                f->handle = fold->handle;

                f->tp = fold->tp;
                f->bkt = fold->bkt;
                new = false;
        }

        err = route4_set_parms(net, tp, base, f, handle, head, tb,
                               tca[TCA_RATE], new, ovr);
        if (err < 0)
                goto errout;

        h = from_hash(f->handle >> 16);
        fp = &f->bkt->ht[h];
        for (pfp = rtnl_dereference(*fp);
             (f1 = rtnl_dereference(*fp)) != NULL;
             fp = &f1->next)
                if (f->handle < f1->handle)
                        break;

        netif_keep_dst(qdisc_dev(tp->q));
        rcu_assign_pointer(f->next, f1);
        rcu_assign_pointer(*fp, f);

        if (fold && fold->handle && f->handle != fold->handle) {
                th = to_hash(fold->handle);
                h = from_hash(fold->handle >> 16);
                b = rtnl_dereference(head->table[th]);
                if (b) {
                        fp = &b->ht[h];
                        for (pfp = rtnl_dereference(*fp); pfp;
                             fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
                                if (pfp == f) {
                                        *fp = f->next;
                                        break;
                                }
                        }
                }
        }

        route4_reset_fastmap(head);
        *arg = (unsigned long)f;
        if (fold) {
                tcf_unbind_filter(tp, &fold->res);
                call_rcu(&fold->rcu, route4_delete_filter);
        }
        return 0;

errout:
        kfree(f);
        return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        unsigned int h, h1;

        if (head == NULL)
                arg->stop = 1;

        if (arg->stop)
                return;

        for (h = 0; h <= 256; h++) {
                struct route4_bucket *b = rtnl_dereference(head->table[h]);

                if (b) {
                        for (h1 = 0; h1 <= 32; h1++) {
                                struct route4_filter *f;

                                for (f = rtnl_dereference(b->ht[h1]);
                                     f;
                                     f = rtnl_dereference(f->next)) {
                                        if (arg->count < arg->skip) {
                                                arg->count++;
                                                continue;
                                        }
                                        if (arg->fn(tp, (unsigned long)f, arg) < 0) {
                                                arg->stop = 1;
                                                return;
                                        }
                                        arg->count++;
                                }
                        }
                }
        }
}

static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
{
        struct route4_filter *f = (struct route4_filter *)fh;
        struct nlattr *nest;
        u32 id;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!(f->handle & 0x8000)) {
                id = f->id & 0xFF;
                if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
                        goto nla_put_failure;
        }
        if (f->handle & 0x80000000) {
                if ((f->handle >> 16) != 0xFFFF &&
                    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
                        goto nla_put_failure;
        } else {
                id = f->id >> 16;
                if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
                        goto nla_put_failure;
        }
        if (f->res.classid &&
            nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
        .kind           = "route",
        .classify       = route4_classify,
        .init           = route4_init,
        .destroy        = route4_destroy,
        .get            = route4_get,
        .change         = route4_change,
        .delete         = route4_delete,
        .walk           = route4_walk,
        .dump           = route4_dump,
        .owner          = THIS_MODULE,
};

static int __init init_route4(void)
{
        return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
        unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");