/*
 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

/*
   Compared to the general packet classification problem,
   RSVP needs only several relatively simple rules:

   * (dst, protocol) are always specified,
     so that we are able to hash them.
   * src may be exact or may be a wildcard, so that
     we can keep a hash table plus one wildcard entry.
   * source port (or flow label) is important only if src is given.

   IMPLEMENTATION.

   We use a two-level hash table: the top level is keyed by
   destination address and protocol ID; every bucket contains a list
   of "RSVP sessions", identified by destination address, protocol and
   DPI (= "Destination Port ID"): a (key, mask, offset) triple.

   Every session has a smaller hash table keyed by source address
   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
   Every bucket of that table is again a list of "RSVP flows", selected by
   source address and SPI (= "Source Port ID" here, rather than
   "security parameter index"): a (key, mask, offset) triple.


   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
   and all fragmented packets go to the best-effort traffic class.


   NOTE 2. The two "port id"s seem to be redundant; RFC 2207 requires
   only one "Generalized Port Identifier". So for classic
   AH, ESP (and UDP, TCP) both *PIs should coincide, or one of them
   should be a wildcard.

   At first sight, this redundancy is just a waste of CPU
   resources. But DPI and SPI make it possible to assign different
   priorities to GPIs. See also note 4 about tunnels below.


   NOTE 3. One complication is the case of tunneled packets.
   We implement it as follows: if the first lookup
   matches a special session with a non-zero "tunnelhdr" value,
   the flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
   In this case, we pull tunnelhdr bytes and restart the lookup
   with the tunnel ID added to the list of keys. Simple and stupid 8)8)
   It's enough for PIMREG and IPIP.


   NOTE 4. Two GPIs make it possible to parse even GRE packets.
   E.g. DPI can select ETH_P_IP (and the flags necessary to make
   tunnelhdr correct) in the GRE protocol field, and SPI matches the
   GRE key. Is it not nice? 8)8)


   Well, as a result, despite its simplicity, we get a pretty
   powerful classification engine.  */
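
/* To make the DPI/SPI matching above concrete: each Generalized Port
 * Identifier is a (key, mask, offset) triple (struct tc_rsvp_gpi from
 * include/uapi/linux/pkt_cls.h), and a packet matches when the masked
 * 32-bit word found "offset" bytes into the transport header equals the
 * masked key.  A minimal illustrative sketch of that predicate follows;
 * the helper name is ours, and rsvp_classify() below open-codes the same
 * test rather than calling it.
 */
static inline bool example_gpi_match(const struct tc_rsvp_gpi *gpi,
				     const u8 *xprt)
{
	/* xprt points at the first byte past the IP header, exactly as in
	 * rsvp_classify(); an all-zero mask matches any packet (wildcard).
	 */
	return !(gpi->mask & (*(const u32 *)(xprt + gpi->offset) ^ gpi->key));
}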


struct rsvp_head {
	u32			tmap[256/32];
	u32			hgenerator;
	u8			tgenerator;
	struct rsvp_session __rcu *ht[256];
	struct rcu_head		rcu;
};

struct rsvp_session {
	struct rsvp_session __rcu	*next;
	__be32				dst[RSVP_DST_LEN];
	struct tc_rsvp_gpi		dpi;
	u8				protocol;
	u8				tunnelid;
	/* 16 (src,sport) hash slots, and one wildcard source slot */
	struct rsvp_filter __rcu	*ht[16 + 1];
	struct rcu_head			rcu;
};


struct rsvp_filter {
	struct rsvp_filter __rcu	*next;
	__be32				src[RSVP_DST_LEN];
	struct tc_rsvp_gpi		spi;
	u8				tunnelhdr;

	struct tcf_result		res;
	struct tcf_exts			exts;

	u32				handle;
	struct rsvp_session		*sess;
	struct rcu_head			rcu;
};

static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];

	h ^= h>>16;
	h ^= h>>8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

static inline unsigned int hash_src(__be32 *src)
{
	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];

	h ^= h>>16;
	h ^= h>>8;
	h ^= h>>4;
	return h & 0xF;
}

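/* Run the filter's actions against the packet.  A negative result means
 * "this filter does not apply, keep scanning"; a positive result is a
 * verdict returned to the caller as-is; zero falls through, and the
 * surrounding code accepts the classification.
 */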
#define RSVP_APPLY_RESULT()				\
{							\
	int r = tcf_exts_exec(skb, &f->exts, res);	\
	if (r < 0)					\
		continue;				\
	else if (r > 0)					\
		return r;				\
}

static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct rsvp_head *head = rcu_dereference_bh(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1, h2;
	__be32 *dst, *src;
	u8 protocol;
	u8 tunnelid = 0;
	u8 *xprt;
#if RSVP_DST_LEN == 4
	struct ipv6hdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ipv6_hdr(skb);
#else
	struct iphdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ip_hdr(skb);
#endif

restart:

#if RSVP_DST_LEN == 4
	src = &nhptr->saddr.s6_addr32[0];
	dst = &nhptr->daddr.s6_addr32[0];
	protocol = nhptr->nexthdr;
	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
	src = &nhptr->saddr;
	dst = &nhptr->daddr;
	protocol = nhptr->protocol;
	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
	if (ip_is_fragment(nhptr))
		return -1;
#endif

	h1 = hash_dst(dst, protocol, tunnelid);
	h2 = hash_src(src);

	for (s = rcu_dereference_bh(head->ht[h1]); s;
	     s = rcu_dereference_bh(s->next)) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
		    protocol == s->protocol &&
		    !(s->dpi.mask &
		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    tunnelid == s->tunnelid) {

			for (f = rcu_dereference_bh(s->ht[h2]); f;
			     f = rcu_dereference_bh(f->next)) {
				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
				    &&
				    src[0] == f->src[0] &&
				    src[1] == f->src[1] &&
				    src[2] == f->src[2]
#endif
				    ) {
					*res = f->res;
					RSVP_APPLY_RESULT();

matched:
					if (f->tunnelhdr == 0)
						return 0;

					tunnelid = f->res.classid;
					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
					goto restart;
				}
			}

			/* And wildcard bucket... */
			for (f = rcu_dereference_bh(s->ht[16]); f;
			     f = rcu_dereference_bh(f->next)) {
				*res = f->res;
				RSVP_APPLY_RESULT();
				goto matched;
			}
			return -1;
		}
	}
	return -1;
}
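
/* Worked example (illustrative only, not used elsewhere in this file):
 * a DPI that selects UDP destination port 1701.  xprt above points at the
 * first byte of the transport header, so for UDP the 32-bit word at offset 0
 * holds the source port in its upper 16 bits and the destination port in its
 * lower 16 bits (when read as a big-endian value).  Masking with 0x0000FFFF
 * therefore compares only the destination port.  The helper name and the
 * port number are ours, chosen for illustration.
 */
static inline void example_udp_dport_dpi(struct tc_rsvp_gpi *dpi, __u16 dport)
{
	dpi->key = htonl((__u32)dport);	/* match value, network byte order */
	dpi->mask = htonl(0x0000FFFF);	/* compare the low 16 bits only */
	dpi->offset = 0;		/* word at the start of the UDP header */
}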

static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter __rcu **ins;
	struct rsvp_filter *pins;
	unsigned int h1 = h & 0xFF;
	unsigned int h2 = (h >> 8) & 0xFF;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
			if (pins->handle == h) {
				RCU_INIT_POINTER(n->next, pins->next);
				rcu_assign_pointer(*ins, n);
				return;
			}
		}
	}

	/* Something went wrong if we are trying to replace a non-existent
	 * node.  Might as well halt instead of silently failing.
	 */
	BUG_ON(1);
}

static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1 = handle & 0xFF;
	unsigned int h2 = (handle >> 8) & 0xFF;

	if (h2 > 16)
		return 0;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (f = rtnl_dereference(s->ht[h2]); f;
		     f = rtnl_dereference(f->next)) {
			if (f->handle == handle)
				return (unsigned long)f;
		}
	}
	return 0;
}
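
/* Filter handles encode where the filter lives, so lookups never need to
 * scan the whole table:
 *
 *	bits  0..7	top-level bucket, i.e. hash_dst(dst, protocol, tunnelid)
 *	bits  8..15	source bucket, i.e. hash_src(src), or 16 for the
 *			wildcard source slot
 *	bits 16..31	per-filter id handed out by gen_handle() below
 *			(hgenerator advances in steps of 0x10000)
 *
 * rsvp_get() above and rsvp_replace()/rsvp_delete() all decode this layout.
 */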

static int rsvp_init(struct tcf_proto *tp)
{
	struct rsvp_head *data;

	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
	if (data) {
		rcu_assign_pointer(tp->root, data);
		return 0;
	}
	return -ENOBUFS;
}

static void rsvp_delete_filter_rcu(struct rcu_head *head)
{
	struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	call_rcu(&f->rcu, rsvp_delete_filter_rcu);
}

static bool rsvp_destroy(struct tcf_proto *tp, bool force)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int h1, h2;

	if (data == NULL)
		return true;

	if (!force) {
		for (h1 = 0; h1 < 256; h1++) {
			if (rcu_access_pointer(data->ht[h1]))
				return false;
		}
	}

	RCU_INIT_POINTER(tp->root, NULL);

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
			RCU_INIT_POINTER(data->ht[h1], s->next);

			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
					rcu_assign_pointer(s->ht[h2], f->next);
					rsvp_delete_filter(tp, f);
				}
			}
			kfree_rcu(s, rcu);
		}
	}
	kfree_rcu(data, rcu);
	return true;
}

static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg;
	struct rsvp_filter __rcu **fp;
	unsigned int h = f->handle;
	struct rsvp_session __rcu **sp;
	struct rsvp_session *nsp, *s = f->sess;
	int i;

	fp = &s->ht[(h >> 8) & 0xFF];
	for (nfp = rtnl_dereference(*fp); nfp;
	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
		if (nfp == f) {
			RCU_INIT_POINTER(*fp, f->next);
			rsvp_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 16; i++)
				if (s->ht[i])
					return 0;

			/* OK, session has no flows */
			sp = &head->ht[h & 0xFF];
			for (nsp = rtnl_dereference(*sp); nsp;
			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
				if (nsp == s) {
					RCU_INIT_POINTER(*sp, s->next);
					kfree_rcu(s, rcu);
					return 0;
				}
			}

			return 0;
		}
	}
	return 0;
}

static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int i = 0xFFFF;

	while (i-- > 0) {
		u32 h;

		if ((data->hgenerator += 0x10000) == 0)
			data->hgenerator = 0x10000;
		h = data->hgenerator|salt;
		if (rsvp_get(tp, h) == 0)
			return h;
	}
	return 0;
}
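
/* Tunnel IDs (1..255) are tracked in the 256-bit bitmap rsvp_head.tmap.
 * tunnel_bts() (presumably "bit test and set") marks data->tgenerator as
 * in use and reports whether it was free; tunnel_recycle() rebuilds the
 * bitmap from the tunnel filters that still exist; gen_tunnel() walks the
 * ID space at most twice, recycling in between, to find a free ID.
 */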

static int tunnel_bts(struct rsvp_head *data)
{
	int n = data->tgenerator >> 5;
	u32 b = 1 << (data->tgenerator & 0x1F);

	if (data->tmap[n] & b)
		return 0;
	data->tmap[n] |= b;
	return 1;
}

static void tunnel_recycle(struct rsvp_head *data)
{
	struct rsvp_session __rcu **sht = data->ht;
	u32 tmap[256/32];
	int h1, h2;

	memset(tmap, 0, sizeof(tmap));

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;
		for (s = rtnl_dereference(sht[h1]); s;
		     s = rtnl_dereference(s->next)) {
			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h2]); f;
				     f = rtnl_dereference(f->next)) {
					if (f->tunnelhdr == 0)
						continue;
					data->tgenerator = f->res.classid;
					tunnel_bts(data);
				}
			}
		}
	}

	memcpy(data->tmap, tmap, sizeof(tmap));
}

static u32 gen_tunnel(struct rsvp_head *data)
{
	int i, k;

	for (k = 0; k < 2; k++) {
		for (i = 255; i > 0; i--) {
			if (++data->tgenerator == 0)
				data->tgenerator = 1;
			if (tunnel_bts(data))
				return data->tgenerator;
		}
		tunnel_recycle(data);
	}
	return 0;
}

static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
				    .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
				    .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
};
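
/* For reference, this is roughly what arrives in TCA_RSVP_PINFO for a
 * filter that matches, say, TCP traffic to destination port 5000 from any
 * sender: the DPI selects the destination port as in the worked example
 * after rsvp_classify(), and an all-zero SPI acts as a source wildcard.
 * The helper below only illustrates the expected attribute layout
 * (normally userspace fills this in); its name and values are ours.
 */
static inline void example_fill_pinfo(struct tc_rsvp_pinfo *p)
{
	memset(p, 0, sizeof(*p));	/* spi stays all-zero: wildcard source port */
	p->dpi.key = htonl(5000);	/* destination port, low 16 bits of word 0 */
	p->dpi.mask = htonl(0x0000FFFF);
	p->dpi.offset = 0;
	p->protocol = IPPROTO_TCP;
	/* tunnelid/tunnelhdr stay 0: this is not a tunnel-decapsulation filter */
}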

static int rsvp_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	struct rsvp_filter *f, *nfp;
	struct rsvp_filter __rcu **fp;
	struct rsvp_session *nsp, *s;
	struct rsvp_session __rcu **sp;
	struct tc_rsvp_pinfo *pinfo = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_RSVP_MAX + 1];
	struct tcf_exts e;
	unsigned int h1, h2;
	__be32 *dst;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy);
	if (err < 0)
		return err;

	err = tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		goto errout2;

	f = (struct rsvp_filter *)*arg;
	if (f) {
		/* Node exists: adjust only classid */
		struct rsvp_filter *n;

		if (f->handle != handle && handle)
			goto errout2;

		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
		if (!n) {
			err = -ENOMEM;
			goto errout2;
		}

		err = tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
		if (err < 0) {
			kfree(n);
			goto errout2;
		}

		if (tb[TCA_RSVP_CLASSID]) {
			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
			tcf_bind_filter(tp, &n->res, base);
		}

		tcf_exts_change(tp, &n->exts, &e);
		rsvp_replace(tp, n, handle);
		return 0;
	}

	/* Now more serious part... */
	err = -EINVAL;
	if (handle)
		goto errout2;
	if (tb[TCA_RSVP_DST] == NULL)
		goto errout2;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout2;

	err = tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		goto errout;
	h2 = 16;
	if (tb[TCA_RSVP_SRC]) {
		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
		h2 = hash_src(f->src);
	}
	if (tb[TCA_RSVP_PINFO]) {
		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
		f->spi = pinfo->spi;
		f->tunnelhdr = pinfo->tunnelhdr;
	}
	if (tb[TCA_RSVP_CLASSID])
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);

	dst = nla_data(tb[TCA_RSVP_DST]);
	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);

	err = -ENOMEM;
	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
		goto errout;

	if (f->tunnelhdr) {
		err = -EINVAL;
		if (f->res.classid > 255)
			goto errout;

		err = -ENOMEM;
		if (f->res.classid == 0 &&
		    (f->res.classid = gen_tunnel(data)) == 0)
			goto errout;
	}

	for (sp = &data->ht[h1];
	     (s = rtnl_dereference(*sp)) != NULL;
	     sp = &s->next) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
		    pinfo && pinfo->protocol == s->protocol &&
		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    pinfo->tunnelid == s->tunnelid) {

insert:
			/* OK, we found appropriate session */

			fp = &s->ht[h2];

			f->sess = s;
			if (f->tunnelhdr == 0)
				tcf_bind_filter(tp, &f->res, base);

			tcf_exts_change(tp, &f->exts, &e);

			fp = &s->ht[h2];
			for (nfp = rtnl_dereference(*fp); nfp;
			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
				__u32 mask = nfp->spi.mask & f->spi.mask;

				if (mask != f->spi.mask)
					break;
			}
			RCU_INIT_POINTER(f->next, nfp);
			rcu_assign_pointer(*fp, f);

			*arg = (unsigned long)f;
			return 0;
		}
	}

	/* No session found. Create new one. */

	err = -ENOBUFS;
	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
	if (s == NULL)
		goto errout;
	memcpy(s->dst, dst, sizeof(s->dst));

	if (pinfo) {
		s->dpi = pinfo->dpi;
		s->protocol = pinfo->protocol;
		s->tunnelid = pinfo->tunnelid;
	}
	sp = &data->ht[h1];
	for (nsp = rtnl_dereference(*sp); nsp;
	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
			break;
	}
	RCU_INIT_POINTER(s->next, nsp);
	rcu_assign_pointer(*sp, s);

	goto insert;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
errout2:
	tcf_exts_destroy(&e);
	return err;
}
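
/* Note on ordering: the two insertion loops in rsvp_change() above place a
 * new entry in front of the first existing entry whose GPI mask does not
 * cover all of the new entry's mask bits.  In effect, entries that compare
 * more bits (more tightly specified GPIs) sit earlier in each chain, so the
 * linear scans in rsvp_classify() try them first.
 */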

static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (arg->stop)
		return;

	for (h = 0; h < 256; h++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(head->ht[h]); s;
		     s = rtnl_dereference(s->next)) {
			for (h1 = 0; h1 <= 16; h1++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h1]); f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct rsvp_filter *f = (struct rsvp_filter *)fh;
	struct rsvp_session *s;
	struct nlattr *nest;
	struct tc_rsvp_pinfo pinfo;

	if (f == NULL)
		return skb->len;
	s = f->sess;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
		goto nla_put_failure;
	pinfo.dpi = s->dpi;
	pinfo.spi = f->spi;
	pinfo.protocol = s->protocol;
	pinfo.tunnelid = s->tunnelid;
	pinfo.tunnelhdr = f->tunnelhdr;
	pinfo.pad = 0;
	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
		goto nla_put_failure;
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (((f->handle >> 8) & 0xFF) != 16 &&
	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops RSVP_OPS __read_mostly = {
	.kind		= RSVP_ID,
	.classify	= rsvp_classify,
	.init		= rsvp_init,
	.destroy	= rsvp_destroy,
	.get		= rsvp_get,
	.change		= rsvp_change,
	.delete		= rsvp_delete,
	.walk		= rsvp_walk,
	.dump		= rsvp_dump,
	.owner		= THIS_MODULE,
};

static int __init init_rsvp(void)
{
	return register_tcf_proto_ops(&RSVP_OPS);
}

static void __exit exit_rsvp(void)
{
	unregister_tcf_proto_ops(&RSVP_OPS);
}

module_init(init_rsvp)
module_exit(exit_rsvp)