/*
 * net/sched/em_meta.c  Metadata ematch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 * The metadata ematch compares two meta objects where each object
 * represents either a meta value stored in the kernel or a static
 * value provided by userspace. The objects are not provided by
 * userspace itself but rather a definition providing the information
 * to build them. Every object is of a certain type which must be
 * equal to the object it is being compared to.
 *
 * The definition of an object consists of the type (meta type), an
 * identifier (meta id) and additional type specific information.
 * The meta id is either TCF_META_ID_VALUE for values provided by
 * userspace or an index into the meta operations table consisting of
 * function pointers to type specific meta data collectors returning
 * the value of the requested meta value.
 *
 *            lvalue                                    rvalue
 *         +-----------+                             +-----------+
 *         | type: INT |                             | type: INT |
 *    def  | id: DEV   |                             | id: VALUE |
 *         | data:     |                             | data: 3   |
 *         +-----------+                             +-----------+
 *               |                                         |
 *               ---> meta_ops[INT][DEV](...)              |
 *                         |                               |
 *               -----------                               |
 *               V                                         V
 *         +-----------+                             +-----------+
 *         | type: INT |                             | type: INT |
 *    obj  | id: DEV   |                             | id: VALUE |
 *         | data: 2   |<--data got filled out       | data: 3   |
 *         +-----------+                             +-----------+
 *               |                                         |
 *               ----------------> 2 equals 3 <-------------
 *
 * This is a simplified schema, the complexity varies depending
 * on the meta type. Obviously, the length of the data must also
 * be provided for non-numeric types.
 *
 * Additionally, type dependent modifiers such as shift operators
 * or mask may be applied to extend the functionality. As of now,
 * the variable length type supports shifting the byte string to
 * the right, eating up any number of octets and thus supporting
 * wildcard interface name comparisons such as "ppp%" matching
 * ppp0..9.
 *
 * NOTE: Certain meta values depend on other subsystems and are
 *       only available if that subsystem is enabled in the kernel.
 */
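
/*
 * Userspace example (illustrative only, see tc-ematch(8) for the
 * authoritative syntax): classify packets whose skb priority equals 3
 * using the basic classifier:
 *
 *      tc filter add dev eth0 parent 1: basic \
 *              match 'meta(priority eq 3)' classid 1:3
 */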

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

struct meta_obj {
        unsigned long           value;
        unsigned int            len;
};

struct meta_value {
        struct tcf_meta_val     hdr;
        unsigned long           val;
        unsigned int            len;
};

struct meta_match {
        struct meta_value       lvalue;
        struct meta_value       rvalue;
};

static inline int meta_id(struct meta_value *v)
{
        return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
        return TCF_META_TYPE(v->hdr.kind);
}

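/*
 * META_COLLECTOR(FUNC) expands to the definition of a collector
 * function meta_FUNC(skb, info, v, dst, err) which fills dst->value
 * (and dst->len for variable length types) or reports failure via *err.
 */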
#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
        struct tcf_pkt_info *info, struct meta_value *v, \
        struct meta_obj *dst, int *err)

/**************************************************************************
 * System status & misc
 **************************************************************************/

META_COLLECTOR(int_random)
{
        get_random_bytes(&dst->value, sizeof(dst->value));
}

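/*
 * Convert a fixed-point load average (FSHIFT fractional bits, as found
 * in avenrun[]) into an integer scaled by 100, using the same rounding
 * as /proc/loadavg, e.g. 1.87 becomes 187.
 */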
static inline unsigned long fixed_loadavg(int load)
{
        int rnd_load = load + (FIXED_1/200);
        int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

        return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}

META_COLLECTOR(int_loadavg_0)
{
        dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
        dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
        dst->value = fixed_loadavg(avenrun[2]);
}

/**************************************************************************
 * Device names & indices
 **************************************************************************/

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
        if (unlikely(dev == NULL))
                return -1;

        dst->value = dev->ifindex;
        return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
        if (unlikely(dev == NULL))
                return -1;

        dst->value = (unsigned long) dev->name;
        dst->len = strlen(dev->name);
        return 0;
}

META_COLLECTOR(int_dev)
{
        *err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
        *err = var_dev(skb->dev, dst);
}

/**************************************************************************
 * vlan tag
 **************************************************************************/

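/*
 * Use the accelerated VLAN tag from the skb metadata if present,
 * otherwise fall back to parsing the VLAN header in the packet.
 */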
META_COLLECTOR(int_vlan_tag)
{
        unsigned short tag;

        tag = skb_vlan_tag_get(skb);
        if (!tag && __vlan_get_tag(skb, &tag))
                *err = -1;
        else
                dst->value = tag;
}


/**************************************************************************
 * skb attributes
 **************************************************************************/

META_COLLECTOR(int_priority)
{
        dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
        /* Let userspace take care of the byte ordering */
        dst->value = tc_skb_protocol(skb);
}

META_COLLECTOR(int_pkttype)
{
        dst->value = skb->pkt_type;
}

META_COLLECTOR(int_pktlen)
{
        dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
        dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
        dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
        dst->value = skb_get_hash(skb);
}

/**************************************************************************
 * Netfilter
 **************************************************************************/

META_COLLECTOR(int_mark)
{
        dst->value = skb->mark;
}

/**************************************************************************
 * Traffic Control
 **************************************************************************/

META_COLLECTOR(int_tcindex)
{
        dst->value = skb->tc_index;
}

/**************************************************************************
 * Routing
 **************************************************************************/

META_COLLECTOR(int_rtclassid)
{
        if (unlikely(skb_dst(skb) == NULL))
                *err = -1;
        else
#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->value = skb_dst(skb)->tclassid;
#else
                dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
        if (unlikely(skb_rtable(skb) == NULL))
                *err = -1;
        else
                dst->value = inet_iif(skb);
}

/**************************************************************************
 * Socket Attributes
 **************************************************************************/

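/*
 * The sk_* collectors only make sense for locally terminated traffic;
 * skip_nonlocal() lets them bail out when the skb carries no socket.
 */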
#define skip_nonlocal(skb) \
        (unlikely(skb->sk == NULL))

META_COLLECTOR(int_sk_family)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        /* No error if bound_dev_if is 0, legal userspace check */
        dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }

        if (skb->sk->sk_bound_dev_if == 0) {
                dst->value = (unsigned long) "any";
                dst->len = 3;
        } else {
                struct net_device *dev;

                rcu_read_lock();
                dev = dev_get_by_index_rcu(sock_net(skb->sk),
                                           skb->sk->sk_bound_dev_if);
                *err = var_dev(dev, dst);
                rcu_read_unlock();
        }
}

META_COLLECTOR(int_sk_refcnt)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = atomic_read(&skb->sk->sk_refcnt);
}

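/*
 * The collectors below use skb_to_full_sk() so that, e.g., a request
 * socket is mapped to its listener before the fields are read; they
 * bail out if no usable socket can be derived from the skb.
 */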
META_COLLECTOR(int_sk_rcvbuf)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_rmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_wmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = atomic_read(&sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_wmem_queued;
}

META_COLLECTOR(int_sk_fwd_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_forward_alloc;
}

META_COLLECTOR(int_sk_sndbuf)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = (__force int) sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_lingertime / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_ack_backlog;
}

META_COLLECTOR(int_sk_max_ack_bl)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_max_ack_backlog;
}

META_COLLECTOR(int_sk_prio)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_priority;
}

META_COLLECTOR(int_sk_rcvlowat)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvlowat;
}

META_COLLECTOR(int_sk_rcvtimeo)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvtimeo / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_sndtimeo / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_write_pending;
}

/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/

struct meta_ops {
        void (*get)(struct sk_buff *, struct tcf_pkt_info *,
                    struct meta_value *, struct meta_obj *, int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

/* Meta value operations table listing all meta value collectors and
 * assigning them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                [META_ID(DEV)] = META_FUNC(var_dev),
                [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
        },
        [TCF_META_TYPE_INT] = {
                [META_ID(RANDOM)] = META_FUNC(int_random),
                [META_ID(LOADAVG_0)] = META_FUNC(int_loadavg_0),
                [META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1),
                [META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2),
                [META_ID(DEV)] = META_FUNC(int_dev),
                [META_ID(PRIORITY)] = META_FUNC(int_priority),
                [META_ID(PROTOCOL)] = META_FUNC(int_protocol),
                [META_ID(PKTTYPE)] = META_FUNC(int_pkttype),
                [META_ID(PKTLEN)] = META_FUNC(int_pktlen),
                [META_ID(DATALEN)] = META_FUNC(int_datalen),
                [META_ID(MACLEN)] = META_FUNC(int_maclen),
                [META_ID(NFMARK)] = META_FUNC(int_mark),
                [META_ID(TCINDEX)] = META_FUNC(int_tcindex),
                [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid),
                [META_ID(RTIIF)] = META_FUNC(int_rtiif),
                [META_ID(SK_FAMILY)] = META_FUNC(int_sk_family),
                [META_ID(SK_STATE)] = META_FUNC(int_sk_state),
                [META_ID(SK_REUSE)] = META_FUNC(int_sk_reuse),
                [META_ID(SK_BOUND_IF)] = META_FUNC(int_sk_bound_if),
                [META_ID(SK_REFCNT)] = META_FUNC(int_sk_refcnt),
                [META_ID(SK_RCVBUF)] = META_FUNC(int_sk_rcvbuf),
                [META_ID(SK_SNDBUF)] = META_FUNC(int_sk_sndbuf),
                [META_ID(SK_SHUTDOWN)] = META_FUNC(int_sk_shutdown),
                [META_ID(SK_PROTO)] = META_FUNC(int_sk_proto),
                [META_ID(SK_TYPE)] = META_FUNC(int_sk_type),
                [META_ID(SK_RMEM_ALLOC)] = META_FUNC(int_sk_rmem_alloc),
                [META_ID(SK_WMEM_ALLOC)] = META_FUNC(int_sk_wmem_alloc),
                [META_ID(SK_OMEM_ALLOC)] = META_FUNC(int_sk_omem_alloc),
                [META_ID(SK_WMEM_QUEUED)] = META_FUNC(int_sk_wmem_queued),
                [META_ID(SK_RCV_QLEN)] = META_FUNC(int_sk_rcv_qlen),
                [META_ID(SK_SND_QLEN)] = META_FUNC(int_sk_snd_qlen),
                [META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen),
                [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
                [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
                [META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
                [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
                [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
                [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
                [META_ID(SK_PRIO)] = META_FUNC(int_sk_prio),
                [META_ID(SK_RCVLOWAT)] = META_FUNC(int_sk_rcvlowat),
                [META_ID(SK_RCVTIMEO)] = META_FUNC(int_sk_rcvtimeo),
                [META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo),
                [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
                [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
                [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
                [META_ID(RXHASH)] = META_FUNC(int_rxhash),
        }
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
        return &__meta_ops[meta_type(val)][meta_id(val)];
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
        int r = a->len - b->len;

        if (r == 0)
                r = memcmp((void *) a->value, (void *) b->value, a->len);

        return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
        int len = nla_len(nla);

        dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
        if (dst->val == 0UL)
                return -ENOMEM;
        dst->len = len;
        return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
        kfree((void *) v->val);
}

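/*
 * A non-zero shift restricts the comparison to the first (len - shift)
 * octets of the collected string, which is what implements the "ppp%"
 * style wildcard match described in the header comment.
 */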
static void meta_var_apply_extras(struct meta_value *v,
                                  struct meta_obj *dst)
{
        int shift = v->hdr.shift;

        if (shift && shift < dst->len)
                dst->len -= shift;
}

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
        if (v->val && v->len &&
            nla_put(skb, tlv, v->len, (void *) v->val))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
        /* Let gcc optimize it, the unlikely is not really based on
         * some numbers but jump free code for mismatches seems
         * more logical. */
        if (unlikely(a->value == b->value))
                return 0;
        else if (a->value < b->value)
                return -1;
        else
                return 1;
}

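/*
 * Userspace may supply the value either as a native unsigned long or
 * as a u32; any other length is rejected.
 */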
static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
        if (nla_len(nla) >= sizeof(unsigned long)) {
                dst->val = *(unsigned long *) nla_data(nla);
                dst->len = sizeof(unsigned long);
        } else if (nla_len(nla) == sizeof(u32)) {
                dst->val = nla_get_u32(nla);
                dst->len = sizeof(u32);
        } else
                return -EINVAL;

        return 0;
}

static void meta_int_apply_extras(struct meta_value *v,
                                  struct meta_obj *dst)
{
        if (v->hdr.shift)
                dst->value >>= v->hdr.shift;

        if (v->val)
                dst->value &= v->val;
}

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
        if (v->len == sizeof(unsigned long)) {
                if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
                        goto nla_put_failure;
        } else if (v->len == sizeof(u32)) {
                if (nla_put_u32(skb, tlv, v->val))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -1;
}

/**************************************************************************
 * Type specific operations table
 **************************************************************************/

struct meta_type_ops {
        void (*destroy)(struct meta_value *);
        int (*compare)(struct meta_obj *, struct meta_obj *);
        int (*change)(struct meta_value *, struct nlattr *);
        void (*apply_extras)(struct meta_value *, struct meta_obj *);
        int (*dump)(struct sk_buff *, struct meta_value *, int);
};

static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                .destroy = meta_var_destroy,
                .compare = meta_var_compare,
                .change = meta_var_change,
                .apply_extras = meta_var_apply_extras,
                .dump = meta_var_dump
        },
        [TCF_META_TYPE_INT] = {
                .compare = meta_int_compare,
                .change = meta_int_change,
                .apply_extras = meta_int_apply_extras,
                .dump = meta_int_dump
        }
};

static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
        return &__meta_type_ops[meta_type(v)];
}

/**************************************************************************
 * Core
 **************************************************************************/

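/*
 * Resolve one side of the match: a VALUE id yields the constant stored
 * at configuration time, anything else invokes the collector and then
 * applies the type specific extras (shift/mask).
 */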
static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
                    struct meta_value *v, struct meta_obj *dst)
{
        int err = 0;

        if (meta_id(v) == TCF_META_ID_VALUE) {
                dst->value = v->val;
                dst->len = v->len;
                return 0;
        }

        meta_ops(v)->get(skb, info, v, dst, &err);
        if (err < 0)
                return err;

        if (meta_type_ops(v)->apply_extras)
                meta_type_ops(v)->apply_extras(v, dst);

        return 0;
}

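/*
 * Match callback: collect both objects and compare them with the
 * lvalue's type operations; the operand (eq/lt/gt) stored in the
 * lvalue header decides how the comparison result is interpreted.
 */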
static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
                         struct tcf_pkt_info *info)
{
        int r;
        struct meta_match *meta = (struct meta_match *) m->data;
        struct meta_obj l_value, r_value;

        if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
            meta_get(skb, info, &meta->rvalue, &r_value) < 0)
                return 0;

        r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

        switch (meta->lvalue.hdr.op) {
        case TCF_EM_OPND_EQ:
                return !r;
        case TCF_EM_OPND_LT:
                return r < 0;
        case TCF_EM_OPND_GT:
                return r > 0;
        }

        return 0;
}

static void meta_delete(struct meta_match *meta)
{
        if (meta) {
                const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

                if (ops && ops->destroy) {
                        ops->destroy(&meta->lvalue);
                        ops->destroy(&meta->rvalue);
                }
        }

        kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
        if (nla) {
                if (nla_len(nla) == 0)
                        return -EINVAL;

                return meta_type_ops(dst)->change(dst, nla);
        }

        return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
        return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
        [TCA_EM_META_HDR] = { .len = sizeof(struct tcf_meta_hdr) },
};

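/*
 * Configuration path: parse and validate the netlink attributes,
 * reject mismatched or out-of-range types/ids, then allocate the match
 * and let the type specific ->change() hooks absorb the optional
 * literal values.
 */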
static int em_meta_change(struct net *net, void *data, int len,
                          struct tcf_ematch *m)
{
        int err;
        struct nlattr *tb[TCA_EM_META_MAX + 1];
        struct tcf_meta_hdr *hdr;
        struct meta_match *meta = NULL;

        err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy);
        if (err < 0)
                goto errout;

        err = -EINVAL;
        if (tb[TCA_EM_META_HDR] == NULL)
                goto errout;
        hdr = nla_data(tb[TCA_EM_META_HDR]);

        if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
            TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
            TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
            TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
                goto errout;

        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
        if (meta == NULL) {
                err = -ENOMEM;
                goto errout;
        }

        memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
        memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

        if (!meta_is_supported(&meta->lvalue) ||
            !meta_is_supported(&meta->rvalue)) {
                err = -EOPNOTSUPP;
                goto errout;
        }

        if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
            meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
                goto errout;

        m->datalen = sizeof(*meta);
        m->data = (unsigned long) meta;

        err = 0;
errout:
        if (err && meta)
                meta_delete(meta);
        return err;
}

static void em_meta_destroy(struct tcf_ematch *m)
{
        if (m)
                meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
        struct meta_match *meta = (struct meta_match *) em->data;
        struct tcf_meta_hdr hdr;
        const struct meta_type_ops *ops;

        memset(&hdr, 0, sizeof(hdr));
        memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
        memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

        if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
                goto nla_put_failure;

        ops = meta_type_ops(&meta->lvalue);
        if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
            ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
        .kind     = TCF_EM_META,
        .change   = em_meta_change,
        .match    = em_meta_match,
        .destroy  = em_meta_destroy,
        .dump     = em_meta_dump,
        .owner    = THIS_MODULE,
        .link     = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
        return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
        tcf_em_unregister(&em_meta_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);