/* include/net/pkt_cls.h - packet classifier frontend definitions */
1 #ifndef __NET_PKT_CLS_H
2 #define __NET_PKT_CLS_H
3
4 #include <linux/pkt_cls.h>
5 #include <net/sch_generic.h>
6 #include <net/act_api.h>
7
8 /* Basic packet classifier frontend definitions. */
9
/* State for walking all filters attached to a classifier via
 * tcf_proto_ops->walk().
 * NOTE(review): by convention @fn returning non-zero stops the walk via
 * @stop, while @skip/@count track progress for resumable dumps — confirm
 * against the individual classifiers' walk() implementations.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};
16
17 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
18 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
19
/* Atomically replace the class handle at @clp with @cl and return the
 * previous value so the caller can unbind it. Lockless variant; see
 * cls_set_class() for the tree-locked wrapper.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
25
/* Replace the class handle at @clp under the qdisc tree lock and return
 * the previous value. The lock serializes the exchange against other
 * updates on the same qdisc tree.
 */
static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}
38
/* Bind the filter result @r to the class identified by r->classid:
 * ask the qdisc's class ops for a class handle, install it in @r under
 * the tree lock, and unbind whatever class was previously bound.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		/* drop the old binding, if there was one */
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
49
50 static inline void
51 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
52 {
53 unsigned long cl;
54
55 if ((cl = __cls_set_class(&r->class, 0)) != 0)
56 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
57 }
58
/* Extensions (actions, policing) attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int		nr_actions;	/* number of valid entries in @actions */
	struct tc_action **actions;	/* array of TCA_ACT_MAX_PRIO slots,
					 * allocated by tcf_exts_init() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int	action;
	int	police;
};
71
/* Initialize @exts, allocating the action pointer array when actions are
 * configured in, and record the classifier's TLV type mapping (@action,
 * @police). May sleep (GFP_KERNEL allocation).
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
86
/**
 * tcf_exts_is_predicative - check if a predicative extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if a predicative extension is present, i.e. an extension which
 * might cause further actions and thus overrule the regular tcf_result.
 */
static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* Any attached action counts as predicative. */
	return exts->nr_actions;
#else
	/* Without CONFIG_NET_CLS_ACT no actions can be attached. */
	return 0;
#endif
}
103
/**
 * tcf_exts_is_available - check if at least one extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if at least one extension is present.
 */
static inline int
tcf_exts_is_available(struct tcf_exts *exts)
{
	/* All non-predicative extensions must be added here. */
	return tcf_exts_is_predicative(exts);
}
116
/* Collect the actions attached to @exts into the list @actions.
 * Note that list_add() inserts at the head, so the resulting list is in
 * reverse array order. No-op when CONFIG_NET_CLS_ACT is disabled.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add(&a->list, actions);
	}
#endif
}
130
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns 0 on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	/* Delegate to the action subsystem only when actions are attached. */
	if (exts->nr_actions)
		return tcf_action_exec(skb, exts->actions, exts->nr_actions,
				       res);
#endif
	/* No actions (or actions compiled out): normal execution. */
	return 0;
}
153
#ifdef CONFIG_NET_CLS_ACT

/* True when no actions are attached to the extensions. */
#define tc_no_actions(_exts)  ((_exts)->nr_actions == 0)
/* True when exactly one action is attached. */
#define tc_single_action(_exts) ((_exts)->nr_actions == 1)

#else /* CONFIG_NET_CLS_ACT */

/* Without CONFIG_NET_CLS_ACT actions cannot exist at all. */
#define tc_no_actions(_exts)  true
#define tc_single_action(_exts) false

#endif /* CONFIG_NET_CLS_ACT */
165
166 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
167 struct nlattr **tb, struct nlattr *rate_tlv,
168 struct tcf_exts *exts, bool ovr);
169 void tcf_exts_destroy(struct tcf_exts *exts);
170 void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
171 struct tcf_exts *src);
172 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
173 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
174
/**
 * struct tcf_pkt_info - packet information
 * @ptr: pointer into the packet data. NOTE(review): appears to track the
 *	current match position during ematch evaluation — confirm against
 *	the em_* modules.
 * @nexthdr: next-header information for the match — verify exact
 *	semantics against the ematch users.
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
182
183 #ifdef CONFIG_NET_EMATCH
184
185 struct tcf_ematch_ops;
186
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace the ematch belongs to (NOTE(review): presumably
 *	needed at destroy time — confirm against ematch core)
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
204
205 static inline int tcf_em_is_container(struct tcf_ematch *em)
206 {
207 return !em->ops;
208 }
209
/* Non-zero when the TCF_EM_SIMPLE flag is set on this ematch.
 * Returns the masked flag bits, not a normalized 0/1.
 */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}
214
/* Non-zero when the match result must be inverted (TCF_EM_INVERT).
 * Returns the masked flag bits, not a normalized 0/1.
 */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}
219
/* True when @em is the last match in its chain, i.e. its relation
 * field is TCF_EM_REL_END.
 */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
224
225 static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
226 {
227 if (tcf_em_last_match(em))
228 return 1;
229
230 if (result == 0 && em->flags & TCF_EM_REL_AND)
231 return 1;
232
233 if (result != 0 && em->flags & TCF_EM_REL_OR)
234 return 1;
235
236 return 0;
237 }
238
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches (hdr.nmatches entries)
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
250
251 /**
252 * struct tcf_ematch_ops - ematch module operations
253 *
254 * @kind: identifier (kind) of this ematch module
255 * @datalen: length of expected configuration data (optional)
256 * @change: called during validation (optional)
257 * @match: called during ematch tree evaluation, must return 1/0
258 * @destroy: called during destroyage (optional)
259 * @dump: called during dumping process (optional)
260 * @owner: owner, must be set to THIS_MODULE
261 * @link: link to previous/next ematch module (internal use)
262 */
263 struct tcf_ematch_ops {
264 int kind;
265 int datalen;
266 int (*change)(struct net *net, void *,
267 int, struct tcf_ematch *);
268 int (*match)(struct sk_buff *, struct tcf_ematch *,
269 struct tcf_pkt_info *);
270 void (*destroy)(struct tcf_ematch *);
271 int (*dump)(struct sk_buff *, struct tcf_ematch *);
272 struct module *owner;
273 struct list_head link;
274 };
275
276 int tcf_em_register(struct tcf_ematch_ops *);
277 void tcf_em_unregister(struct tcf_ematch_ops *);
278 int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
279 struct tcf_ematch_tree *);
280 void tcf_em_tree_destroy(struct tcf_ematch_tree *);
281 int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
282 int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
283 struct tcf_pkt_info *);
284
/**
 * tcf_em_tree_change - replace ematch tree of a running classifier
 *
 * @tp: classifier kind handle
 * @dst: destination ematch tree variable
 * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
 *
 * This function replaces the ematch tree in @dst with the ematch
 * tree in @src. The classifier in charge of the ematch tree may be
 * running; the copy is done under the qdisc tree lock.
 */
static inline void tcf_em_tree_change(struct tcf_proto *tp,
				      struct tcf_ematch_tree *dst,
				      struct tcf_ematch_tree *src)
{
	tcf_tree_lock(tp);
	memcpy(dst, src, sizeof(*dst));
	tcf_tree_unlock(tp);
}
304
305 /**
306 * tcf_em_tree_match - evaulate an ematch tree
307 *
308 * @skb: socket buffer of the packet in question
309 * @tree: ematch tree to be used for evaluation
310 * @info: packet information examined by classifier
311 *
312 * This function matches @skb against the ematch tree in @tree by going
313 * through all ematches respecting their logic relations returning
314 * as soon as the result is obvious.
315 *
316 * Returns 1 if the ematch tree as-one matches, no ematches are configured
317 * or ematch is not enabled in the kernel, otherwise 0 is returned.
318 */
319 static inline int tcf_em_tree_match(struct sk_buff *skb,
320 struct tcf_ematch_tree *tree,
321 struct tcf_pkt_info *info)
322 {
323 if (tree->hdr.nmatches)
324 return __tcf_em_tree_match(skb, tree, info);
325 else
326 return 1;
327 }
328
/* Module alias so ematch modules can be looked up by numeric kind
 * (presumably via request_module("ematch-kind-%u") — confirm in ematch core).
 */
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
330
331 #else /* CONFIG_NET_EMATCH */
332
/* CONFIG_NET_EMATCH disabled: provide an empty tree type and no-op stubs
 * so classifiers can use the ematch API unconditionally.
 * tcf_em_tree_match() then always reports a match (1).
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
341
342 #endif /* CONFIG_NET_EMATCH */
343
344 static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
345 {
346 switch (layer) {
347 case TCF_LAYER_LINK:
348 return skb->data;
349 case TCF_LAYER_NETWORK:
350 return skb_network_header(skb);
351 case TCF_LAYER_TRANSPORT:
352 return skb_transport_header(skb);
353 }
354
355 return NULL;
356 }
357
/* Check that the @len bytes starting at @ptr lie entirely within the
 * linear data of @skb (between skb->head and the tail pointer).
 * The final (ptr <= ptr + len) comparison guards against wraparound of
 * the pointer arithmetic when @len is large.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
365
366 #ifdef CONFIG_NET_CLS_IND
367 #include <net/net_namespace.h>
368
/* Resolve the incoming-device name attribute @indev_tlv to an interface
 * index within @net.
 * Returns the ifindex on success, -EINVAL if the name does not fit into
 * IFNAMSIZ, or -ENODEV if no device of that name exists.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
382
383 static inline bool
384 tcf_match_indev(struct sk_buff *skb, int ifindex)
385 {
386 if (!ifindex)
387 return true;
388 if (!skb->skb_iif)
389 return false;
390 return ifindex == skb->skb_iif;
391 }
392 #endif /* CONFIG_NET_CLS_IND */
393
/* Key-node (filter entry) parameters handed to drivers for cls_u32
 * hardware offload. NOTE(review): field semantics mirror cls_u32's
 * internal key node — confirm against net/sched/cls_u32.c.
 */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};
403
/* Hash-node (hash table) parameters handed to drivers for cls_u32
 * hardware offload.
 */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};
409
/* Commands carried in struct tc_cls_u32_offload to tell the driver
 * which cls_u32 object operation to perform.
 */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};
418
/* Offload request for cls_u32: @command selects the operation and which
 * union member (knode for *_KNODE, hnode for *_HNODE commands) is valid.
 */
struct tc_cls_u32_offload {
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
427
/* Decide whether a classifier on @tp should be offloaded to @dev.
 * Requires hardware TC support (NETIF_F_HW_TC), no SKIP_HW request in
 * @flags and an ndo_setup_tc implementation; the qdisc's class ops get
 * a final veto through tcf_cl_offload() when they provide one.
 */
static inline bool tc_should_offload(const struct net_device *dev,
				     const struct tcf_proto *tp, u32 flags)
{
	const struct Qdisc *sch = tp->q;
	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (flags & TCA_CLS_FLAGS_SKIP_HW)
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	if (cops && cops->tcf_cl_offload)
		return cops->tcf_cl_offload(tp->classid);

	return true;
}
445
446 static inline bool tc_skip_sw(u32 flags)
447 {
448 return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
449 }
450
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	/* Reject any bits outside the two known flags. */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	/* Only SKIP_HW/SKIP_SW can be set here, so the XOR is zero
	 * exactly when both are set — the invalid combination.
	 */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
462
/* Commands carried in struct tc_cls_flower_offload to tell the driver
 * which cls_flower operation to perform.
 */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};
468
/* Offload request for cls_flower.
 * @cookie: opaque per-filter identifier pairing REPLACE with later
 *	DESTROY/STATS calls (NOTE(review): set by cls_flower — confirm).
 * @dissector/@mask/@key describe the flow match; @exts the actions.
 */
struct tc_cls_flower_offload {
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};
477
/* Commands carried in struct tc_cls_matchall_offload to tell the driver
 * which cls_matchall operation to perform.
 */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};
482
/* Offload request for cls_matchall.
 * @cookie: opaque per-filter identifier pairing REPLACE with later
 *	DESTROY calls (NOTE(review): set by cls_matchall — confirm).
 */
struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};
488
489 #endif