[NET_SCHED]: Add mask support to fwmark classifier
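This adds a u32 mask to the fw classifier: the mask is stored in struct fw_head and ANDed with skb->nfmark before the hash lookup, so a filter can match on a subset of the mark's bits. The mask is fixed when the first filter allocates the head, later changes must repeat the same value (fw_change_attrs() rejects anything else), and it is only dumped when it differs from the default 0xFFFFFFFF. A condensed sketch of the lookup path, taken from fw_classify() below (error handling and the optional indev/action hooks omitted):

        u32 id = skb->nfmark & head->mask;      /* apply the configured mask */

        for (f = head->ht[fw_hash(id)]; f; f = f->next)
                if (f->id == id) {              /* masked mark matches the filter handle */
                        *res = f->res;
                        return tcf_exts_exec(skb, &f->exts, res);
                }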
[deliverable/linux.git] net/sched/cls_fw.c
/*
 * net/sched/cls_fw.c  Classifier mapping ipchains' fwmark to traffic class.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 * eventually when the meta match extension is made available
 *
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))

struct fw_head
{
        struct fw_filter *ht[HTSIZE];
        u32 mask;
};

struct fw_filter
{
        struct fw_filter *next;
        u32 id;
        struct tcf_result res;
#ifdef CONFIG_NET_CLS_IND
        char indev[IFNAMSIZ];
#endif /* CONFIG_NET_CLS_IND */
        struct tcf_exts exts;
};

static struct tcf_ext_map fw_ext_map = {
        .action = TCA_FW_ACT,
        .police = TCA_FW_POLICE
};

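/*
 * Fold the 32-bit handle (the masked fwmark) down to an index into ht[].
 * HTSIZE is PAGE_SIZE / sizeof(pointer), so the common table sizes get an
 * explicit XOR fold; any other size falls back to a plain modulo mask.
 */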
static __inline__ int fw_hash(u32 handle)
{
        if (HTSIZE == 4096)
                return ((handle >> 24) & 0xFFF) ^
                       ((handle >> 12) & 0xFFF) ^
                       (handle & 0xFFF);
        else if (HTSIZE == 2048)
                return ((handle >> 22) & 0x7FF) ^
                       ((handle >> 11) & 0x7FF) ^
                       (handle & 0x7FF);
        else if (HTSIZE == 1024)
                return ((handle >> 20) & 0x3FF) ^
                       ((handle >> 10) & 0x3FF) ^
                       (handle & 0x3FF);
        else if (HTSIZE == 512)
                return (handle >> 27) ^
                       ((handle >> 18) & 0x1FF) ^
                       ((handle >> 9) & 0x1FF) ^
                       (handle & 0x1FF);
        else if (HTSIZE == 256) {
                u8 *t = (u8 *) &handle;
                return t[0] ^ t[1] ^ t[2] ^ t[3];
        } else
                return handle & (HTSIZE - 1);
}

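/*
 * Lookup: the configured mask is ANDed with skb->nfmark before hashing, so
 * filters match on the masked mark.  If no filters have been added yet
 * (head == NULL), fall back to the old behaviour and use the mark directly
 * as a class ID within this qdisc.
 */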
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
                          struct tcf_result *res)
{
        struct fw_head *head = (struct fw_head*)tp->root;
        struct fw_filter *f;
        int r;
#ifdef CONFIG_NETFILTER
        u32 id = skb->nfmark & head->mask;
#else
        u32 id = 0;
#endif

        if (head != NULL) {
                for (f=head->ht[fw_hash(id)]; f; f=f->next) {
                        if (f->id == id) {
                                *res = f->res;
#ifdef CONFIG_NET_CLS_IND
                                if (!tcf_match_indev(skb, f->indev))
                                        continue;
#endif /* CONFIG_NET_CLS_IND */
                                r = tcf_exts_exec(skb, &f->exts, res);
                                if (r < 0)
                                        continue;

                                return r;
                        }
                }
        } else {
                /* old method */
                if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
                        res->classid = id;
                        res->class = 0;
                        return 0;
                }
        }

        return -1;
}

static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
        struct fw_head *head = (struct fw_head*)tp->root;
        struct fw_filter *f;

        if (head == NULL)
                return 0;

        for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
                if (f->id == handle)
                        return (unsigned long)f;
        }
        return 0;
}

static void fw_put(struct tcf_proto *tp, unsigned long f)
{
}

static int fw_init(struct tcf_proto *tp)
{
        return 0;
}

static inline void
fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
        kfree(f);
}

static void fw_destroy(struct tcf_proto *tp)
{
        struct fw_head *head = (struct fw_head*)xchg(&tp->root, NULL);
        struct fw_filter *f;
        int h;

        if (head == NULL)
                return;

        for (h=0; h<HTSIZE; h++) {
                while ((f=head->ht[h]) != NULL) {
                        head->ht[h] = f->next;
                        fw_delete_filter(tp, f);
                }
        }
        kfree(head);
}

static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct fw_head *head = (struct fw_head*)tp->root;
        struct fw_filter *f = (struct fw_filter*)arg;
        struct fw_filter **fp;

        if (head == NULL || f == NULL)
                goto out;

        for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
                        tcf_tree_unlock(tp);
                        fw_delete_filter(tp, f);
                        return 0;
                }
        }
 out:
        return -EINVAL;
}

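/*
 * Apply the netlink attributes to a filter.  The mask itself is fixed once
 * the head exists: TCA_FW_MASK is only accepted here if it repeats the
 * current head->mask, and omitting it is only valid while the mask is still
 * the default 0xFFFFFFFF.
 */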
static int
fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
        struct rtattr **tb, struct rtattr **tca, unsigned long base)
{
        struct fw_head *head = (struct fw_head *)tp->root;
        struct tcf_exts e;
        u32 mask;
        int err;

        err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &fw_ext_map);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_FW_CLASSID-1]) {
                if (RTA_PAYLOAD(tb[TCA_FW_CLASSID-1]) != sizeof(u32))
                        goto errout;
                f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
                tcf_bind_filter(tp, &f->res, base);
        }

#ifdef CONFIG_NET_CLS_IND
        if (tb[TCA_FW_INDEV-1]) {
                err = tcf_change_indev(tp, f->indev, tb[TCA_FW_INDEV-1]);
                if (err < 0)
                        goto errout;
        }
#endif /* CONFIG_NET_CLS_IND */

        if (tb[TCA_FW_MASK-1]) {
                if (RTA_PAYLOAD(tb[TCA_FW_MASK-1]) != sizeof(u32))
                        goto errout;
                mask = *(u32*)RTA_DATA(tb[TCA_FW_MASK-1]);
                if (mask != head->mask)
                        goto errout;
        } else if (head->mask != 0xFFFFFFFF)
                goto errout;

        tcf_exts_change(tp, &f->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

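/*
 * Create or update a filter.  The fw_head (and with it the mask) is
 * allocated lazily when the first filter is inserted; that is the only
 * point at which a non-default TCA_FW_MASK value can be set.
 */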
static int fw_change(struct tcf_proto *tp, unsigned long base,
                     u32 handle,
                     struct rtattr **tca,
                     unsigned long *arg)
{
        struct fw_head *head = (struct fw_head*)tp->root;
        struct fw_filter *f = (struct fw_filter *) *arg;
        struct rtattr *opt = tca[TCA_OPTIONS-1];
        struct rtattr *tb[TCA_FW_MAX];
        int err;

        if (!opt)
                return handle ? -EINVAL : 0;

        if (rtattr_parse_nested(tb, TCA_FW_MAX, opt) < 0)
                return -EINVAL;

        if (f != NULL) {
                if (f->id != handle && handle)
                        return -EINVAL;
                return fw_change_attrs(tp, f, tb, tca, base);
        }

        if (!handle)
                return -EINVAL;

        if (head == NULL) {
                u32 mask = 0xFFFFFFFF;
                if (tb[TCA_FW_MASK-1]) {
                        if (RTA_PAYLOAD(tb[TCA_FW_MASK-1]) != sizeof(u32))
                                return -EINVAL;
                        mask = *(u32*)RTA_DATA(tb[TCA_FW_MASK-1]);
                }

                head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
                if (head == NULL)
                        return -ENOBUFS;
                head->mask = mask;

                tcf_tree_lock(tp);
                tp->root = head;
                tcf_tree_unlock(tp);
        }

        f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
        if (f == NULL)
                return -ENOBUFS;

        f->id = handle;

        err = fw_change_attrs(tp, f, tb, tca, base);
        if (err < 0)
                goto errout;

        f->next = head->ht[fw_hash(handle)];
        tcf_tree_lock(tp);
        head->ht[fw_hash(handle)] = f;
        tcf_tree_unlock(tp);

        *arg = (unsigned long)f;
        return 0;

errout:
        kfree(f);
        return err;
}

static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct fw_head *head = (struct fw_head*)tp->root;
        int h;

        if (head == NULL)
                arg->stop = 1;

        if (arg->stop)
                return;

        for (h = 0; h < HTSIZE; h++) {
                struct fw_filter *f;

                for (f = head->ht[h]; f; f = f->next) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(tp, (unsigned long)f, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static int fw_dump(struct tcf_proto *tp, unsigned long fh,
                   struct sk_buff *skb, struct tcmsg *t)
{
        struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f = (struct fw_filter*)fh;
        unsigned char *b = skb->tail;
        struct rtattr *rta;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->id;

        if (!f->res.classid && !tcf_exts_is_available(&f->exts))
                return skb->len;

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        if (f->res.classid)
                RTA_PUT(skb, TCA_FW_CLASSID, 4, &f->res.classid);
#ifdef CONFIG_NET_CLS_IND
        if (strlen(f->indev))
                RTA_PUT(skb, TCA_FW_INDEV, IFNAMSIZ, f->indev);
#endif /* CONFIG_NET_CLS_IND */
        if (head->mask != 0xFFFFFFFF)
                RTA_PUT(skb, TCA_FW_MASK, 4, &head->mask);

        if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
                goto rtattr_failure;

        rta->rta_len = skb->tail - b;

        if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0)
                goto rtattr_failure;

        return skb->len;

rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static struct tcf_proto_ops cls_fw_ops = {
        .next           = NULL,
        .kind           = "fw",
        .classify       = fw_classify,
        .init           = fw_init,
        .destroy        = fw_destroy,
        .get            = fw_get,
        .put            = fw_put,
        .change         = fw_change,
        .delete         = fw_delete,
        .walk           = fw_walk,
        .dump           = fw_dump,
        .owner          = THIS_MODULE,
};

static int __init init_fw(void)
{
        return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
        unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_LICENSE("GPL");