net/sched/cls_bpf.c
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
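
/* Example usage from userspace (an illustrative sketch only; assumes an
 * iproute2 tc binary with eBPF support and an object file prog.o with a
 * "classifier" section built separately):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf obj prog.o sec classifier da
 *
 * The "da" (direct-action) flag corresponds to TCA_BPF_FLAG_ACT_DIRECT
 * handled below: the program's return code is then used directly as the
 * TC action verdict.
 */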

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

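/* Per-filter state. A filter holds either a classic BPF program
 * (bpf_ops/bpf_num_ops) or an eBPF program obtained via file descriptor
 * (bpf_fd/bpf_name); cls_bpf_is_ebpf() below tells the two apart by
 * whether bpf_ops is set.
 */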
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

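/* In direct-action mode the program's return value is interpreted as a
 * TC action opcode. Anything outside the known set is mapped to
 * TC_ACT_UNSPEC, which makes cls_bpf_classify() fall through to the
 * next filter in the list.
 */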
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

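/* Main classification path. At ingress the MAC header has already been
 * pulled, so it is temporarily pushed back to give the program a view
 * of the full frame; at egress the skb can be run as-is. A return value
 * of -1 means "no match, try the next classifier".
 */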
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

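/* Build a classic BPF program from the TCA_BPF_OPS/TCA_BPF_OPS_LEN
 * attributes: validate the instruction count against BPF_MAXINSNS,
 * copy the opcodes out of the netlink message and hand them to
 * bpf_prog_create(), which checks the program and sets it up for
 * execution.
 */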
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

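/* Take an already loaded eBPF program by file descriptor (TCA_BPF_FD).
 * The fd must refer to a program of type BPF_PROG_TYPE_SCHED_CLS, i.e.
 * one loaded via bpf(BPF_PROG_LOAD) with that type; an optional
 * TCA_BPF_NAME is kept purely for dumping back to userspace.
 */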
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

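/* Common configuration path for both new and replaced filters: exactly
 * one of classic BPF (TCA_BPF_OPS) or eBPF (TCA_BPF_FD) must be given,
 * actions are validated first, and TCA_BPF_FLAG_ACT_DIRECT switches the
 * filter into direct-action mode.
 */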
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			tcf_exts_destroy(&exts);
			return -EINVAL;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}

	prog->exts_integrated = have_exts;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;
}

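/* Allocate an unused handle when userspace did not supply one. hgen is
 * a simple wrapping counter; the loop bounds the search at 0x80000000
 * probes so it terminates even if the handle space is exhausted.
 */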
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

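/* Netlink entry point for creating or replacing a filter. On replace,
 * the new program is swapped into the list via list_replace_rcu() and
 * the old one is freed only after a grace period, so concurrent readers
 * in cls_bpf_classify() always see a consistent list.
 */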
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

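/* Dump a filter back to userspace as a nested TCA_OPTIONS attribute,
 * mirroring what cls_bpf_change() accepts: classid, the program (ops
 * or fd/name), extended actions and, if set, the direct-action flag.
 */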
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

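/* Iterate over all filters for a dump; arg->skip/arg->count implement
 * the usual netlink dump resume semantics.
 */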
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);