Commit | Line | Data |
---|---|---|
92651940 AD |
1 | /* |
2 | * Copyright (c) 2008, Intel Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | |
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | |
16 | * | |
17 | * Author: Alexander Duyck <alexander.h.duyck@intel.com> | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/types.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/skbuff.h> | |
26 | #include <net/netlink.h> | |
27 | #include <net/pkt_sched.h> | |
28 | ||
29 | ||
/* Per-qdisc private state for the multiq scheduler. */
struct multiq_sched_data {
	u16 bands;			/* number of bands currently active */
	u16 max_bands;			/* allocated length of queues[] (num_tx_queues at init) */
	u16 curband;			/* last band serviced; round-robin cursor for dequeue */
	struct tcf_proto *filter_list;	/* tc classifier chain attached to this qdisc */
	struct Qdisc **queues;		/* one child qdisc per band; &noop_qdisc when unused */
};
37 | ||
38 | ||
/* Select the child qdisc for an skb.
 *
 * The attached tc filters run first; their verdict may steal or shoot
 * the packet, in which case NULL is returned and *qerr tells the caller
 * how to account for it.  Otherwise the skb's queue mapping picks the
 * band, falling back to band 0 when the mapping is out of range.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	/* out-of-range mappings are directed to band 0 */
	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
65 | ||
66 | static int | |
67 | multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
68 | { | |
69 | struct Qdisc *qdisc; | |
70 | int ret; | |
71 | ||
72 | qdisc = multiq_classify(skb, sch, &ret); | |
73 | #ifdef CONFIG_NET_CLS_ACT | |
74 | if (qdisc == NULL) { | |
75 | ||
76 | if (ret & __NET_XMIT_BYPASS) | |
77 | sch->qstats.drops++; | |
78 | kfree_skb(skb); | |
79 | return ret; | |
80 | } | |
81 | #endif | |
82 | ||
83 | ret = qdisc_enqueue(skb, qdisc); | |
84 | if (ret == NET_XMIT_SUCCESS) { | |
85 | sch->bstats.bytes += qdisc_pkt_len(skb); | |
86 | sch->bstats.packets++; | |
87 | sch->q.qlen++; | |
88 | return NET_XMIT_SUCCESS; | |
89 | } | |
90 | if (net_xmit_drop_count(ret)) | |
91 | sch->qstats.drops++; | |
92 | return ret; | |
93 | } | |
94 | ||
92651940 AD |
95 | static struct sk_buff *multiq_dequeue(struct Qdisc *sch) |
96 | { | |
97 | struct multiq_sched_data *q = qdisc_priv(sch); | |
98 | struct Qdisc *qdisc; | |
99 | struct sk_buff *skb; | |
100 | int band; | |
101 | ||
102 | for (band = 0; band < q->bands; band++) { | |
103 | /* cycle through bands to ensure fairness */ | |
104 | q->curband++; | |
105 | if (q->curband >= q->bands) | |
106 | q->curband = 0; | |
107 | ||
108 | /* Check that target subqueue is available before | |
f30ab418 | 109 | * pulling an skb to avoid head-of-line blocking. |
92651940 AD |
110 | */ |
111 | if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) { | |
112 | qdisc = q->queues[q->curband]; | |
113 | skb = qdisc->dequeue(qdisc); | |
114 | if (skb) { | |
115 | sch->q.qlen--; | |
116 | return skb; | |
117 | } | |
118 | } | |
119 | } | |
120 | return NULL; | |
121 | ||
122 | } | |
123 | ||
8e3af978 JP |
124 | static struct sk_buff *multiq_peek(struct Qdisc *sch) |
125 | { | |
126 | struct multiq_sched_data *q = qdisc_priv(sch); | |
127 | unsigned int curband = q->curband; | |
128 | struct Qdisc *qdisc; | |
129 | struct sk_buff *skb; | |
130 | int band; | |
131 | ||
132 | for (band = 0; band < q->bands; band++) { | |
133 | /* cycle through bands to ensure fairness */ | |
134 | curband++; | |
135 | if (curband >= q->bands) | |
136 | curband = 0; | |
137 | ||
138 | /* Check that target subqueue is available before | |
f30ab418 | 139 | * pulling an skb to avoid head-of-line blocking. |
8e3af978 JP |
140 | */ |
141 | if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) { | |
142 | qdisc = q->queues[curband]; | |
143 | skb = qdisc->ops->peek(qdisc); | |
144 | if (skb) | |
145 | return skb; | |
146 | } | |
147 | } | |
148 | return NULL; | |
149 | ||
150 | } | |
151 | ||
92651940 AD |
152 | static unsigned int multiq_drop(struct Qdisc *sch) |
153 | { | |
154 | struct multiq_sched_data *q = qdisc_priv(sch); | |
155 | int band; | |
156 | unsigned int len; | |
157 | struct Qdisc *qdisc; | |
158 | ||
159 | for (band = q->bands-1; band >= 0; band--) { | |
160 | qdisc = q->queues[band]; | |
161 | if (qdisc->ops->drop) { | |
162 | len = qdisc->ops->drop(qdisc); | |
163 | if (len != 0) { | |
164 | sch->q.qlen--; | |
165 | return len; | |
166 | } | |
167 | } | |
168 | } | |
169 | return 0; | |
170 | } | |
171 | ||
172 | ||
173 | static void | |
174 | multiq_reset(struct Qdisc *sch) | |
175 | { | |
176 | u16 band; | |
177 | struct multiq_sched_data *q = qdisc_priv(sch); | |
178 | ||
179 | for (band = 0; band < q->bands; band++) | |
180 | qdisc_reset(q->queues[band]); | |
181 | sch->q.qlen = 0; | |
182 | q->curband = 0; | |
183 | } | |
184 | ||
185 | static void | |
186 | multiq_destroy(struct Qdisc *sch) | |
187 | { | |
188 | int band; | |
189 | struct multiq_sched_data *q = qdisc_priv(sch); | |
190 | ||
191 | tcf_destroy_chain(&q->filter_list); | |
192 | for (band = 0; band < q->bands; band++) | |
193 | qdisc_destroy(q->queues[band]); | |
194 | ||
195 | kfree(q->queues); | |
196 | } | |
197 | ||
198 | static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) | |
199 | { | |
200 | struct multiq_sched_data *q = qdisc_priv(sch); | |
201 | struct tc_multiq_qopt *qopt; | |
202 | int i; | |
203 | ||
204 | if (!netif_is_multiqueue(qdisc_dev(sch))) | |
205 | return -EINVAL; | |
206 | if (nla_len(opt) < sizeof(*qopt)) | |
207 | return -EINVAL; | |
208 | ||
209 | qopt = nla_data(opt); | |
210 | ||
211 | qopt->bands = qdisc_dev(sch)->real_num_tx_queues; | |
212 | ||
213 | sch_tree_lock(sch); | |
214 | q->bands = qopt->bands; | |
215 | for (i = q->bands; i < q->max_bands; i++) { | |
f07d1501 AD |
216 | if (q->queues[i] != &noop_qdisc) { |
217 | struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc); | |
92651940 AD |
218 | qdisc_tree_decrease_qlen(child, child->q.qlen); |
219 | qdisc_destroy(child); | |
220 | } | |
221 | } | |
222 | ||
223 | sch_tree_unlock(sch); | |
224 | ||
225 | for (i = 0; i < q->bands; i++) { | |
226 | if (q->queues[i] == &noop_qdisc) { | |
227 | struct Qdisc *child; | |
228 | child = qdisc_create_dflt(qdisc_dev(sch), | |
229 | sch->dev_queue, | |
230 | &pfifo_qdisc_ops, | |
231 | TC_H_MAKE(sch->handle, | |
232 | i + 1)); | |
233 | if (child) { | |
234 | sch_tree_lock(sch); | |
235 | child = xchg(&q->queues[i], child); | |
236 | ||
237 | if (child != &noop_qdisc) { | |
238 | qdisc_tree_decrease_qlen(child, | |
239 | child->q.qlen); | |
240 | qdisc_destroy(child); | |
241 | } | |
242 | sch_tree_unlock(sch); | |
243 | } | |
244 | } | |
245 | } | |
246 | return 0; | |
247 | } | |
248 | ||
249 | static int multiq_init(struct Qdisc *sch, struct nlattr *opt) | |
250 | { | |
251 | struct multiq_sched_data *q = qdisc_priv(sch); | |
f07d1501 | 252 | int i, err; |
92651940 AD |
253 | |
254 | q->queues = NULL; | |
255 | ||
256 | if (opt == NULL) | |
257 | return -EINVAL; | |
258 | ||
259 | q->max_bands = qdisc_dev(sch)->num_tx_queues; | |
260 | ||
261 | q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL); | |
262 | if (!q->queues) | |
263 | return -ENOBUFS; | |
264 | for (i = 0; i < q->max_bands; i++) | |
265 | q->queues[i] = &noop_qdisc; | |
266 | ||
f07d1501 AD |
267 | err = multiq_tune(sch,opt); |
268 | ||
269 | if (err) | |
270 | kfree(q->queues); | |
271 | ||
272 | return err; | |
92651940 AD |
273 | } |
274 | ||
275 | static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) | |
276 | { | |
277 | struct multiq_sched_data *q = qdisc_priv(sch); | |
278 | unsigned char *b = skb_tail_pointer(skb); | |
279 | struct tc_multiq_qopt opt; | |
280 | ||
281 | opt.bands = q->bands; | |
282 | opt.max_bands = q->max_bands; | |
283 | ||
284 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | |
285 | ||
286 | return skb->len; | |
287 | ||
288 | nla_put_failure: | |
289 | nlmsg_trim(skb, b); | |
290 | return -1; | |
291 | } | |
292 | ||
293 | static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |
294 | struct Qdisc **old) | |
295 | { | |
296 | struct multiq_sched_data *q = qdisc_priv(sch); | |
297 | unsigned long band = arg - 1; | |
298 | ||
299 | if (band >= q->bands) | |
300 | return -EINVAL; | |
301 | ||
302 | if (new == NULL) | |
303 | new = &noop_qdisc; | |
304 | ||
305 | sch_tree_lock(sch); | |
306 | *old = q->queues[band]; | |
307 | q->queues[band] = new; | |
308 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | |
309 | qdisc_reset(*old); | |
310 | sch_tree_unlock(sch); | |
311 | ||
312 | return 0; | |
313 | } | |
314 | ||
315 | static struct Qdisc * | |
316 | multiq_leaf(struct Qdisc *sch, unsigned long arg) | |
317 | { | |
318 | struct multiq_sched_data *q = qdisc_priv(sch); | |
319 | unsigned long band = arg - 1; | |
320 | ||
321 | if (band >= q->bands) | |
322 | return NULL; | |
323 | ||
324 | return q->queues[band]; | |
325 | } | |
326 | ||
327 | static unsigned long multiq_get(struct Qdisc *sch, u32 classid) | |
328 | { | |
329 | struct multiq_sched_data *q = qdisc_priv(sch); | |
330 | unsigned long band = TC_H_MIN(classid); | |
331 | ||
332 | if (band - 1 >= q->bands) | |
333 | return 0; | |
334 | return band; | |
335 | } | |
336 | ||
/* Bind a tc filter to a class: same lookup/validation as multiq_get(). */
static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}
342 | ||
343 | ||
/* Classes are permanent (one per band), so there is nothing to release. */
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
	return;
}
348 | ||
349 | static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent, | |
350 | struct nlattr **tca, unsigned long *arg) | |
351 | { | |
352 | unsigned long cl = *arg; | |
353 | struct multiq_sched_data *q = qdisc_priv(sch); | |
354 | ||
355 | if (cl - 1 > q->bands) | |
356 | return -ENOENT; | |
357 | return 0; | |
358 | } | |
359 | ||
360 | static int multiq_delete(struct Qdisc *sch, unsigned long cl) | |
361 | { | |
362 | struct multiq_sched_data *q = qdisc_priv(sch); | |
363 | if (cl - 1 > q->bands) | |
364 | return -ENOENT; | |
365 | return 0; | |
366 | } | |
367 | ||
368 | ||
369 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, | |
370 | struct sk_buff *skb, struct tcmsg *tcm) | |
371 | { | |
372 | struct multiq_sched_data *q = qdisc_priv(sch); | |
373 | ||
374 | if (cl - 1 > q->bands) | |
375 | return -ENOENT; | |
376 | tcm->tcm_handle |= TC_H_MIN(cl); | |
377 | if (q->queues[cl-1]) | |
378 | tcm->tcm_info = q->queues[cl-1]->handle; | |
379 | return 0; | |
380 | } | |
381 | ||
382 | static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |
383 | struct gnet_dump *d) | |
384 | { | |
385 | struct multiq_sched_data *q = qdisc_priv(sch); | |
386 | struct Qdisc *cl_q; | |
387 | ||
388 | cl_q = q->queues[cl - 1]; | |
389 | if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || | |
390 | gnet_stats_copy_queue(d, &cl_q->qstats) < 0) | |
391 | return -1; | |
392 | ||
393 | return 0; | |
394 | } | |
395 | ||
396 | static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |
397 | { | |
398 | struct multiq_sched_data *q = qdisc_priv(sch); | |
399 | int band; | |
400 | ||
401 | if (arg->stop) | |
402 | return; | |
403 | ||
404 | for (band = 0; band < q->bands; band++) { | |
405 | if (arg->count < arg->skip) { | |
406 | arg->count++; | |
407 | continue; | |
408 | } | |
409 | if (arg->fn(sch, band+1, arg) < 0) { | |
410 | arg->stop = 1; | |
411 | break; | |
412 | } | |
413 | arg->count++; | |
414 | } | |
415 | } | |
416 | ||
417 | static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl) | |
418 | { | |
419 | struct multiq_sched_data *q = qdisc_priv(sch); | |
420 | ||
421 | if (cl) | |
422 | return NULL; | |
423 | return &q->filter_list; | |
424 | } | |
425 | ||
/* Class operations: one permanent class per band. */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.get		= multiq_get,
	.put		= multiq_put,
	.change		= multiq_change,
	.delete		= multiq_delete,
	.walk		= multiq_walk,
	.tcf_chain	= multiq_find_tcf,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_put,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};
440 | ||
/* Qdisc operations for the "multiq" scheduler. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.drop		= multiq_drop,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};
457 | ||
/* Register the "multiq" qdisc with the packet scheduler core. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}
462 | ||
/* Unregister the "multiq" qdisc on module unload. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}
467 | ||
/* Module entry/exit points and license. */
module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");