/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *		       - a better single AvgQ mode with Grio (WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

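/* Return 1 if at least two configured DPs share the same priority; the
 * callers use this to switch the table into WRED mode, where all VQs
 * share a single average queue estimate.
 */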
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

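/* In WRED mode the queue average is computed over the backlog of the
 * whole qdisc, otherwise over the backlog of the individual VQ.
 */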
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

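/* The low order bits of skb->tc_index select the virtual queue (DP). */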
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

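/* In WRED mode all VQs share one set of RED averaging variables, kept
 * in table->wred_set; they are loaded before and stored back after each
 * per-packet calculation.
 */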
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

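/* Enqueue path: map the packet to its VQ (falling back to the default
 * DP), update the RED queue average (adding the averages of DPs with a
 * lower prio value in RIO mode) and let red_action() decide between
 * queueing, ECN marking and dropping.
 */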
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

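/* Dequeue from the head of the shared queue and charge the packet back
 * to the VQ it was accounted to; start a RED idle period once the
 * relevant backlog drains to zero.
 */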
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

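/* ->drop callback: discard a packet from the tail of the shared queue,
 * fixing up the owning VQ's backlog and drop statistics.
 */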
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		qdisc_drop(skb, sch);
		return len;
	}

	return 0;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

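/* Apply a TCA_GRED_DPS request: set the number of DPs, the default DP
 * and the RED flags, then destroy any VQ that is now out of range
 * (shadowed).
 */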
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

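/* (Re)configure one virtual queue, taking ownership of *prealloc when
 * the VQ does not exist yet; called with the qdisc tree lock held.
 */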
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
};

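/* ->change: either a table-wide update (no PARMS/STAB present, handled
 * by gred_change_table_def) or the RED parameters for one VQ.
 */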
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, opt);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
errout:
	return err;
}

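/* ->init: only table-wide setup is accepted here; per-VQ parameters are
 * configured later through ->change, so PARMS/STAB are rejected.
 */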
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len *
			     psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

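/* ->dump: emit the table options followed by one tc_gred_qopt per
 * possible DP; unconfigured DPs are marked with DP set to MAX_DPs + i.
 */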
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			 * This is how we indicate to tc that there is no VQ
			 * at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.drop		= gred_drop,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");