/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914: computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816: ECN support
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit - bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	If RED works correctly, this limit should never
	actually be reached.
 */

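/*
 * Illustrative sizing (example numbers, not from the original source):
 * with qth_min at 30000 bytes and qth_max at 90000 bytes, a limit of,
 * say, 130000 bytes leaves about 40000 bytes of headroom above qth_max
 * to absorb a burst before hard tail-drop (pdrop) kicks in.
 */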
struct red_sched_data
{
	u32		limit;		/* HARD maximal queue length */
	unsigned char	flags;
	struct red_parms parms;
	struct red_stats stats;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

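/*
 * How the mark/drop decision below is made (a sketch of the classic RED
 * model; the fixed-point details live in net/red.h, not here): the
 * average backlog is an EWMA,
 *
 *	qavg += W * (backlog - qavg),	with W = 2^(-Wlog),
 *
 * kept scaled by 2^Wlog internally. red_action() then compares qavg
 * against the two thresholds: below qth_min nothing happens; between
 * qth_min and qth_max the mark/drop probability grows roughly linearly
 * toward a maximum derived from Plog; at or above qth_max every packet
 * is marked or dropped.
 */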
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (red_use_harddrop(q) || !red_use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	if (sch->qstats.backlog + skb->len <= q->limit)
		return qdisc_enqueue_tail(skb, sch);

	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

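/*
 * A requeued packet re-enters the head of the queue, so if the RED state
 * machine thought the link was idle, that idle period ends here;
 * otherwise qavg would keep decaying while the queue is in fact occupied.
 */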
static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	return qdisc_requeue(skb, sch);
}

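/*
 * When the last packet leaves, an idle period starts: net/red.h then
 * decays qavg as a function of the time spent idle (via the Stab lookup
 * table), which is the behaviour the 990814 changelog entry above fixed.
 */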
static struct sk_buff * red_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb == NULL && !red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

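/*
 * ->drop is invoked by a parent qdisc that needs to reclaim queue space.
 * The victim is taken from the tail (the most recently queued packet)
 * and accounted under "other", as opposed to RED's own early/forced
 * drops.
 */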
static unsigned int red_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		q->stats.other++;
		qdisc_drop(skb, sch);
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

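/* Flush the queue and return the RED estimator to its initial state. */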
static void red_reset(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	red_restart(&q->parms);
}

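/*
 * Configuration arrives as nested netlink attributes: TCA_RED_PARMS
 * carries a struct tc_red_qopt (limit, thresholds, Wlog/Plog/Scell_log,
 * flags) and TCA_RED_STAB a RED_STAB_SIZE-byte table used to decay qavg
 * across idle periods. Both are mandatory, so a RED qdisc cannot be
 * created without parameters (see red_init below).
 */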
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

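/* Creation delegates to red_change(); a NULL opt is rejected there. */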
static int red_init(struct Qdisc* sch, struct rtattr *opt)
{
	return red_change(sch, opt);
}

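/*
 * Note the >> Wlog when dumping the thresholds: red_set_parms() stores
 * qth_min/qth_max pre-scaled by 2^Wlog for the fixed-point average, so
 * they are shifted back to bytes before being reported to userspace.
 */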
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

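/*
 * Extended stats as seen by "tc -s qdisc": "early" sums probabilistic
 * and forced drops, "marked" the corresponding ECN marks, "pdrop" counts
 * hard tail-drops against q->limit, and "other" drops via the ->drop
 * hook.
 */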
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops red_qdisc_ops = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.requeue	= red_requeue,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

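/*
 * Illustrative userspace usage (iproute2 syntax of the same era; exact
 * option spelling may vary by version -- treat this as a sketch, not a
 * reference):
 *
 *	tc qdisc add dev eth0 root red limit 130000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 bandwidth 10mbit probability 0.02 ecn
 *
 * tc converts min/max/avpkt/probability into the Wlog/Plog/Scell_log
 * fixed-point parameters and the Stab table consumed by red_change().
 */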
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");