[PKT_SCHED]: RED: Use new generic red interface
net/sched/sch_red.c
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim <hadi@nortel.com> 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim <hadi@nortelnetworks.com> 980816:  ECN support
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/dsfield.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	In fact, this limit will never be reached
	if RED works correctly.
 */
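
/*
 * Illustrative only, not part of the original file: these parameters
 * are normally configured from userspace with iproute2's tc. A sketch
 * (option spelling and defaults vary with the tc version):
 *
 *	tc qdisc add dev eth0 root red limit 60000 min 5000 max 15000 \
 *		avpkt 1000 burst 10 probability 0.02 bandwidth 10mbit ecn
 *
 * Here limit (60000 bytes) sits comfortably above qth_max (15000 bytes)
 * plus the burst allowance, matching the rule stated above.
 */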

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static int
red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	if (sch->qstats.backlog + skb->len <= q->limit) {
		__skb_queue_tail(&sch->q, skb);
		sch->qstats.backlog += skb->len;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;

congestion_drop:
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_CN;
}

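/*
 * Put a previously dequeued packet back at the head of the queue;
 * the queue is non-empty again, so end any running idle period.
 */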
static int
red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	return 0;
}

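/*
 * Dequeue from the head of the queue; if the queue has drained, tell
 * the RED core an idle period begins so qavg can decay while idle.
 */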
static struct sk_buff *
red_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		return skb;
	}

	red_start_of_idle_period(&q->parms);
	return NULL;
}

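/*
 * Drop one packet from the tail of the queue, e.g. when a parent
 * qdisc needs to reclaim space; returns the dropped length, or 0.
 */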
static unsigned int red_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;
		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q->stats.other++;
		kfree_skb(skb);
		return len;
	}

	red_start_of_idle_period(&q->parms);
	return 0;
}

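/* Purge all queued packets and restart the RED state machine. */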
static void red_reset(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	__skb_queue_purge(&sch->q);
	sch->qstats.backlog = 0;
	red_restart(&q->parms);
}

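/*
 * Parse the netlink configuration (TCA_RED_PARMS plus the 256-byte
 * TCA_RED_STAB lookup table) and hand it to the generic RED core.
 */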
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_STAB];
	struct tc_red_qopt *ctl;

	if (opt == NULL ||
	    rtattr_parse_nested(tb, TCA_RED_STAB, opt) ||
	    tb[TCA_RED_PARMS-1] == 0 || tb[TCA_RED_STAB-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);
	sch_tree_unlock(sch);
	return 0;
}

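/* Initialisation is just a parameter change on a fresh qdisc. */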
static int red_init(struct Qdisc* sch, struct rtattr *opt)
{
	return red_change(sch, opt);
}

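/*
 * Dump the current configuration back to userspace, converting the
 * Wlog-scaled internal thresholds back to their unscaled form.
 */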
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

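/*
 * Export RED-specific statistics (early/forced drops and ECN marks)
 * as xstats alongside the generic qdisc counters.
 */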
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

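/* Operations vector tying the RED handlers into the scheduler core. */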
static struct Qdisc_ops red_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)
MODULE_LICENSE("GPL");