/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim <hadi@nortel.com> 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim <hadi@nortelnetworks.com> 980816:  ECN support
 */
17 #include <linux/config.h>
18 #include <linux/module.h>
19 #include <asm/uaccess.h>
20 #include <asm/system.h>
21 #include <linux/bitops.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/string.h>
27 #include <linux/socket.h>
28 #include <linux/sockios.h>
30 #include <linux/errno.h>
31 #include <linux/interrupt.h>
32 #include <linux/if_ether.h>
33 #include <linux/inet.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/notifier.h>
38 #include <net/route.h>
39 #include <linux/skbuff.h>
41 #include <net/pkt_sched.h>
42 #include <net/inet_ecn.h>
43 #include <net/dsfield.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithms behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */
62 u32 limit
; /* HARD maximal queue length */
64 struct red_parms parms
;
65 struct red_stats stats
;
68 static inline int red_use_ecn(struct red_sched_data
*q
)
70 return q
->flags
& TC_RED_ECN
;
74 red_enqueue(struct sk_buff
*skb
, struct Qdisc
* sch
)
76 struct red_sched_data
*q
= qdisc_priv(sch
);
78 q
->parms
.qavg
= red_calc_qavg(&q
->parms
, sch
->qstats
.backlog
);
80 if (red_is_idling(&q
->parms
))
81 red_end_of_idle_period(&q
->parms
);
83 switch (red_action(&q
->parms
, q
->parms
.qavg
)) {
88 sch
->qstats
.overlimits
++;
89 if (!red_use_ecn(q
) || !INET_ECN_set_ce(skb
)) {
98 sch
->qstats
.overlimits
++;
99 if (!red_use_ecn(q
) || !INET_ECN_set_ce(skb
)) {
100 q
->stats
.forced_drop
++;
101 goto congestion_drop
;
104 q
->stats
.forced_mark
++;
108 if (sch
->qstats
.backlog
+ skb
->len
<= q
->limit
)
109 return qdisc_enqueue_tail(skb
, sch
);
112 return qdisc_drop(skb
, sch
);
115 qdisc_drop(skb
, sch
);
120 red_requeue(struct sk_buff
*skb
, struct Qdisc
* sch
)
122 struct red_sched_data
*q
= qdisc_priv(sch
);
124 if (red_is_idling(&q
->parms
))
125 red_end_of_idle_period(&q
->parms
);
127 return qdisc_requeue(skb
, sch
);
130 static struct sk_buff
*
131 red_dequeue(struct Qdisc
* sch
)
134 struct red_sched_data
*q
= qdisc_priv(sch
);
136 skb
= qdisc_dequeue_head(sch
);
139 red_start_of_idle_period(&q
->parms
);
144 static unsigned int red_drop(struct Qdisc
* sch
)
147 struct red_sched_data
*q
= qdisc_priv(sch
);
149 skb
= qdisc_dequeue_tail(sch
);
151 unsigned int len
= skb
->len
;
153 qdisc_drop(skb
, sch
);
157 red_start_of_idle_period(&q
->parms
);
161 static void red_reset(struct Qdisc
* sch
)
163 struct red_sched_data
*q
= qdisc_priv(sch
);
165 qdisc_reset_queue(sch
);
166 red_restart(&q
->parms
);
169 static int red_change(struct Qdisc
*sch
, struct rtattr
*opt
)
171 struct red_sched_data
*q
= qdisc_priv(sch
);
172 struct rtattr
*tb
[TCA_RED_STAB
];
173 struct tc_red_qopt
*ctl
;
176 rtattr_parse_nested(tb
, TCA_RED_STAB
, opt
) ||
177 tb
[TCA_RED_PARMS
-1] == 0 || tb
[TCA_RED_STAB
-1] == 0 ||
178 RTA_PAYLOAD(tb
[TCA_RED_PARMS
-1]) < sizeof(*ctl
) ||
179 RTA_PAYLOAD(tb
[TCA_RED_STAB
-1]) < 256)
182 ctl
= RTA_DATA(tb
[TCA_RED_PARMS
-1]);
185 q
->flags
= ctl
->flags
;
186 q
->limit
= ctl
->limit
;
188 red_set_parms(&q
->parms
, ctl
->qth_min
, ctl
->qth_max
, ctl
->Wlog
,
189 ctl
->Plog
, ctl
->Scell_log
,
190 RTA_DATA(tb
[TCA_RED_STAB
-1]));
192 if (skb_queue_empty(&sch
->q
))
193 red_end_of_idle_period(&q
->parms
);
194 sch_tree_unlock(sch
);
198 static int red_init(struct Qdisc
* sch
, struct rtattr
*opt
)
200 return red_change(sch
, opt
);
203 static int red_dump(struct Qdisc
*sch
, struct sk_buff
*skb
)
205 struct red_sched_data
*q
= qdisc_priv(sch
);
206 unsigned char *b
= skb
->tail
;
208 struct tc_red_qopt opt
= {
211 .qth_min
= q
->parms
.qth_min
>> q
->parms
.Wlog
,
212 .qth_max
= q
->parms
.qth_max
>> q
->parms
.Wlog
,
213 .Wlog
= q
->parms
.Wlog
,
214 .Plog
= q
->parms
.Plog
,
215 .Scell_log
= q
->parms
.Scell_log
,
218 rta
= (struct rtattr
*)b
;
219 RTA_PUT(skb
, TCA_OPTIONS
, 0, NULL
);
220 RTA_PUT(skb
, TCA_RED_PARMS
, sizeof(opt
), &opt
);
221 rta
->rta_len
= skb
->tail
- b
;
226 skb_trim(skb
, b
- skb
->data
);
230 static int red_dump_stats(struct Qdisc
*sch
, struct gnet_dump
*d
)
232 struct red_sched_data
*q
= qdisc_priv(sch
);
233 struct tc_red_xstats st
= {
234 .early
= q
->stats
.prob_drop
+ q
->stats
.forced_drop
,
235 .pdrop
= q
->stats
.pdrop
,
236 .other
= q
->stats
.other
,
237 .marked
= q
->stats
.prob_mark
+ q
->stats
.forced_mark
,
240 return gnet_stats_copy_app(d
, &st
, sizeof(st
));
243 static struct Qdisc_ops red_qdisc_ops
= {
247 .priv_size
= sizeof(struct red_sched_data
),
248 .enqueue
= red_enqueue
,
249 .dequeue
= red_dequeue
,
250 .requeue
= red_requeue
,
254 .change
= red_change
,
256 .dump_stats
= red_dump_stats
,
257 .owner
= THIS_MODULE
,
260 static int __init
red_module_init(void)
262 return register_qdisc(&red_qdisc_ops
);
264 static void __exit
red_module_exit(void)
266 unregister_qdisc(&red_qdisc_ops
);
268 module_init(red_module_init
)
269 module_exit(red_module_exit
)
270 MODULE_LICENSE("GPL");