[PKT_SCHED]: GRED: Support ECN marking
[deliverable/linux.git] / net / sched / sch_red.c
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than the RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
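
/*
 * Illustrative example (assumed iproute2 syntax, not part of the
 * original source): a classic configuration keeps limit well above
 * qth_max so bursts can be absorbed, e.g.
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 \
 *		max 90000 avpkt 1000 burst 55 probability 0.02 \
 *		bandwidth 10Mbit ecn
 *
 * The "ecn" keyword sets TC_RED_ECN in the flags below, so congestion
 * is signalled by marking packets rather than dropping them.
 */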

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

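/*
 * Background sketch (added for exposition, not from the original
 * file): red_calc_qavg() maintains the classic RED moving average
 *
 *	qavg <- (1 - W) * qavg + W * backlog,	with W = 2^-Wlog,
 *
 * and red_action() compares it against the two thresholds: below
 * qth_min do nothing (RED_DONT_MARK); between qth_min and qth_max
 * mark/drop with linearly growing probability (RED_PROB_MARK); at or
 * above qth_max mark/drop every packet (RED_HARD_MARK). See Floyd &
 * Jacobson, "Random Early Detection Gateways for Congestion
 * Avoidance" (1993).
 */
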
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				/* ECN disabled or not an ECN-capable
				 * packet: fall back to an early drop. */
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	if (sch->qstats.backlog + skb->len <= q->limit)
		return qdisc_enqueue_tail(skb, sch);

	/* Hard byte limit exceeded, tail drop. */
	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	return qdisc_requeue(skb, sch);
}

static struct sk_buff * red_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb == NULL && !red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

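/*
 * Note (added for exposition): when the queue drains empty,
 * red_start_of_idle_period() records a timestamp so that the next
 * red_calc_qavg() can decay qavg for the time the link sat idle,
 * using the Scell_log/STAB lookup table; cf. the 990814 changelog
 * entry above about qave on an idle link.
 */
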
static unsigned int red_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		q->stats.other++;
		qdisc_drop(skb, sch);
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	red_restart(&q->parms);
}

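/*
 * Note (added for exposition): configuration arrives as nested
 * netlink attributes inside TCA_OPTIONS. TCA_RED_PARMS carries a
 * struct tc_red_qopt (limit, qth_min/qth_max, Wlog, Plog, Scell_log
 * and flags such as TC_RED_ECN); TCA_RED_STAB carries the
 * RED_STAB_SIZE-byte idle-decay table handed to red_set_parms().
 * Both must be present and large enough or the change is rejected.
 */
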
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc* sch, struct rtattr *opt)
{
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

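/*
 * Note (added for exposition): these extended statistics are what
 * "tc -s qdisc" typically reports for RED as marked/early/pdrop/other;
 * "early" folds together probabilistic and forced drops, "marked" the
 * corresponding ECN marks.
 */
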
static struct Qdisc_ops red_qdisc_ops = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.requeue	= red_requeue,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");