/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   Proceedings of IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* variables */
	struct red_vars  vars;
	struct tcf_proto __rcu *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

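/*
 * Worked example (values assumed for illustration): with tab_mask = 7,
 * head = 6 and tail = 2, (2 - 6) & 7 = 4, i.e. four slots are in use,
 * counting any holes left behind by earlier random drops.
 */
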
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Compare flow of two packets
 * Returns true only if source and destination address and port match;
 * false for special cases.
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

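/*
 * Worked example (indices assumed for illustration): with head = 2 and
 * choke_len() = 5, prandom_u32_max(5) might return 3, giving *pidx = 5.
 * If q->tab[5] is a hole, the lookup is attempted up to three times in
 * total before falling back to the packet at q->head.
 */
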
/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (rcu_access_pointer(q->filter_list))
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	if (rcu_access_pointer(q->filter_list)) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}

	return len;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		qdisc_qstats_backlog_dec(sch, skb);
		--sch->q.qlen;
		qdisc_drop(skb, sch);
	}

	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		= "choke",
	.priv_size	= sizeof(struct choke_sched_data),

	.enqueue	= choke_enqueue,
	.dequeue	= choke_dequeue,
	.peek		= choke_peek_head,
	.drop		= choke_drop,
	.init		= choke_init,
	.destroy	= choke_destroy,
	.reset		= choke_reset,
	.change		= choke_change,
	.dump		= choke_dump,
	.dump_stats	= choke_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");