/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*  Quick Fair Queueing
    ===================

    Sources:

    Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS decimal bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax=1<<MTU_SHIFT, w_min = 1 .
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and  V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
  so we can compute F = S + len * inv_w; V is advanced likewise with
  the precomputed inverse of the weight sum (see IWSUM below).
  We use W_TOT in the formulas so we can easily move between
  static and adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
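
/*
 * Worked example (editor's addition, not part of the original source):
 * with FRAC_BITS = 30, a class of weight 4 stores inv_w = (1 << 30) / 4,
 * so serving a 1500 byte packet advances its finish time by
 * len * inv_w, which is len/weight = 375 in fixed point. A minimal
 * sketch of that arithmetic:
 */
static inline u64 qfq_example_finish_delta(unsigned int len, u32 weight)
{
	u32 inv_w = (1UL << 30) / weight;	/* what the scheduler stores */

	return (u64)len * inv_w;		/* len/weight in fixed point */
}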
/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32
/*
 * Shifts used for class<->group mapping.  We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the class.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		12

#define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM		(16*QFQ_MAX_WEIGHT)
#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)
#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
#define QFQ_MIN_LMAX		256	/* min possible lmax for a class */
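
/*
 * Editor's note (illustration only, not in the original source): with
 * the values above, QFQ_MIN_SLOT_SHIFT = 30 + 16 - 24 = 22, and the
 * slot shift that qfq_init_qdisc() assigns to group i reduces to
 * QFQ_MIN_SLOT_SHIFT + i, i.e. each group doubles the slot size of the
 * previous one:
 */
static inline unsigned int qfq_example_slot_shift(unsigned int i)
{
	/* equals QFQ_MTU_SHIFT + FRAC_BITS - (QFQ_MAX_INDEX - i) */
	return QFQ_MIN_SLOT_SHIFT + i;
}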
/*
 * Possible group states.  These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
struct qfq_group;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int refcnt;
	unsigned int filter_cnt;

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct Qdisc *qdisc;

	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32	inv_w;		/* ONE_FP/weight */
	u32	lmax;		/* Max packet size for this flow. */
};
struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active classes. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};
struct qfq_sched {
	struct tcf_proto *filter_list;
	struct Qdisc_class_hash clhash;

	u64 V;			/* Precise virtual time. */
	u32 wsum;		/* weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
};
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}
static void qfq_purge_queue(struct qfq_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
};
/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long)ONE_FP/inv_w, maxlen, index);

	return index;
}
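
/*
 * Worked example (editor's addition): for weight 1 (inv_w = ONE_FP) and
 * maxlen = 1500, slot_size = 1500 << 30, so size_map = slot_size >> 22
 * has __fls() == 18 and index = 19; the correction term only fires when
 * slot_size is an exact power of two. A hypothetical wrapper:
 */
static inline int qfq_example_index(u32 weight, unsigned int maxlen)
{
	return qfq_calc_index(ONE_FP / weight, maxlen);	/* 1, 1500 -> 19 */
}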
/* Length of the next packet (0 if the queue is empty). */
static unsigned int qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;

	skb = sch->ops->peek(sch);
	return skb ? qdisc_pkt_len(skb) : 0;
}
static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
			       unsigned int len);

static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
				    u32 lmax, u32 inv_w, int delta_w)
{
	int i;

	/* update qfq-specific data */
	cl->lmax = lmax;
	cl->inv_w = inv_w;
	i = qfq_calc_index(cl->inv_w, cl->lmax);

	cl->grp = &q->groups[i];

	q->wsum += delta_w;
}
static void qfq_update_reactivate_class(struct qfq_sched *q,
					struct qfq_class *cl,
					u32 inv_w, u32 lmax, int delta_w)
{
	bool need_reactivation = false;
	int i = qfq_calc_index(inv_w, lmax);

	if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
		/*
		 * shift cl->F back, to not charge the
		 * class for the not-yet-served head
		 * packet
		 */
		cl->F = cl->S;
		/* remove class from its slot in the old group */
		qfq_deactivate_class(q, cl);
		need_reactivation = true;
	}

	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);

	if (need_reactivation) /* activate in new group */
		qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
}
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (tca[TCA_OPTIONS] == NULL) {
		pr_notice("qfq: no options\n");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT]) {
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
			pr_notice("qfq: invalid weight %u\n", weight);
			return -EINVAL;
		}
	} else
		weight = 1;

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;
	delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		pr_notice("qfq: total weight out of range (%u + %u)\n",
			  delta_w, q->wsum);
		return -EINVAL;
	}

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
			pr_notice("qfq: invalid max length %u\n", lmax);
			return -EINVAL;
		}
	} else
		lmax = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		if (lmax == cl->lmax && inv_w == cl->inv_w)
			return 0; /* nothing to update */

		sch_tree_lock(sch);
		qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;

	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);

	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
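
/*
 * Editor's aside (illustration, not in the original source): the
 * inv_w/weight round trip in qfq_change_class() quantizes the requested
 * weight to the value the fixed-point inverse actually represents; for
 * the legal range (weight <= 1 << QFQ_MAX_WSHIFT, so weight^2 is far
 * below ONE_FP) the round trip returns the same weight:
 */
static inline u32 qfq_example_effective_weight(u32 weight)
{
	u32 inv_w = ONE_FP / weight;	/* stored form */

	return ONE_FP / inv_w;		/* weight the scheduler enforces */
}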
static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl->inv_w) {
		q->wsum -= ONE_FP / cl->inv_w;
		cl->inv_w = 0;
	}

	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}
static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qfq_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}
static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (--cl->refcnt == 0)
		qfq_destroy_class(sch, cl);
}
static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}
static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}
static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}
static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	qfq_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}
static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;

	xstats.weight = ONE_FP/cl->inv_w;
	xstats.lmax = cl->lmax;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}
/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}
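
/*
 * Illustration (editor's addition): qfq_round_down() clears the low
 * 'shift' bits, so with shift = 3 the timestamps 8..15 all map to 8,
 * i.e. they share one slot of size 1 << 3:
 */
static inline u64 qfq_example_slot_base(u64 ts)
{
	return qfq_round_down(ts, 3);	/* e.g. 13 -> 8, 16 -> 16 */
}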
/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}

/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}
/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}
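
/*
 * Editor's note on the encoding (illustration only): with ER=0, IR=1,
 * EB=2, IB=3, bit 0 of the state means "ineligible" and bit 1 means
 * "blocked", so the two tests above compose by OR, e.g. an ineligible,
 * blocked group ends up in IB:
 */
static inline unsigned int qfq_example_state(bool ineligible, bool blocked)
{
	return (ineligible ? IR : ER) | (blocked ? EB : 0);	/* IR|EB == IB */
}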
/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}
static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}
/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= QFQ_MIN_SLOT_SHIFT;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
{
	unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
	unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;

	if (vslot != old_vslot) {
		unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
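
/*
 * Worked example (editor's addition): if old_V and q->V first differ at
 * slot bit 2 (so fls(vslot ^ old_vslot) == 3), the mask becomes 0x7 and
 * groups 0..2, whose slot boundaries were just crossed, become
 * eligible. A sketch of the mask computation alone:
 */
static inline unsigned long qfq_example_eligible_mask(u64 old_V, u64 new_V)
{
	unsigned long vslot = new_V >> QFQ_MIN_SLOT_SHIFT;
	unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;

	return vslot == old_vslot ? 0 :
		(1UL << fls(vslot ^ old_vslot)) - 1;
}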
/*
 * If the weight and lmax (max_pkt_size) of the classes do not change,
 * then QFQ guarantees that the slot index is never higher than
 * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
 *
 * With the current values of the above constants, the index is
 * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
 *
 * When the weight of a class is increased or the lmax of the class is
 * decreased, a new class with smaller slot size may happen to be
 * activated. The activation of this class should be properly delayed
 * to when the service of the class has finished in the ideal system
 * tracked by QFQ. If the activation of the class is not delayed to
 * this reference time instant, then this class may be unjustly served
 * before other classes waiting for service. This may cause
 * (infrequently) the above bound on the slot index to be violated for
 * some of these unlucky classes.
 *
 * Instead of delaying the activation of the new class, which is quite
 * complex, the following inaccurate but simple solution is used: if
 * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
 * of the class are shifted backward so as to let the slot index
 * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
 * the slot index is above it, then the data structure implementing
 * the bucket list either gets immediately corrupted or may get
 * corrupted on a possible next packet arrival that causes the start
 * time of the group to be shifted backward.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		cl->S -= deltaS;
		cl->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&cl->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
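
/*
 * Example (editor's illustration): with grp->S = 0 and slot_shift = 22,
 * a rounded start time of 3 << 22 selects logical slot 3, stored in
 * physical bucket (grp->front + 3) % QFQ_MAX_SLOTS:
 */
static inline unsigned int qfq_example_bucket(const struct qfq_group *grp,
					      u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;

	return (grp->front + slot) % QFQ_MAX_SLOTS;
}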
/* Maybe introduce hlist_first_entry?? */
static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_class, next);
}
/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_class *cl = qfq_slot_head(grp);

	BUG_ON(!cl);
	hlist_del(&cl->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}
/*
 * Returns the first full queue in a group. As a side effect,
 * adjust the bucket list so the first non-empty bucket is at
 * position 0 in full_slots.
 */
static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}
/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}
static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q, old_V);
	}
}
/*
 * Updates the class, returns true if also the group needs to be updated.
 */
static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
{
	unsigned int len = qdisc_peek_len(cl->qdisc);

	cl->S = cl->F;
	if (!len)
		qfq_front_slot_remove(grp);	/* queue is empty */
	else {
		u64 roundedS;

		cl->F = cl->S + (u64)len * cl->inv_w;
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (roundedS == grp->S)
			return false;

		qfq_front_slot_remove(grp);
		qfq_slot_insert(grp, cl, roundedS);
	}

	return true;
}
static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct sk_buff *skb;
	unsigned int len;
	u64 old_V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);

	cl = qfq_slot_head(grp);
	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (!skb) {
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
		return NULL;
	}

	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	old_V = q->V;
	len = qdisc_pkt_len(skb);
	q->V += (u64)len * IWSUM;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) cl->F, (unsigned long long) q->V);

	if (qfq_update_class(grp, cl)) {
		u64 old_F = grp->F;

		cl = qfq_slot_scan(grp);
		if (!cl)
			__clear_bit(grp->index, &q->bitmaps[ER]);
		else {
			u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
			unsigned int s;

			if (grp->S == roundedS)
				goto skip_unblock;
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			__clear_bit(grp->index, &q->bitmaps[ER]);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}

		qfq_unblock_groups(q, grp->index, old_F);
	}

skip_unblock:
	qfq_update_eligible(q, old_V);

	return skb;
}
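
/*
 * Editor's note (illustration, not part of the original source): the
 * q->V += len * IWSUM update in qfq_dequeue() advances the system
 * virtual time by len/QFQ_MAX_WSUM in fixed point; the constant maximum
 * weight sum is used instead of the live q->wsum, which matches the
 * "static weight sum" option mentioned in the comments at the top of
 * this file:
 */
static inline u64 qfq_example_v_delta(unsigned int len)
{
	return (u64)len * IWSUM;  /* len / QFQ_MAX_WSUM, scaled by ONE_FP */
}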
/*
 * Assign a reasonable start time for a new flow k in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in ER. So, if we have groups in ER, set S to
 * the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = cl->grp->slot_shift;

	roundedF = qfq_round_down(cl->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], cl->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					cl->S = next->F;
				else /* preserve timestamp correctness */
					cl->S = limit;
				return;
			}
		}
		cl->S = q->V;
	} else  /* timestamp is not stale */
		cl->S = cl->F;
}
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	int err;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
			 cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
		qfq_update_reactivate_class(q, cl, cl->inv_w,
					    qdisc_pkt_len(skb), 0);
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	++sch->q.qlen;

	/* If the new skb is not the head of queue, then done here. */
	if (cl->qdisc->q.qlen != 1)
		return err;

	/* If reach this point, queue q was idle */
	qfq_activate_class(q, cl, qdisc_pkt_len(skb));

	return err;
}
/*
 * Handle class switch from idle to backlogged.
 */
static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
			       unsigned int pkt_len)
{
	struct qfq_group *grp = cl->grp;
	u64 roundedS;
	int s;

	qfq_update_start(q, cl);

	/* compute new finish time and rounded start. */
	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
	roundedS = qfq_round_down(cl->S, grp->slot_shift);

	/*
	 * insert cl in the correct bucket.
	 * If cl->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, cl->S))
			goto skip_update;

		/* create a slot for this cl->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) cl->S,
		 (unsigned long long) cl->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, cl, roundedS);
}
static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_class *cl)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(cl->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;
	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&cl->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}
/*
 * called to forcibly destroy a queue.
 * If the queue is not in the front bucket, or if it has
 * other queues in the front bucket, we can simply remove
 * the queue with no other side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_group *grp = cl->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	cl->F = cl->S;
	qfq_slot_remove(q, grp, cl);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		cl = qfq_slot_scan(grp);
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}

	qfq_update_eligible(q, q->V);
}
static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		qfq_deactivate_class(q, cl);
}
static unsigned int qfq_drop(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	unsigned int i, j, len;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			struct qfq_class *cl;
			struct hlist_node *n;

			hlist_for_each_entry(cl, n, &grp->slots[j], next) {

				if (!cl->qdisc->ops->drop)
					continue;

				len = cl->qdisc->ops->drop(cl->qdisc);
				if (len > 0) {
					sch->q.qlen--;
					if (!cl->qdisc->q.qlen)
						qfq_deactivate_class(q, cl);

					return len;
				}
			}
		}
	}

	return 0;
}
static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
				   - (QFQ_MAX_INDEX - i);
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	return 0;
}
static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct hlist_node *n, *tmp;
	unsigned int i, j;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			hlist_for_each_entry_safe(cl, n, tmp,
						  &grp->slots[j], next) {
				qfq_deactivate_class(q, cl);
			}
		}
	}

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			qdisc_reset(cl->qdisc);
	}
	sch->q.qlen = 0;
}
static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			qfq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.get		= qfq_get_class,
	.put		= qfq_put_class,
	.tcf_chain	= qfq_tcf_chain,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};
static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= qfq_drop,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};
static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");
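
/*
 * Usage sketch (editor's addition): a typical setup from userspace via
 * tc(8); see tc-qfq(8) for the authoritative syntax of the 'weight'
 * and 'maxpkt' class parameters:
 *
 *	tc qdisc add dev eth0 root handle 1: qfq
 *	tc class add dev eth0 parent 1: classid 1:1 qfq weight 10 maxpkt 1514
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dst 192.168.0.2 flowid 1:1
 */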