/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* Stochastic Fairness Queuing algorithm.
   =======================================

   Source:
   Paul E. McKenney "Stochastic Fairness Queuing",
   IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

   Paul E. McKenney "Stochastic Fairness Queuing",
   "Interworking: Research and Experience", v.2, 1991, p.113-131.


   See also:
   M. Shreedhar and George Varghese "Efficient Fair
   Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


   This is not the thing that is usually called (W)FQ nowadays.
   It does not use any timestamp mechanism, but instead
   processes queues in round-robin order.

   ADVANTAGE:

   - It is very cheap. Both CPU and memory requirements are minimal.

   DRAWBACKS:

   - "Stochastic" -> It is not 100% fair.
   When hash collisions occur, several flows are considered as one.

   - "Round-robin" -> It introduces larger delays than virtual clock
   based schemes, and should not be used for isolating interactive
   traffic from non-interactive. This means that this scheduler
   should be used as a leaf of CBQ or P3, which put interactive
   traffic into a higher priority band.

   We still need true WFQ for the top-level CSZ, but using WFQ
   for best-effort traffic is absolutely pointless:
   SFQ is superior for this purpose.

   IMPLEMENTATION:
   This implementation limits the maximal queue length to 128 packets,
   the max MTU to 2^18-1, the number of flows to 128 and the number of
   hash buckets to 1024. The only goal of these restrictions was that
   all data fit into one 4K page on 32-bit arches.

   It is easy to increase these values, but not in flight. */
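
/* A rough breakdown of the 4K-page claim above (an estimate assuming typical
 * 32-bit struct packing, not an exact sizeof()):
 *	ht[1024]   : 1024 * sizeof(sfq_index)       = 1024 bytes
 *	slots[128] :  128 * sizeof(struct sfq_slot) ~= 128 * 16 = 2048 bytes
 *	dep[128]   :  128 * sizeof(struct sfq_head)  =  256 bytes
 * plus a few dozen bytes of parameters and the perturbation timer, i.e.
 * roughly 3.4 KB, which indeed fits in a single 4 KB page.
 */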

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_HASH_DIVISOR	1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1 << 3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
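
/* Illustration of the scaling above (the numbers are only an example):
 * with a typical Ethernet quantum of 1514 bytes, scaled_quantum becomes
 * SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190, while even a 64K
 * packet costs only SFQ_ALLOT_SIZE(65536) = 8192 units, comfortably within
 * the range of the signed 16-bit 'allot' field.
 */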

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}
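
/* For example, with SFQ_SLOTS == 128: val == 3 resolves to &q->slots[3].dep
 * (a real slot), while val == 130 resolves to &q->dep[2], the anchor of the
 * chain of slots that currently hold exactly two packets.
 */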

static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}

static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}
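
/* Example of the values folded above, for a non-fragmented TCP/IPv4 packet:
 * h = daddr and h2 = saddr ^ IPPROTO_TCP; since proto_ports_offset() returns
 * 0 for TCP, h2 is further XORed with the 32-bit word holding the source and
 * destination ports, so the per-flow key is effectively the usual 5-tuple,
 * randomised by q->perturbation.
 */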

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
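
/* sfq_inc()/sfq_dec() keep every slot linked on the dep[] chain that matches
 * its current queue length, and cur_depth tracks the deepest occupied chain.
 * For example, a slot growing from 2 to 3 queued packets is unlinked from
 * the dep[2] chain and relinked on dep[3]; sfq_drop() can then locate a
 * longest slot in O(1) as q->dep[q->cur_depth].next.
 */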

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
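
/* These helpers rely on a layout trick: struct sfq_slot deliberately begins
 * with skblist_next/skblist_prev, matching the next/prev pointers that come
 * first in struct sk_buff, so casting the slot to (struct sk_buff *) lets it
 * serve as the sentinel head of its own circular skb list. This is the
 * dependency the "standard list_head" note above refers to.
 */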

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit) {
		qdisc_bstats_update(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}

static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	return q->slots[q->tail->next].skblist_next;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
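
/* Allot accounting example for the dequeue loop above (a 1514-byte quantum
 * is only an illustration): each dequeued packet costs SFQ_ALLOT_SIZE(len),
 * e.g. 190 units for a full-size frame or 8 for a 64-byte ACK; once a slot's
 * allot drops to zero or below it becomes the new tail and is topped up with
 * another scaled_quantum (190) before the next slot gets its turn.
 */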

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");
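
/* For reference, a typical userspace configuration (assuming the iproute2
 * 'tc' utility):
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * attaches this qdisc as the root qdisc of eth0 and re-seeds the flow hash
 * every 10 seconds via the perturbation timer above.
 */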