/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
   Author: devik@cdi.cz
   ========================================================================
   HTB is like TBF with multiple classes. It is also similar to CBQ because
   it allows assigning priority to each class in hierarchy.
   In fact it is another implementation of Floyd's formal sharing.

   Levels:
   Each class is assigned a level. Leaves always have level 0 and root
   classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
   one less than their parent.
*/
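
/*
 * Illustrative configuration sketch (not part of the original source):
 * a typical two-level HTB hierarchy is built from user space with
 * iproute2's tc; the device name and numbers below are made-up examples.
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit ceil 100mbit
 *
 * Here 1:1 is an interior class (level 1) and 1:10/1:20 are leaves
 * (level 0); unclassified traffic lands in 1:20 via "default 20", and
 * each leaf may borrow from 1:1 up to its ceil.
 */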

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param    (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
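
/*
 * Usage sketch (an assumption for illustration, not from the original
 * source): with permission mode 0640 both knobs may be set at module
 * load time or flipped later by root through sysfs, e.g.:
 *
 *	modprobe sch_htb htb_hysteresis=1
 *	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */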

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root row;
		struct rb_root feed;
	};
	struct rb_node *ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32 last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg rate;
	struct psched_ratecfg ceil;
	s64 buffer, cbuffer;		/* token bucket depth/rate */
	s64 mbuffer;			/* max wait time */
	u32 prio;			/* these two are used only by leaves... */
	int quantum;			/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu *filter_list;	/* class attached filters */
	int filter_cnt;
	int refcnt;			/* usage count of this class */

	int level;			/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	struct gnet_stats_rate_est64 rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct tc_htb_xstats xstats;	/* our special stats */

	/* token bucket parameters */
	s64 tokens, ctokens;		/* current number of tokens */
	s64 t_c;			/* checkpoint time */

	union {
		struct htb_class_leaf {
			struct list_head drop_list;
			int deficit[TC_HTB_MAXDEPTH];
			struct Qdisc *q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	} un;
	s64 pq_key;

	int prio_activity;		/* for which prios are we active */
	enum htb_cmode cmode;		/* current mode of the class */
	struct rb_node pq_node;		/* node for event queue */
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
};

struct htb_level {
	struct rb_root wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int defcls;		/* class where unclassified flows go to */
	int rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu *filter_list;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
	int direct_qlen;
	struct work_struct work;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	long direct_pkts;

	struct qdisc_watchdog watchdog;

	s64 now;	/* cached dequeue time */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* time of nearest event per level (row) */
	s64 near_ev_cache[TC_HTB_MAXDEPTH];

	int row_mask[TC_HTB_MAXDEPTH];

	struct htb_level hlevel[TC_HTB_MAXDEPTH];
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful,
 * we finish and return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use so
				 * reset bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->un.inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->un.inner.clprio[prio].feed);

			if (!p->un.inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
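
/*
 * Worked example (illustrative numbers, not from the original source):
 * with hysteresis off, htb_lowater() == htb_hiwater() == 0. If
 * cl->ctokens + *diff computes to -5000 (token counts are kept in
 * nanosecond units), the ceil bucket is empty: the mode is
 * HTB_CANT_SEND and *diff is set to 5000, the shortfall that
 * htb_add_to_wait_tree() later uses as the delay until the next mode
 * change. If only cl->tokens + *diff is negative, just the rate bucket
 * is empty and the class may still borrow (HTB_MAY_BORROW).
 */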

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->qstats.drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->un.inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_unthrottled(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	qdisc_qstats_overlimit(sch);
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->qstats.backlog -= len;
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);
	__skb_queue_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	__u32 qlen = 0;

	if (!cl->level && cl->un.leaf.q)
		qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filter need its target class alive to be able to call
	 * unbind_filter on it (without Oops).
	 */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does
	 * tc subsys guarantee us that in htb_destroy it holds no class
	 * refs so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		unsigned int qlen = cl->un.leaf.q->q.qlen;
		unsigned int backlog = cl->un.leaf.q->qstats.backlog;

		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keeping backward compatible with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						qdisc_root_sleeping_lock(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				kfree(cl);
				goto failure;
			}
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		 * so that can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;
			unsigned int backlog = parent->un.leaf.q->qstats.backlog;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			spinlock_t *lock = qdisc_root_sleeping_lock(sch);

			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    lock,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* it used to be a nasty bug here, we have to check that node
	 * is really leaf before changing cl->un.leaf !
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}
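
/*
 * Worked example for the quantum/r2q logic in htb_change_class() above
 * (illustrative numbers, not from the original source): a leaf with
 * rate 80mbit has rate_bytes_ps = 10,000,000; with the common iproute2
 * default of r2q = 10 the derived DRR quantum is 1,000,000 bytes, above
 * the 200000 sanity cap, so unless the user supplies an explicit
 * quantum the kernel logs "quantum of class ... is big" and clamps it
 * to 200000.
 */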

static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least tc_index filter uses this just to get class
	 * for other reasons so that we have to allow for it.
	 * ----
	 * 19.6.2002 As Werner explained it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");