net/sched/sch_hfsc.c
/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
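/*
 * For illustration: a service curve is the two-segment linear function
 * described by the triple (m1, d, m2) -- slope m1 for the first d
 * microseconds of a backlog period, slope m2 from then on. e.g.
 * m1 = 2Mbit/s, d = 10ms, m2 = 1Mbit/s gives a class a 2Mbit/s burst
 * for 10ms each time it becomes backlogged, then a steady 1Mbit/s.
 * with m1 == m2 or d == 0 the curve degenerates to the single slope m2.
 */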

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 * coordinates are given by 64 bit unsigned integers.
 * x-axis: unit is clock count.
 * y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * the inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
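
/*
 * For illustration, sc2isc() below fills in an internal_sc from a
 * (m1, d, m2) triple roughly as follows (ignoring the round-up done by
 * m2sm()/m2ism()/d2dx()):
 *
 *	sm1  = (m1 << SM_SHIFT) / PSCHED_TICKS_PER_SEC
 *	ism1 = (PSCHED_TICKS_PER_SEC << ISM_SHIFT) / m1
 *	dx   = d * PSCHED_TICKS_PER_SEC / USEC_PER_SEC
 *	dy   = seg_x2y(dx, sm1)
 *
 * and sm2/ism2 analogously from m2; ism is HT_INFINITY when m == 0.
 */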

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tcf_proto __rcu *filter_list; /* filter list */
	unsigned int	filter_cnt;	/* filter count */
	unsigned int	level;		/* class level in hierarchy */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8	cl_flags;		/* which curves are valid */
	u32	cl_vtperiod;		/* vt period sequence number */
	u32	cl_parentperiod;	/* parent's vt period sequence number*/
	u32	cl_nactive;		/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
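/*
 * note: eltree_insert() below compares keys with '>=', so classes with
 * equal eligible times go to the right subtree and an in-order walk
 * visits them in insertion order.
 */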

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}
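
/*
 * the early break in eltree_get_mindl() above is safe because the tree
 * is ordered by cl_e: once p->cl_e > cur_time, no later node can be
 * eligible either. e.g. with eligible times {5, 8, 12} and cur_time 9,
 * only the first two nodes are scanned and the smaller cl_d of the two
 * wins.
 */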

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes, sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 *  sm and ism are scaled in order to keep effective digits.
 *  SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 *  digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125    7.8125    0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
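
/*
 * worked example, assuming PSCHED_SHIFT 10 (1.024us ticks, so
 * SM_SHIFT = 20 and ISM_SHIFT = 18): for m = 1.25e6 bytes/sec
 * (a 10Mbit/s rate),
 *
 *	bytes per tick = 1.25e6 * 1.024e-6 = 1.28
 *	sm  = 1.28 * 2^20       ~= 1342178	(m2sm() rounds up)
 *	ism = (1 / 1.28) * 2^18  = 204800	(0.78125 ticks/byte)
 *
 * i.e. both directions keep well over the 4 effective decimal digits
 * targeted by the table above.
 */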

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
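
/*
 * a sketch of why the split in seg_x2y() works: writing
 * x = xh * 2^SM_SHIFT + xl with xl = x & SM_MASK,
 *
 *	(x * sm) >> SM_SHIFT == xh * sm + ((xl * sm) >> SM_SHIFT)
 *
 * exactly, since xh * sm * 2^SM_SHIFT is already a multiple of
 * 2^SM_SHIFT; for the magnitudes used here each partial product fits
 * in 64 bits where the full x * sm might not.
 */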

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x    = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve at
 * the given y-value (amount of work); this is the inverse of
 * rtsc_x2y() below.
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
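
/*
 * in plain form, rtsc_x2y() and rtsc_y2x() above evaluate the two-piece
 * runtime curve and its inverse:
 *
 *	y(t) = y + sm1 * (t - x)		for x <= t <= x + dx
 *	y(t) = y + dy + sm2 * (t - x - dx)	for t > x + dx
 *
 * with the scaled inverse slopes ism1/ism2 standing in for division in
 * the inverse direction; a flat first segment (dy == 0) maps any y on
 * it to the segment's right end, x + dx.
 */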

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
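
/*
 * derivation of the intersection offset used in rtsc_min() above: on
 * the first segments both curves are straight lines, so requiring
 *
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 *
 * i.e. sm1 * dx == sm2 * dx + (y1 - y), gives
 *
 *	dx = (y1 - y) / (sm1 - sm2)
 *
 * which, with the slopes scaled by 2^SM_SHIFT, is exactly the
 * dx = ((y1 - y) << SM_SHIFT) / dsm computed above.
 */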

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
				       cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
				 cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	/*
	 * vttree removal is handled in update_vf(), so update_vf(cl, 0, 0)
	 * must be called explicitly to remove a class from the vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vtoff + cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
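
/*
 * usage sketch (iproute2 syntax, assuming the standard tc-hfsc
 * frontend; not taken from this file) -- the three attributes above
 * correspond to the rt/ls/ul keywords:
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 2mbit d 10ms m2 1mbit \
 *		ls m2 1mbit ul m2 5mbit
 *
 * rt supplies TCA_HFSC_RSC, ls TCA_HFSC_FSC and ul TCA_HFSC_USC.
 */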

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto __rcu **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}



static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched  = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		set_active(cl, qdisc_pkt_len(skb));
		/*
		 * If this is the first packet, isolate the head so an eventual
		 * head drop before the first dequeue operation has no chance
		 * to invalidate the deadline.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);

	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}
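
/*
 * summary of the selection in hfsc_dequeue() above: real-time criteria
 * (minimum deadline among eligible classes) take strict precedence, and
 * only when no class is eligible does link-sharing pick the leaf with
 * the minimum virtual time. e.g. a class whose eligible time has passed
 * (cl_e <= cur_time) is served even if its vt is far ahead of its
 * siblings'.
 */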

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);