/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.
*/
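
/*
 * Example (illustrative only, not part of the original file): from user
 * space this qdisc is normally configured through the iproute2 "tc"
 * tool, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 * which requests a 100ms mean delay with +/-10ms jitter, each delay
 * 25% correlated with the previous one. Loss, duplication, corruption,
 * and reordering map onto the corresponding fields of struct
 * netem_sched_data below.
 */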

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
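
/*
 * Worked example of the fixed-point blend in get_crandom() (commentary
 * added for illustration): with rho scaled so that 2^32 corresponds to
 * 100% correlation, the update is
 *
 *	answer = (value * (2^32 - rho) + last * rho) >> 32
 *
 * i.e. a weighted average of fresh entropy and the previous output.
 * rho == 0 short-circuits to pure net_random(), while rho == ~0 makes
 * each output track the previous one almost exactly.
 */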

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
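
/*
 * Commentary on the arithmetic above (added for illustration): the
 * table entries are scaled by NETEM_DIST_SCALE, so the desired result
 * is mu + t * sigma / NETEM_DIST_SCALE. To keep precision without
 * overflowing, sigma is split into quotient and remainder:
 *
 *	t * sigma / SCALE = t * (sigma % SCALE) / SCALE   (rounded)
 *			  + t * (sigma / SCALE)
 *
 * which is exactly what the two terms of the return statement compute.
 */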

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, then re-insert at top of the
	 * qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed, since we will be modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

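	/*
	 * Reordering decision, annotated for illustration: a packet takes
	 * the normal delayed path unless a gap is configured, at least
	 * q->gap packets have been sent since the last reordered one, and
	 * the correlated random draw lands at or below q->reorder. E.g.
	 * with gap=5 and reorder=25%, once five packets have passed, each
	 * subsequent packet has roughly a 25% chance of being sent
	 * immediately, after which the gap count restarts.
	 */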
	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||			/* not doing reordering */
	    q->counter < q->gap ||		/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets, but don't change the time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* has the packet's scheduled send time arrived? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(root_lock);

	kfree(d);
	return 0;
}
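
/*
 * Concurrency note (added commentary): the replacement table is built
 * without any lock held; only the pointer swap runs under the qdisc
 * root lock, so the packet path never observes a half-initialized
 * table, and the old table is safe to free once the swap is done.
 */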

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
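
/*
 * Illustrative note on the message layout assumed above: netem options
 * arrive as a fixed struct tc_netem_qopt at the start of TCA_OPTIONS,
 * optionally followed by nested attributes:
 *
 *	+----------------------+---------------------------------+
 *	| struct tc_netem_qopt | TCA_NETEM_CORR, _REORDER, ...   |
 *	+----------------------+---------------------------------+
 *
 * parse_attr() skips over the aligned fixed part and hands whatever
 * remains, if anything, to nla_parse().
 */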

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR]) {
		ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER]) {
		ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_CORRUPT]) {
		ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the timestamps in their skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
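
/*
 * Design note (added commentary): tfifo keeps the queue sorted by
 * time_to_send using an insertion walk from the tail. Since most new
 * packets carry the latest timestamp, the common case is an O(1)
 * append; only packets whose randomized delay lands earlier than
 * already-queued ones pay for the reverse walk.
 */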

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");