/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet.  It can delay
	 packets and add random jitter (and correlation).  The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves.  Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using a token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

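/*
 * Illustrative configuration via the usual iproute2 "tc" front end (the
 * exact command syntax below is an assumption about that tool, not something
 * defined in this file):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * which would add 100ms of delay with 10ms of jitter (25% correlated) and
 * 0.3% random loss to everything leaving eth0.
 */
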
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u32 rate;
	s32 packet_overhead;
	u32 cell_size;
	u32 cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

1da177e4 LT |
139 | /* init_crandom - initialize correlated random number generator |
140 | * Use entropy source for initial seed. | |
141 | */ | |
142 | static void init_crandom(struct crndstate *state, unsigned long rho) | |
143 | { | |
144 | state->rho = rho; | |
145 | state->last = net_random(); | |
146 | } | |
147 | ||
148 | /* get_crandom - correlated random number generator | |
149 | * Next number depends on last value. | |
150 | * rho is scaled to avoid floating point. | |
151 | */ | |
b407621c | 152 | static u32 get_crandom(struct crndstate *state) |
1da177e4 LT |
153 | { |
154 | u64 value, rho; | |
155 | unsigned long answer; | |
156 | ||
bb2f8cc0 | 157 | if (state->rho == 0) /* no correlation */ |
1da177e4 LT |
158 | return net_random(); |
159 | ||
160 | value = net_random(); | |
161 | rho = (u64)state->rho + 1; | |
162 | answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; | |
163 | state->last = answer; | |
164 | return answer; | |
165 | } | |
166 | ||
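/*
 * The shift-based update above is fixed-point arithmetic for
 *
 *	next = (1 - p) * uniform + p * last,  where p = rho / 2^32
 *
 * so rho == 0 yields independent samples, while rho close to 2^32 makes
 * each sample track the previous one closely.
 */
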
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *	1 => successfully transmitted packets within a gap period
	 *	4 => isolated losses within a gap period
	 *	3 => lost packets within a burst period
	 *	2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}

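/*
 * In the Gilbert-Elliot mapping above (see struct clgstate), a1 and a2 are
 * the good->bad and bad->good transition probabilities, while a4 and a3 are
 * compared as the per-packet loss probabilities in the good and bad states
 * respectively.  All of them are u32 values compared directly against
 * net_random(), so (as with q->loss, where ~0 means "always") a probability
 * is expected to be scaled to the full 32-bit range by the configuring tool.
 */
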
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model):
		 * a true return from the Markov 4-state loss generator
		 * means this packet is lost.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * a true return from the Gilbert-Elliot loss generator
		 * means this packet is lost.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

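/*
 * The table path above computes mu + (t * sigma) / NETEM_DIST_SCALE, with
 * sigma split into quotient and remainder so the intermediate products stay
 * in range, plus rounding by half a scale unit.  Table entries t are samples
 * of the target distribution with zero mean and unit deviation, pre-scaled
 * by NETEM_DIST_SCALE; e.g. t == NETEM_DIST_SCALE maps to mu + sigma.
 */
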
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

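/*
 * Worked example for the conversion above, assuming q->rate is configured in
 * bytes per second: a 1500 byte packet at rate 125000 (1 Mbit/s) gives
 * 1500 * NSEC_PER_SEC / 125000 = 12 ms of transmission time, which is then
 * converted to scheduler ticks and added to the packet's queuing delay.
 */
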
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb = skb_peek_tail(list);

	/* Optimize for add at tail */
	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
		return __skb_queue_tail(list, nskb);

	skb_queue_reverse_walk(list, skb) {
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			break;
	}

	__skb_queue_after(list, skb, nskb);
}

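/*
 * The t(ime)fifo above keeps sch->q sorted by time_to_send.  This matters
 * because random jitter can give a newly enqueued packet an earlier send
 * time than packets already queued; sorted insertion keeps delivery in time
 * order unless reordering is explicitly requested via the gap/reorder knobs.
 */
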
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 * Ideally, this orphaning should be done after the rate limiting
	 * module, because this breaks TCP Small Queue, and other mechanisms
	 * based on socket sk_wmem_alloc.
	 */
	if (q->latency || q->jitter)
		skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff_head *list = &sch->q;

			if (!skb_queue_empty(list)) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
			}

			delay += packet_len_2_sched_time(skb->len, q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = qdisc_peek_head(sch);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);

		/* is it time to send this packet yet? */
		if (cb->time_to_send <= psched_get_time()) {
			__skb_unlink(skb, &sch->q);
			sch->qstats.backlog -= qdisc_pkt_len(skb);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
deliver:
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

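/*
 * When the head packet is not yet due, netem_dequeue() arms the qdisc
 * watchdog for that packet's time_to_send and returns NULL; the watchdog
 * timer later clears the throttled state and reschedules the qdisc, so
 * dequeue runs again once the emulated delay has elapsed.
 */
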
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

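/*
 * The delay distribution table is built in user space and handed to the
 * kernel through the TCA_NETEM_DELAY_DIST attribute; it is assumed (based on
 * the usual iproute2 front end, which ships pre-generated tables such as
 * "normal" and "pareto") that the payload is simply the array of scaled s16
 * samples consumed by tabledist() above.
 */
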
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}

static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

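/*
 * parse_attr() exists because netem's TCA_OPTIONS payload is a fixed
 * struct tc_netem_qopt immediately followed by optional netlink attributes;
 * the helper skips the aligned struct and parses only the trailing
 * attributes against netem_policy.
 */
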
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

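/*
 * netem exposes a single pseudo-class (minor 1); its leaf is the optional
 * child qdisc grafted below netem, which allows a rate-controlling or
 * queue-management discipline to run underneath the emulated impairments.
 */
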
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");