/* net/sched/sch_hhf.c	Heavy-Hitter Filter (HHF)
 *
 * Copyright (C) 2013 Terry Lam <vtlam@google.com>
 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */

#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

/* Heavy-Hitter Filter (HHF)
 *
 * Principles:
 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
 * in which the heavy-hitter bucket is served with less weight.
 * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
 * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also
 * have a higher share of bandwidth.
 *
 * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
 * following paper:
 * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
 * Accounting", in ACM SIGCOMM, 2002.
 *
 * Conceptually, a multi-stage filter comprises k independent hash functions
 * and k counter arrays. Packets are indexed into k counter arrays by k hash
 * functions, respectively. The counters are then increased by the packet
 * sizes. Therefore,
 *    - For a heavy-hitter flow: *all* of its k array counters must be large.
 *    - For a non-heavy-hitter flow: some of its k array counters can be large
 *      due to hash collision with other small flows; however, with high
 *      probability, not *all* k counters are large.
 *
 * By the design of the multi-stage filter algorithm, the false negative rate
 * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
 * susceptible to false positives (non-heavy-hitters mistakenly classified as
 * heavy-hitters).
 * Therefore, we also implement the following optimizations to reduce false
 * positives by avoiding unnecessary increment of the counter values:
 *    - Optimization O1: once a heavy-hitter is identified, its bytes are not
 *      accounted in the array counters. This technique is called "shielding"
 *      in Section 3.3.1 of [EV02].
 *    - Optimization O2: conservative update of counters
 *      (Section 3.3.2 of [EV02]),
 *          New counter value = max {old counter value,
 *                                   smallest counter value + packet bytes}
 *
 * Finally, we refresh the counters periodically since otherwise the counter
 * values will keep accumulating.
 *
 * Once a flow is classified as heavy-hitter, we also save its per-flow state
 * in an exact-matching flow table so that its subsequent packets can be
 * dispatched to the heavy-hitter bucket accordingly.
 *
 *
 * At a high level, this qdisc works as follows:
 * Given a packet p:
 *   - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
 *     heavy-hitter flow table, denoted table T, then send p to the
 *     heavy-hitter bucket.
 *   - Otherwise, forward p to the multi-stage filter, denoted filter F
 *     + If F decides that p belongs to a non-heavy-hitter flow, then send p
 *       to the non-heavy-hitter bucket.
 *     + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
 *       then set up a new flow entry for the flow-id of p in the table T and
 *       send p to the heavy-hitter bucket.
 *
 * In this implementation:
 *   - T is a fixed-size hash-table with 1024 entries. Hash collision is
 *     resolved by linked-list chaining.
 *   - F has four counter arrays, each array containing 1024 32-bit counters.
 *     That means 4 * 1024 * 32 bits = 16KB of memory.
 *   - Since each array in F contains 1024 counters, 10 bits are sufficient to
 *     index into each array.
 *     Hence, instead of having four hash functions, we chop the 32-bit
 *     skb-hash into three 10-bit chunks, and the remaining 10-bit chunk is
 *     computed as the XOR sum of those three chunks.
 *   - We need to clear the counter arrays periodically; however, directly
 *     memsetting 16KB of memory can lead to cache eviction and unwanted delay.
 *     So by representing each counter by a valid bit, we only need to reset
 *     4K valid bits (i.e. 512 bytes) instead of 16KB of memory.
 *   - The Deficit Round Robin engine is taken from the fq_codel implementation
 *     (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
 *     fq_codel_flow in the fq_codel implementation.
 */
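
/* Illustrative user-space sketch of the multi-stage filter described above,
 * kept out of the build. It is a minimal model under the same 4 x 1024
 * geometry; toy_classify() and the other TOY_* names are hypothetical and
 * exist only for this example, they are not part of the qdisc. It shows the
 * hash chopping, the admit test, O1 "shielding" and the O2 conservative
 * update.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TOY_STAGES	4
#define TOY_LEN		1024
#define TOY_MASK	0x3FF
#define TOY_ADMIT	131072		/* bytes, as hhf_admit_bytes */

static uint32_t toy_cnt[TOY_STAGES][TOY_LEN];

/* Returns 1 iff a packet of pkt_len bytes from flow `hash' is classified
 * as a heavy-hitter by the filter.
 */
static int toy_classify(uint32_t hash, uint32_t pkt_len)
{
	uint32_t pos[TOY_STAGES], min = ~0U, xorsum = 0;
	int i;

	/* Three 10-bit chunks of the hash; the last index is their XOR sum
	 * (folded with the leftover high-order bits of the hash).
	 */
	for (i = 0; i < TOY_STAGES - 1; i++) {
		pos[i] = (hash >> (10 * i)) & TOY_MASK;
		xorsum ^= pos[i];
	}
	pos[TOY_STAGES - 1] = xorsum ^ (hash >> (10 * (TOY_STAGES - 1)));

	/* Smallest of the k counters, as if pkt_len were added everywhere. */
	for (i = 0; i < TOY_STAGES; i++)
		if (toy_cnt[i][pos[i]] + pkt_len < min)
			min = toy_cnt[i][pos[i]] + pkt_len;

	/* Heavy-hitter iff even the smallest counter exceeds the threshold.
	 * O1 "shielding": a detected heavy-hitter leaves the counters alone.
	 */
	if (min > TOY_ADMIT)
		return 1;

	/* O2 conservative update: lift each counter only up to `min'. */
	for (i = 0; i < TOY_STAGES; i++)
		if (toy_cnt[i][pos[i]] < min)
			toy_cnt[i][pos[i]] = min;
	return 0;
}

int main(void)
{
	int i, hh = 0;

	/* ~87 full-size (1514B) packets cross the 128KB threshold. */
	for (i = 0; i < 100; i++)
		hh += toy_classify(0xdeadbeef, 1514);
	printf("packets classified as heavy-hitter: %d\n", hh);
	return 0;
}
#endif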

/* Non-configurable parameters */
#define HH_FLOWS_CNT	 1024	/* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT	 4	/* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN	 1024	/* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10	/* masking 10 bits */
#define HHF_BIT_MASK	 0x3FF	/* bitmask of 10 bits */

#define WDRR_BUCKET_CNT	 2	/* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
	WDRR_BUCKET_FOR_HH	= 0, /* bucket id for heavy-hitters */
	WDRR_BUCKET_FOR_NON_HH	= 1  /* bucket id for non-heavy-hitters */
};

#define hhf_time_before(a, b)	\
	(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))

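/* Worked example of the wraparound-safe comparison above: with
 * a = 0xfffffff0 and b = 0x00000010 (i.e. the jiffies counter has just
 * wrapped), a - b = 0xffffffe0, which as an s32 is -32 < 0, so `a' is still
 * correctly treated as before `b' even though a > b as u32 values.
 */
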
/* Heavy-hitter per-flow state */
struct hh_flow_state {
	u32		 hash_id;	/* hash of flow-id (e.g. TCP 5-tuple) */
	u32		 hit_timestamp;	/* last time heavy-hitter was seen */
	struct list_head flowchain;	/* chaining under hash collision */
};

/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
	struct sk_buff	 *head;
	struct sk_buff	 *tail;
	struct list_head bucketchain;
	int		 deficit;
};

struct hhf_sched_data {
	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
	u32		   perturbation;   /* hash perturbation */
	u32		   quantum;	   /* psched_mtu(qdisc_dev(sch)); */
	u32		   drop_overlimit; /* number of times max qdisc packet
					    * limit was hit
					    */
	struct list_head   *hh_flows;	   /* table T (currently active HHs) */
	u32		   hh_flows_limit;	/* max active HH allocs */
	u32		   hh_flows_overlimit;	/* num of disallowed HH allocs */
	u32		   hh_flows_total_cnt;	/* total admitted HHs */
	u32		   hh_flows_current_cnt; /* total current HHs */
	u32		   *hhf_arrays[HHF_ARRAYS_CNT];	/* HH filter F */
	u32		   hhf_arrays_reset_timestamp;	/* last time hhf_arrays
							 * was reset
							 */
	unsigned long	   *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
							     * of hhf_arrays
							     */
	/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
	struct list_head   new_buckets;	/* list of new buckets */
	struct list_head   old_buckets;	/* list of old buckets */

	/* Configurable HHF parameters */
	u32		   hhf_reset_timeout;	/* interval to reset counter
						 * arrays in filter F
						 * (default 40ms)
						 */
	u32		   hhf_admit_bytes;	/* counter thresh to classify as
						 * HH (default 128KB).
						 * With these default values,
						 * 128KB / 40ms = 25 Mbps,
						 * i.e., we expect to capture
						 * HHs sending > 25 Mbps.
						 */
	u32		   hhf_evict_timeout;	/* aging threshold to evict idle
						 * HHs out of table T. This
						 * should be large enough to
						 * avoid reordering during HH
						 * eviction. (default 1s)
						 */
	u32		   hhf_non_hh_weight;	/* WDRR weight for non-HHs
						 * (default 2,
						 * i.e., non-HH : HH = 2 : 1)
						 */
};

static u32 hhf_time_stamp(void)
{
	return jiffies;
}

/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
				       struct list_head *head,
				       struct hhf_sched_data *q)
{
	struct hh_flow_state *flow, *next;
	u32 now = hhf_time_stamp();

	if (list_empty(head))
		return NULL;

	list_for_each_entry_safe(flow, next, head, flowchain) {
		u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

		if (hhf_time_before(prev, now)) {
			/* Delete expired heavy-hitters, but preserve one entry
			 * to avoid kzalloc() when next time this slot is hit.
			 */
			if (list_is_last(&flow->flowchain, head))
				return NULL;
			list_del(&flow->flowchain);
			kfree(flow);
			q->hh_flows_current_cnt--;
		} else if (flow->hash_id == hash) {
			return flow;
		}
	}
	return NULL;
}

/* Returns a flow state entry for a new heavy-hitter.  Either reuses an
 * expired entry or dynamically allocates a new one.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
					  struct hhf_sched_data *q)
{
	struct hh_flow_state *flow;
	u32 now = hhf_time_stamp();

	if (!list_empty(head)) {
		/* Find an expired heavy-hitter flow entry. */
		list_for_each_entry(flow, head, flowchain) {
			u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

			if (hhf_time_before(prev, now))
				return flow;
		}
	}

	if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
		q->hh_flows_overlimit++;
		return NULL;
	}
	/* Create a new entry. */
	flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
	if (!flow)
		return NULL;

	q->hh_flows_current_cnt++;
	INIT_LIST_HEAD(&flow->flowchain);
	list_add_tail(&flow->flowchain, head);

	return flow;
}

/* Assigns packets to WDRR buckets.  Implements a multi-stage filter to
 * classify heavy-hitters.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	u32 tmp_hash, hash;
	u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
	struct hh_flow_state *flow;
	u32 pkt_len, min_hhf_val;
	int i;
	u32 prev;
	u32 now = hhf_time_stamp();

	/* Reset the HHF counter arrays if this is the right time. */
	prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
	if (hhf_time_before(prev, now)) {
		for (i = 0; i < HHF_ARRAYS_CNT; i++)
			bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
		q->hhf_arrays_reset_timestamp = now;
	}

	/* Get hashed flow-id of the skb. */
	hash = skb_get_hash_perturb(skb, q->perturbation);

	/* Check if this packet belongs to an already established HH flow. */
	flow_pos = hash & HHF_BIT_MASK;
	flow = seek_list(hash, &q->hh_flows[flow_pos], q);
	if (flow) { /* found its HH flow */
		flow->hit_timestamp = now;
		return WDRR_BUCKET_FOR_HH;
	}

	/* Now pass the packet through the multi-stage filter. */
	tmp_hash = hash;
	xorsum = 0;
	for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
		/* Split the skb_hash into three 10-bit chunks. */
		filter_pos[i] = tmp_hash & HHF_BIT_MASK;
		xorsum ^= filter_pos[i];
		tmp_hash >>= HHF_BIT_MASK_LEN;
	}
	/* The last chunk is the XOR sum of the other chunks, folded with the
	 * remaining high-order bits of the hash.
	 */
	filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;

	pkt_len = qdisc_pkt_len(skb);
	min_hhf_val = ~0U;
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		u32 val;

		if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
			q->hhf_arrays[i][filter_pos[i]] = 0;
			__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
		}

		val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
		if (min_hhf_val > val)
			min_hhf_val = val;
	}

	/* Found a new HH iff all counter values > HH admit threshold. */
	if (min_hhf_val > q->hhf_admit_bytes) {
		/* Just captured a new heavy-hitter. */
		flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
		if (!flow) /* memory alloc problem */
			return WDRR_BUCKET_FOR_NON_HH;
		flow->hash_id = hash;
		flow->hit_timestamp = now;
		q->hh_flows_total_cnt++;

		/* By returning without updating counters in q->hhf_arrays,
		 * we implicitly implement "shielding" (see Optimization O1).
		 */
		return WDRR_BUCKET_FOR_HH;
	}

	/* Conservative update of HHF arrays (see Optimization O2). */
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
			q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
	}
	return WDRR_BUCKET_FOR_NON_HH;
}
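
/* Example of the path above, assuming the default parameters: a single flow
 * of 1514-byte packets needs ~87 packets within one 40ms reset window to push
 * its smallest filter counter past hhf_admit_bytes (128KB); the 87th packet
 * installs the flow in table T, and later packets match in seek_list() and
 * skip the filter entirely while the entry stays fresh (entries idle longer
 * than hhf_evict_timeout, 1s by default, are lazily evicted).
 */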

/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
	struct sk_buff *skb = bucket->head;

	bucket->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
	if (bucket->head == NULL)
		bucket->head = skb;
	else
		bucket->tail->next = skb;
	bucket->tail = skb;
	skb->next = NULL;
}

static unsigned int hhf_drop(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct wdrr_bucket *bucket;

	/* Always try to drop from heavy-hitters first. */
	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
	if (!bucket->head)
		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

	if (bucket->head) {
		struct sk_buff *skb = dequeue_head(bucket);

		sch->q.qlen--;
		qdisc_qstats_drop(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
	}

	/* Return id of the bucket from which the packet was dropped. */
	return bucket - q->buckets;
}

static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	hhf_drop(sch);
	return prev_backlog - sch->qstats.backlog;
}

static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	enum wdrr_bucket_idx idx;
	struct wdrr_bucket *bucket;

	idx = hhf_classify(skb, sch);

	bucket = &q->buckets[idx];
	bucket_add(bucket, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&bucket->bucketchain)) {
		unsigned int weight;

		/* The logic of new_buckets vs. old_buckets is the same as
		 * new_flows vs. old_flows in the implementation of fq_codel,
		 * i.e., short bursts of non-HHs should have strict priority.
		 */
		if (idx == WDRR_BUCKET_FOR_HH) {
			/* Always move heavy-hitters to old bucket. */
			weight = 1;
			list_add_tail(&bucket->bucketchain, &q->old_buckets);
		} else {
			weight = q->hhf_non_hh_weight;
			list_add_tail(&bucket->bucketchain, &q->new_buckets);
		}
		bucket->deficit = weight * q->quantum;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet from this
	 * bucket.
	 */
	if (hhf_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let the upper stack know this. */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct wdrr_bucket *bucket;
	struct list_head *head;

begin:
	head = &q->new_buckets;
	if (list_empty(head)) {
		head = &q->old_buckets;
		if (list_empty(head))
			return NULL;
	}
	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

	if (bucket->deficit <= 0) {
		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
			      1 : q->hhf_non_hh_weight;

		bucket->deficit += weight * q->quantum;
		list_move_tail(&bucket->bucketchain, &q->old_buckets);
		goto begin;
	}

	if (bucket->head) {
		skb = dequeue_head(bucket);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
	}

	if (!skb) {
		/* Force a pass through old_buckets to prevent starvation. */
		if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
			list_move_tail(&bucket->bucketchain, &q->old_buckets);
		else
			list_del_init(&bucket->bucketchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	bucket->deficit -= qdisc_pkt_len(skb);

	return skb;
}
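
/* Worked example of the WDRR accounting above, assuming the defaults
 * (quantum = MTU, e.g. 1514 bytes, and non_hh_weight = 2): when both buckets
 * stay backlogged, each visit lets a bucket send until its deficit drops to
 * zero or below and then refills it by weight * quantum, so the non-HH bucket
 * sends roughly 2 * 1514 bytes per round versus 1514 bytes for the HH bucket,
 * i.e. about a 2:1 bandwidth split in favor of non-HHs.
 */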

static void hhf_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = hhf_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void *hhf_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);

	return ptr;
}

static void hhf_free(void *addr)
{
	kvfree(addr);
}

static void hhf_destroy(struct Qdisc *sch)
{
	int i;
	struct hhf_sched_data *q = qdisc_priv(sch);

	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		hhf_free(q->hhf_arrays[i]);
		hhf_free(q->hhf_valid_bits[i]);
	}

	for (i = 0; i < HH_FLOWS_CNT; i++) {
		struct hh_flow_state *flow, *next;
		struct list_head *head = &q->hh_flows[i];

		if (list_empty(head))
			continue;
		list_for_each_entry_safe(flow, next, head, flowchain) {
			list_del(&flow->flowchain);
			kfree(flow);
		}
	}
	hhf_free(q->hh_flows);
}

static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
	[TCA_HHF_BACKLOG_LIMIT]	 = { .type = NLA_U32 },
	[TCA_HHF_QUANTUM]	 = { .type = NLA_U32 },
	[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
	[TCA_HHF_RESET_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_ADMIT_BYTES]	 = { .type = NLA_U32 },
	[TCA_HHF_EVICT_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
};

static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HHF_MAX + 1];
	unsigned int qlen;
	int err;
	u64 non_hh_quantum;
	u32 new_quantum = q->quantum;
	u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HHF_QUANTUM])
		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

	if (tb[TCA_HHF_NON_HH_WEIGHT])
		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
	if (non_hh_quantum > INT_MAX)
		return -EINVAL;

	sch_tree_lock(sch);

	if (tb[TCA_HHF_BACKLOG_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);

	q->quantum = new_quantum;
	q->hhf_non_hh_weight = new_hhf_non_hh_weight;

	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);

	if (tb[TCA_HHF_RESET_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

		q->hhf_reset_timeout = usecs_to_jiffies(us);
	}

	if (tb[TCA_HHF_ADMIT_BYTES])
		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);

	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

		q->hhf_evict_timeout = usecs_to_jiffies(us);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = hhf_dequeue(sch);

		kfree_skb(skb);
	}
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	sch_tree_unlock(sch);
	return 0;
}
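
/* Example configuration from user space (a sketch, assuming an iproute2
 * build with hhf support; the values shown match the compiled-in defaults
 * below):
 *
 *   tc qdisc add dev eth0 root hhf limit 1000 quantum 1514 hh_limit 2048 \
 *      reset_timeout 40ms admit_bytes 128kb evict_timeout 1s non_hh_weight 2
 *
 * The two timeouts arrive in this handler as microseconds and are converted
 * to jiffies via usecs_to_jiffies() above.
 */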

static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 1000;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_buckets);
	INIT_LIST_HEAD(&q->old_buckets);

	/* Configurable HHF parameters */
	q->hhf_reset_timeout = HZ / 25;	/* 40 ms */
	q->hhf_admit_bytes = 131072;	/* 128 KB */
	q->hhf_evict_timeout = HZ;	/* 1 sec */
	q->hhf_non_hh_weight = 2;

	if (opt) {
		int err = hhf_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->hh_flows) {
		/* Initialize heavy-hitter flow table. */
		q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
					 sizeof(struct list_head));
		if (!q->hh_flows)
			return -ENOMEM;
		for (i = 0; i < HH_FLOWS_CNT; i++)
			INIT_LIST_HEAD(&q->hh_flows[i]);

		/* Cap max active HHs at twice len of hh_flows table. */
		q->hh_flows_limit = 2 * HH_FLOWS_CNT;
		q->hh_flows_overlimit = 0;
		q->hh_flows_total_cnt = 0;
		q->hh_flows_current_cnt = 0;

		/* Initialize heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
						      sizeof(u32));
			if (!q->hhf_arrays[i]) {
				hhf_destroy(sch);
				return -ENOMEM;
			}
		}
		q->hhf_arrays_reset_timestamp = hhf_time_stamp();

		/* Initialize valid bits of heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
							  BITS_PER_BYTE);
			if (!q->hhf_valid_bits[i]) {
				hhf_destroy(sch);
				return -ENOMEM;
			}
		}

		/* Initialize Weighted DRR buckets. */
		for (i = 0; i < WDRR_BUCKET_CNT; i++) {
			struct wdrr_bucket *bucket = q->buckets + i;

			INIT_LIST_HEAD(&bucket->bucketchain);
		}
	}

	return 0;
}

static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
			jiffies_to_usecs(q->hhf_reset_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
			jiffies_to_usecs(q->hhf_evict_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct tc_hhf_xstats st = {
		.drop_overlimit	= q->drop_overlimit,
		.hh_overlimit	= q->hh_flows_overlimit,
		.hh_tot_count	= q->hh_flows_total_cnt,
		.hh_cur_count	= q->hh_flows_current_cnt,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
	.id		= "hhf",
	.priv_size	= sizeof(struct hhf_sched_data),

	.enqueue	= hhf_enqueue,
	.dequeue	= hhf_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hhf_qdisc_drop,
	.init		= hhf_init,
	.reset		= hhf_reset,
	.destroy	= hhf_destroy,
	.change		= hhf_change,
	.dump		= hhf_dump,
	.dump_stats	= hhf_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init hhf_module_init(void)
{
	return register_qdisc(&hhf_qdisc_ops);
}

static void __exit hhf_module_exit(void)
{
	unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");