1 /*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
21 */
22
23 #include <linux/sched.h>
24 #include <linux/latencytop.h>
25 #include <linux/cpumask.h>
26 #include <linux/cpuidle.h>
27 #include <linux/slab.h>
28 #include <linux/profile.h>
29 #include <linux/interrupt.h>
30 #include <linux/mempolicy.h>
31 #include <linux/migrate.h>
32 #include <linux/task_work.h>
33
34 #include <trace/events/sched.h>
35
36 #include "sched.h"
37
38 /*
39 * Targeted preemption latency for CPU-bound tasks:
40 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
41 *
42 * NOTE: this latency value is not the same as the concept of
43 * 'timeslice length' - timeslices in CFS are of variable length
44 * and have no persistent notion like in traditional, time-slice
45 * based scheduling concepts.
46 *
47 * (to see the precise effective timeslice length of your workload,
48 * run vmstat and monitor the context-switches (cs) field)
49 */
50 unsigned int sysctl_sched_latency = 6000000ULL;
51 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
52
53 /*
54 * The initial- and re-scaling of tunables is configurable
55 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
56 *
57 * Options are:
58 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
59 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
60 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
61 */
62 enum sched_tunable_scaling sysctl_sched_tunable_scaling
63 = SCHED_TUNABLESCALING_LOG;
64
65 /*
66 * Minimal preemption granularity for CPU-bound tasks:
67 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
68 */
69 unsigned int sysctl_sched_min_granularity = 750000ULL;
70 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
71
72 /*
73 * This is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
74 */
75 static unsigned int sched_nr_latency = 8;
76
77 /*
78 * After fork, child runs first. If set to 0 (default) then
79 * parent will (try to) run first.
80 */
81 unsigned int sysctl_sched_child_runs_first __read_mostly;
82
83 /*
84 * SCHED_OTHER wake-up granularity.
85 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
86 *
87 * This option delays the preemption effects of decoupled workloads
88 * and reduces their over-scheduling. Synchronous workloads will still
89 * have immediate wakeup/sleep latencies.
90 */
91 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
92 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
93
94 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
95
96 /*
97 * The exponential sliding window over which load is averaged for shares
98 * distribution.
99 * (default: 10msec)
100 */
101 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
102
103 #ifdef CONFIG_CFS_BANDWIDTH
104 /*
105 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
106 * each time a cfs_rq requests quota.
107 *
108 * Note: in the case that the slice exceeds the runtime remaining (either due
109 * to consumption or the quota being specified to be smaller than the slice)
110 * we will always only issue the remaining available time.
111 *
112 * default: 5 msec, units: microseconds
113 */
114 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
115 #endif
116
117 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
118 {
119 lw->weight += inc;
120 lw->inv_weight = 0;
121 }
122
123 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
124 {
125 lw->weight -= dec;
126 lw->inv_weight = 0;
127 }
128
129 static inline void update_load_set(struct load_weight *lw, unsigned long w)
130 {
131 lw->weight = w;
132 lw->inv_weight = 0;
133 }
134
135 /*
136 * Increase the granularity value when there are more CPUs,
137 * because with more CPUs the 'effective latency' as visible
138 * to users decreases. But the relationship is not linear,
139 * so pick a second-best guess by going with the log2 of the
140 * number of CPUs.
141 *
142 * This idea comes from the SD scheduler of Con Kolivas:
143 */
144 static unsigned int get_update_sysctl_factor(void)
145 {
146 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
147 unsigned int factor;
148
149 switch (sysctl_sched_tunable_scaling) {
150 case SCHED_TUNABLESCALING_NONE:
151 factor = 1;
152 break;
153 case SCHED_TUNABLESCALING_LINEAR:
154 factor = cpus;
155 break;
156 case SCHED_TUNABLESCALING_LOG:
157 default:
158 factor = 1 + ilog2(cpus);
159 break;
160 }
161
162 return factor;
163 }
164
165 static void update_sysctl(void)
166 {
167 unsigned int factor = get_update_sysctl_factor();
168
169 #define SET_SYSCTL(name) \
170 (sysctl_##name = (factor) * normalized_sysctl_##name)
171 SET_SYSCTL(sched_min_granularity);
172 SET_SYSCTL(sched_latency);
173 SET_SYSCTL(sched_wakeup_granularity);
174 #undef SET_SYSCTL
175 }
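/*
 * Worked example: on a machine with 8 or more online CPUs (cpus is
 * capped at 8) the default SCHED_TUNABLESCALING_LOG policy gives
 * factor = 1 + ilog2(8) = 4, so the effective tunables become:
 *
 *   sysctl_sched_latency            = 4 * 6ms    = 24ms
 *   sysctl_sched_min_granularity    = 4 * 0.75ms =  3ms
 *   sysctl_sched_wakeup_granularity = 4 * 1ms    =  4ms
 */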
176
177 void sched_init_granularity(void)
178 {
179 update_sysctl();
180 }
181
182 #define WMULT_CONST (~0U)
183 #define WMULT_SHIFT 32
184
185 static void __update_inv_weight(struct load_weight *lw)
186 {
187 unsigned long w;
188
189 if (likely(lw->inv_weight))
190 return;
191
192 w = scale_load_down(lw->weight);
193
194 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
195 lw->inv_weight = 1;
196 else if (unlikely(!w))
197 lw->inv_weight = WMULT_CONST;
198 else
199 lw->inv_weight = WMULT_CONST / w;
200 }
201
202 /*
203 * delta_exec * weight / lw.weight
204 * OR
205 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
206 *
207 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
208 * we're guaranteed shift stays positive because inv_weight is guaranteed to
209 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
210 *
211 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
212 * weight/lw.weight <= 1, and therefore our shift will also be positive.
213 */
214 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
215 {
216 u64 fact = scale_load_down(weight);
217 int shift = WMULT_SHIFT;
218
219 __update_inv_weight(lw);
220
221 if (unlikely(fact >> 32)) {
222 while (fact >> 32) {
223 fact >>= 1;
224 shift--;
225 }
226 }
227
228 /* hint to use a 32x32->64 mul */
229 fact = (u64)(u32)fact * lw->inv_weight;
230
231 while (fact >> 32) {
232 fact >>= 1;
233 shift--;
234 }
235
236 return mul_u64_u32_shr(delta_exec, fact, shift);
237 }
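/*
 * Worked example of the fixed-point math above: computing a nice-0
 * entity's share of a runqueue holding three nice-0 tasks, i.e.
 * delta * 1024 / 3072.  Here lw->weight = 3072, so inv_weight =
 * 0xffffffff / 3072 = 1398101, and fact = 1024 * 1398101 =
 * 1431655424 (< 2^32, so shift stays 32):
 *
 *   result = delta * 1431655424 >> 32 ~= delta * 0.33333 = delta / 3
 *
 * matching the exact division to within rounding error.
 */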
238
239
240 const struct sched_class fair_sched_class;
241
242 /**************************************************************
243 * CFS operations on generic schedulable entities:
244 */
245
246 #ifdef CONFIG_FAIR_GROUP_SCHED
247
248 /* cpu runqueue to which this cfs_rq is attached */
249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
250 {
251 return cfs_rq->rq;
252 }
253
254 /* An entity is a task if it doesn't "own" a runqueue */
255 #define entity_is_task(se) (!se->my_q)
256
257 static inline struct task_struct *task_of(struct sched_entity *se)
258 {
259 #ifdef CONFIG_SCHED_DEBUG
260 WARN_ON_ONCE(!entity_is_task(se));
261 #endif
262 return container_of(se, struct task_struct, se);
263 }
264
265 /* Walk up scheduling entities hierarchy */
266 #define for_each_sched_entity(se) \
267 for (; se; se = se->parent)
268
269 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
270 {
271 return p->se.cfs_rq;
272 }
273
274 /* runqueue on which this entity is (to be) queued */
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
276 {
277 return se->cfs_rq;
278 }
279
280 /* runqueue "owned" by this group */
281 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
282 {
283 return grp->my_q;
284 }
285
286 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287 {
288 if (!cfs_rq->on_list) {
289 /*
290 * Ensure we either appear before our parent (if already
291 * enqueued) or force our parent to appear after us when it is
292 * enqueued. The fact that we always enqueue bottom-up
293 * reduces this to two cases.
294 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 }
303
304 cfs_rq->on_list = 1;
305 }
306 }
307
308 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
309 {
310 if (cfs_rq->on_list) {
311 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
312 cfs_rq->on_list = 0;
313 }
314 }
315
316 /* Iterate through all leaf cfs_rq's on a runqueue */
317 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
318 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
319
320 /* Do the two (enqueued) entities belong to the same group ? */
321 static inline struct cfs_rq *
322 is_same_group(struct sched_entity *se, struct sched_entity *pse)
323 {
324 if (se->cfs_rq == pse->cfs_rq)
325 return se->cfs_rq;
326
327 return NULL;
328 }
329
330 static inline struct sched_entity *parent_entity(struct sched_entity *se)
331 {
332 return se->parent;
333 }
334
335 static void
336 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
337 {
338 int se_depth, pse_depth;
339
340 /*
341 * A preemption test can be made between sibling entities that are in the
342 * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
343 * both tasks until we find ancestors that are siblings of a common
344 * parent.
345 */
346
347 /* First walk up until both entities are at same depth */
348 se_depth = (*se)->depth;
349 pse_depth = (*pse)->depth;
350
351 while (se_depth > pse_depth) {
352 se_depth--;
353 *se = parent_entity(*se);
354 }
355
356 while (pse_depth > se_depth) {
357 pse_depth--;
358 *pse = parent_entity(*pse);
359 }
360
361 while (!is_same_group(*se, *pse)) {
362 *se = parent_entity(*se);
363 *pse = parent_entity(*pse);
364 }
365 }
366
367 #else /* !CONFIG_FAIR_GROUP_SCHED */
368
369 static inline struct task_struct *task_of(struct sched_entity *se)
370 {
371 return container_of(se, struct task_struct, se);
372 }
373
374 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
375 {
376 return container_of(cfs_rq, struct rq, cfs);
377 }
378
379 #define entity_is_task(se) 1
380
381 #define for_each_sched_entity(se) \
382 for (; se; se = NULL)
383
384 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
385 {
386 return &task_rq(p)->cfs;
387 }
388
389 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
390 {
391 struct task_struct *p = task_of(se);
392 struct rq *rq = task_rq(p);
393
394 return &rq->cfs;
395 }
396
397 /* runqueue "owned" by this group */
398 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
399 {
400 return NULL;
401 }
402
403 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
404 {
405 }
406
407 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
408 {
409 }
410
411 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
412 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
413
414 static inline struct sched_entity *parent_entity(struct sched_entity *se)
415 {
416 return NULL;
417 }
418
419 static inline void
420 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
421 {
422 }
423
424 #endif /* CONFIG_FAIR_GROUP_SCHED */
425
426 static __always_inline
427 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
428
429 /**************************************************************
430 * Scheduling class tree data structure manipulation methods:
431 */
432
433 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
434 {
435 s64 delta = (s64)(vruntime - max_vruntime);
436 if (delta > 0)
437 max_vruntime = vruntime;
438
439 return max_vruntime;
440 }
441
442 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
443 {
444 s64 delta = (s64)(vruntime - min_vruntime);
445 if (delta < 0)
446 min_vruntime = vruntime;
447
448 return min_vruntime;
449 }
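/*
 * Both helpers above compare through a signed delta so that they keep
 * working when vruntime wraps around u64.  E.g. with max_vruntime =
 * ULLONG_MAX - 2 and vruntime = 5, the unsigned difference
 * 5 - (ULLONG_MAX - 2) wraps to 8, which as s64 is > 0, so the wrapped
 * value 5 is correctly treated as the later (larger) vruntime.
 */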
450
451 static inline int entity_before(struct sched_entity *a,
452 struct sched_entity *b)
453 {
454 return (s64)(a->vruntime - b->vruntime) < 0;
455 }
456
457 static void update_min_vruntime(struct cfs_rq *cfs_rq)
458 {
459 u64 vruntime = cfs_rq->min_vruntime;
460
461 if (cfs_rq->curr)
462 vruntime = cfs_rq->curr->vruntime;
463
464 if (cfs_rq->rb_leftmost) {
465 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
466 struct sched_entity,
467 run_node);
468
469 if (!cfs_rq->curr)
470 vruntime = se->vruntime;
471 else
472 vruntime = min_vruntime(vruntime, se->vruntime);
473 }
474
475 /* ensure we never gain time by being placed backwards. */
476 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
477 #ifndef CONFIG_64BIT
478 smp_wmb();
479 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
480 #endif
481 }
482
483 /*
484 * Enqueue an entity into the rb-tree:
485 */
486 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
487 {
488 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
489 struct rb_node *parent = NULL;
490 struct sched_entity *entry;
491 int leftmost = 1;
492
493 /*
494 * Find the right place in the rbtree:
495 */
496 while (*link) {
497 parent = *link;
498 entry = rb_entry(parent, struct sched_entity, run_node);
499 /*
500 * We don't care about collisions. Nodes with
501 * the same key stay together.
502 */
503 if (entity_before(se, entry)) {
504 link = &parent->rb_left;
505 } else {
506 link = &parent->rb_right;
507 leftmost = 0;
508 }
509 }
510
511 /*
512 * Maintain a cache of leftmost tree entries (it is frequently
513 * used):
514 */
515 if (leftmost)
516 cfs_rq->rb_leftmost = &se->run_node;
517
518 rb_link_node(&se->run_node, parent, link);
519 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
520 }
521
522 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
523 {
524 if (cfs_rq->rb_leftmost == &se->run_node) {
525 struct rb_node *next_node;
526
527 next_node = rb_next(&se->run_node);
528 cfs_rq->rb_leftmost = next_node;
529 }
530
531 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
532 }
533
534 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
535 {
536 struct rb_node *left = cfs_rq->rb_leftmost;
537
538 if (!left)
539 return NULL;
540
541 return rb_entry(left, struct sched_entity, run_node);
542 }
543
544 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
545 {
546 struct rb_node *next = rb_next(&se->run_node);
547
548 if (!next)
549 return NULL;
550
551 return rb_entry(next, struct sched_entity, run_node);
552 }
553
554 #ifdef CONFIG_SCHED_DEBUG
555 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
556 {
557 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
558
559 if (!last)
560 return NULL;
561
562 return rb_entry(last, struct sched_entity, run_node);
563 }
564
565 /**************************************************************
566 * Scheduling class statistics methods:
567 */
568
569 int sched_proc_update_handler(struct ctl_table *table, int write,
570 void __user *buffer, size_t *lenp,
571 loff_t *ppos)
572 {
573 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
574 unsigned int factor = get_update_sysctl_factor();
575
576 if (ret || !write)
577 return ret;
578
579 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
580 sysctl_sched_min_granularity);
581
582 #define WRT_SYSCTL(name) \
583 (normalized_sysctl_##name = sysctl_##name / (factor))
584 WRT_SYSCTL(sched_min_granularity);
585 WRT_SYSCTL(sched_latency);
586 WRT_SYSCTL(sched_wakeup_granularity);
587 #undef WRT_SYSCTL
588
589 return 0;
590 }
591 #endif
592
593 /*
594 * delta /= w
595 */
596 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
597 {
598 if (unlikely(se->load.weight != NICE_0_LOAD))
599 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
600
601 return delta;
602 }
603
604 /*
605 * The idea is to set a period in which each task runs once.
606 *
607 * When there are too many tasks (sched_nr_latency) we have to stretch
608 * this period because otherwise the slices get too small.
609 *
610 * p = (nr <= nl) ? l : l*nr/nl
611 */
612 static u64 __sched_period(unsigned long nr_running)
613 {
614 if (unlikely(nr_running > sched_nr_latency))
615 return nr_running * sysctl_sched_min_granularity;
616 else
617 return sysctl_sched_latency;
618 }
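/*
 * E.g. with the default (unscaled) tunables and sched_nr_latency = 8:
 * 4 runnable tasks fit in one 6ms latency period, but 16 tasks would
 * stretch the period to 16 * 0.75ms = 12ms so that no slice drops
 * below the minimum granularity.
 */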
619
620 /*
621 * We calculate the wall-time slice from the period by taking a part
622 * proportional to the weight.
623 *
624 * s = p*P[w/rw]
625 */
626 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
627 {
628 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
629
630 for_each_sched_entity(se) {
631 struct load_weight *load;
632 struct load_weight lw;
633
634 cfs_rq = cfs_rq_of(se);
635 load = &cfs_rq->load;
636
637 if (unlikely(!se->on_rq)) {
638 lw = cfs_rq->load;
639
640 update_load_add(&lw, se->load.weight);
641 load = &lw;
642 }
643 slice = __calc_delta(slice, se->load.weight, load);
644 }
645 return slice;
646 }
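/*
 * E.g. for two nice-0 tasks (weight 1024 each) in a 6ms period, each
 * gets s = 6ms * 1024/2048 = 3ms.  For a nice-0 task next to a nice-5
 * task (weight 335 in the prio_to_weight[] table), the shares become
 * 6ms * 1024/1359 ~= 4.5ms and 6ms * 335/1359 ~= 1.5ms respectively.
 */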
647
648 /*
649 * We calculate the vruntime slice of a to-be-inserted task.
650 *
651 * vs = s/w
652 */
653 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
654 {
655 return calc_delta_fair(sched_slice(cfs_rq, se), se);
656 }
657
658 #ifdef CONFIG_SMP
659 static int select_idle_sibling(struct task_struct *p, int cpu);
660 static unsigned long task_h_load(struct task_struct *p);
661
662 /*
663 * We choose a half-life close to 1 scheduling period.
664 * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
665 * dependent on this value.
666 */
667 #define LOAD_AVG_PERIOD 32
668 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
669 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
670
671 /* Give a new sched_entity initial runnable averages so it carries full load during its infancy */
672 void init_entity_runnable_average(struct sched_entity *se)
673 {
674 struct sched_avg *sa = &se->avg;
675
676 sa->last_update_time = 0;
677 /*
678 * sched_avg's period_contrib should be strictly less than 1024, so
679 * we give it 1023 to make sure it is almost a full period (1024us), and
680 * will definitely be updated (after enqueue).
681 */
682 sa->period_contrib = 1023;
683 sa->load_avg = scale_load_down(se->load.weight);
684 sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
685 sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
686 sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
687 /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
688 }
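/*
 * E.g. a new nice-0 task starts with load_avg = 1024 and load_sum =
 * 1024 * LOAD_AVG_MAX, i.e. it initially looks like a task that has
 * been runnable 100% of the time; the average then decays toward the
 * task's real behaviour once it starts running.
 */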
689
690 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
691 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
692 #else
693 void init_entity_runnable_average(struct sched_entity *se)
694 {
695 }
696 #endif
697
698 /*
699 * Update the current task's runtime statistics.
700 */
701 static void update_curr(struct cfs_rq *cfs_rq)
702 {
703 struct sched_entity *curr = cfs_rq->curr;
704 u64 now = rq_clock_task(rq_of(cfs_rq));
705 u64 delta_exec;
706
707 if (unlikely(!curr))
708 return;
709
710 delta_exec = now - curr->exec_start;
711 if (unlikely((s64)delta_exec <= 0))
712 return;
713
714 curr->exec_start = now;
715
716 schedstat_set(curr->statistics.exec_max,
717 max(delta_exec, curr->statistics.exec_max));
718
719 curr->sum_exec_runtime += delta_exec;
720 schedstat_add(cfs_rq, exec_clock, delta_exec);
721
722 curr->vruntime += calc_delta_fair(delta_exec, curr);
723 update_min_vruntime(cfs_rq);
724
725 if (entity_is_task(curr)) {
726 struct task_struct *curtask = task_of(curr);
727
728 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
729 cpuacct_charge(curtask, delta_exec);
730 account_group_exec_runtime(curtask, delta_exec);
731 }
732
733 account_cfs_rq_runtime(cfs_rq, delta_exec);
734 }
735
736 static void update_curr_fair(struct rq *rq)
737 {
738 update_curr(cfs_rq_of(&rq->curr->se));
739 }
740
741 #ifdef CONFIG_SCHEDSTATS
742 static inline void
743 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
744 {
745 u64 wait_start = rq_clock(rq_of(cfs_rq));
746
747 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
748 likely(wait_start > se->statistics.wait_start))
749 wait_start -= se->statistics.wait_start;
750
751 se->statistics.wait_start = wait_start;
752 }
753
754 static void
755 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
756 {
757 struct task_struct *p;
758 u64 delta;
759
760 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
761
762 if (entity_is_task(se)) {
763 p = task_of(se);
764 if (task_on_rq_migrating(p)) {
765 /*
766 * Preserve migrating task's wait time so wait_start
767 * time stamp can be adjusted to accumulate wait time
768 * prior to migration.
769 */
770 se->statistics.wait_start = delta;
771 return;
772 }
773 trace_sched_stat_wait(p, delta);
774 }
775
776 se->statistics.wait_max = max(se->statistics.wait_max, delta);
777 se->statistics.wait_count++;
778 se->statistics.wait_sum += delta;
779 se->statistics.wait_start = 0;
780 }
781
782 /*
783 * Task is being enqueued - update stats:
784 */
785 static inline void
786 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
787 {
788 /*
789 * Are we enqueueing a waiting task? (for current tasks
790 * a dequeue/enqueue event is a NOP)
791 */
792 if (se != cfs_rq->curr)
793 update_stats_wait_start(cfs_rq, se);
794 }
795
796 static inline void
797 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
798 {
799 /*
800 * Mark the end of the wait period if dequeueing a
801 * waiting task:
802 */
803 if (se != cfs_rq->curr)
804 update_stats_wait_end(cfs_rq, se);
805
806 if (flags & DEQUEUE_SLEEP) {
807 if (entity_is_task(se)) {
808 struct task_struct *tsk = task_of(se);
809
810 if (tsk->state & TASK_INTERRUPTIBLE)
811 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
812 if (tsk->state & TASK_UNINTERRUPTIBLE)
813 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
814 }
815 }
816
817 }
818 #else
819 static inline void
820 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
821 {
822 }
823
824 static inline void
825 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
826 {
827 }
828
829 static inline void
830 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
831 {
832 }
833
834 static inline void
835 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
836 {
837 }
838 #endif
839
840 /*
841 * We are picking a new current task - update its stats:
842 */
843 static inline void
844 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
845 {
846 /*
847 * We are starting a new run period:
848 */
849 se->exec_start = rq_clock_task(rq_of(cfs_rq));
850 }
851
852 /**************************************************
853 * Scheduling class queueing methods:
854 */
855
856 #ifdef CONFIG_NUMA_BALANCING
857 /*
858 * Approximate time to scan a whole NUMA task's address space, in ms.
859 * The task scan period is calculated based on the task's virtual memory
860 * size and numa_balancing_scan_size.
861 */
862 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
863 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
864
865 /* Portion of address space to scan in MB */
866 unsigned int sysctl_numa_balancing_scan_size = 256;
867
868 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
869 unsigned int sysctl_numa_balancing_scan_delay = 1000;
870
871 static unsigned int task_nr_scan_windows(struct task_struct *p)
872 {
873 unsigned long rss = 0;
874 unsigned long nr_scan_pages;
875
876 /*
877 * Calculations based on RSS as non-present and empty pages are skipped
878 * by the PTE scanner and NUMA hinting faults should be trapped based
879 * on resident pages
880 */
881 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
882 rss = get_mm_rss(p->mm);
883 if (!rss)
884 rss = nr_scan_pages;
885
886 rss = round_up(rss, nr_scan_pages);
887 return rss / nr_scan_pages;
888 }
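/*
 * E.g. with the default scan_size of 256MB and 4K pages, nr_scan_pages
 * = 256 << 8 = 65536 pages.  A task with a 1GB RSS (262144 pages) thus
 * needs 262144 / 65536 = 4 scan windows to cover its resident memory.
 */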
889
890 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
891 #define MAX_SCAN_WINDOW 2560
892
893 static unsigned int task_scan_min(struct task_struct *p)
894 {
895 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
896 unsigned int scan, floor;
897 unsigned int windows = 1;
898
899 if (scan_size < MAX_SCAN_WINDOW)
900 windows = MAX_SCAN_WINDOW / scan_size;
901 floor = 1000 / windows;
902
903 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
904 return max_t(unsigned int, floor, scan);
905 }
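/*
 * E.g. with scan_size = 256MB, windows = 2560/256 = 10 and the floor
 * is 1000/10 = 100ms.  For the 1GB-RSS task above (4 scan windows),
 * scan = 1000ms / 4 = 250ms, so its minimum scan period is 250ms.
 */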
906
907 static unsigned int task_scan_max(struct task_struct *p)
908 {
909 unsigned int smin = task_scan_min(p);
910 unsigned int smax;
911
912 /* Watch for min being lower than max due to floor calculations */
913 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
914 return max(smin, smax);
915 }
916
917 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
918 {
919 rq->nr_numa_running += (p->numa_preferred_nid != -1);
920 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
921 }
922
923 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
924 {
925 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
926 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
927 }
928
929 struct numa_group {
930 atomic_t refcount;
931
932 spinlock_t lock; /* nr_tasks, tasks */
933 int nr_tasks;
934 pid_t gid;
935 int active_nodes;
936
937 struct rcu_head rcu;
938 unsigned long total_faults;
939 unsigned long max_faults_cpu;
940 /*
941 * Faults_cpu is used to decide whether memory should move
942 * towards the CPU. As a consequence, these stats are weighted
943 * more by CPU use than by memory faults.
944 */
945 unsigned long *faults_cpu;
946 unsigned long faults[0];
947 };
948
949 /* Shared or private faults. */
950 #define NR_NUMA_HINT_FAULT_TYPES 2
951
952 /* Memory and CPU locality */
953 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
954
955 /* Averaged statistics, and temporary buffers. */
956 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
957
958 pid_t task_numa_group_id(struct task_struct *p)
959 {
960 return p->numa_group ? p->numa_group->gid : 0;
961 }
962
963 /*
964 * The averaged statistics, shared & private, memory & cpu,
965 * occupy the first half of the array. The second half of the
966 * array is for current counters, which are averaged into the
967 * first set by task_numa_placement.
968 */
969 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
970 {
971 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
972 }
973
974 static inline unsigned long task_faults(struct task_struct *p, int nid)
975 {
976 if (!p->numa_faults)
977 return 0;
978
979 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
980 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
981 }
982
983 static inline unsigned long group_faults(struct task_struct *p, int nid)
984 {
985 if (!p->numa_group)
986 return 0;
987
988 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
989 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
990 }
991
992 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
993 {
994 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
995 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
996 }
997
998 /*
999 * A node triggering more than 1/3 as many NUMA faults as the maximum is
1000 * considered part of a numa group's pseudo-interleaving set. Migrations
1001 * between these nodes are slowed down, to allow things to settle down.
1002 */
1003 #define ACTIVE_NODE_FRACTION 3
1004
1005 static bool numa_is_active_node(int nid, struct numa_group *ng)
1006 {
1007 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1008 }
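/*
 * E.g. if the busiest node observed max_faults_cpu = 900, any node
 * with more than 300 CPU faults (since 301 * 3 > 900) counts as part
 * of the group's active node set.
 */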
1009
1010 /* Handle placement on systems where not all nodes are directly connected. */
1011 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1012 int maxdist, bool task)
1013 {
1014 unsigned long score = 0;
1015 int node;
1016
1017 /*
1018 * All nodes are directly connected, and the same distance
1019 * from each other. No need for fancy placement algorithms.
1020 */
1021 if (sched_numa_topology_type == NUMA_DIRECT)
1022 return 0;
1023
1024 /*
1025 * This code is called for each node, introducing N^2 complexity,
1026 * which should be ok given the number of nodes rarely exceeds 8.
1027 */
1028 for_each_online_node(node) {
1029 unsigned long faults;
1030 int dist = node_distance(nid, node);
1031
1032 /*
1033 * The furthest away nodes in the system are not interesting
1034 * for placement; nid was already counted.
1035 */
1036 if (dist == sched_max_numa_distance || node == nid)
1037 continue;
1038
1039 /*
1040 * On systems with a backplane NUMA topology, compare groups
1041 * of nodes, and move tasks towards the group with the most
1042 * memory accesses. When comparing two nodes at distance
1043 * "hoplimit", only nodes closer by than "hoplimit" are part
1044 * of each group. Skip other nodes.
1045 */
1046 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1047 dist > maxdist)
1048 continue;
1049
1050 /* Add up the faults from nearby nodes. */
1051 if (task)
1052 faults = task_faults(p, node);
1053 else
1054 faults = group_faults(p, node);
1055
1056 /*
1057 * On systems with a glueless mesh NUMA topology, there are
1058 * no fixed "groups of nodes". Instead, nodes that are not
1059 * directly connected bounce traffic through intermediate
1060 * nodes; a numa_group can occupy any set of nodes.
1061 * The further away a node is, the less the faults count.
1062 * This seems to result in good task placement.
1063 */
1064 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1065 faults *= (sched_max_numa_distance - dist);
1066 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1067 }
1068
1069 score += faults;
1070 }
1071
1072 return score;
1073 }
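/*
 * E.g. on a glueless mesh with sched_max_numa_distance = 80 and the
 * usual LOCAL_DISTANCE of 10, a node at distance 40 holding 100 faults
 * contributes 100 * (80 - 40) / (80 - 10) ~= 57 to the score, while an
 * adjacent node at distance 20 would contribute ~85.
 */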
1074
1075 /*
1076 * These return the fraction of accesses done by a particular task, or
1077 * task group, on a particular numa node. The group weight is given a
1078 * larger multiplier, in order to group tasks together that are almost
1079 * evenly spread out between numa nodes.
1080 */
1081 static inline unsigned long task_weight(struct task_struct *p, int nid,
1082 int dist)
1083 {
1084 unsigned long faults, total_faults;
1085
1086 if (!p->numa_faults)
1087 return 0;
1088
1089 total_faults = p->total_numa_faults;
1090
1091 if (!total_faults)
1092 return 0;
1093
1094 faults = task_faults(p, nid);
1095 faults += score_nearby_nodes(p, nid, dist, true);
1096
1097 return 1000 * faults / total_faults;
1098 }
1099
1100 static inline unsigned long group_weight(struct task_struct *p, int nid,
1101 int dist)
1102 {
1103 unsigned long faults, total_faults;
1104
1105 if (!p->numa_group)
1106 return 0;
1107
1108 total_faults = p->numa_group->total_faults;
1109
1110 if (!total_faults)
1111 return 0;
1112
1113 faults = group_faults(p, nid);
1114 faults += score_nearby_nodes(p, nid, dist, false);
1115
1116 return 1000 * faults / total_faults;
1117 }
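/*
 * E.g. a task with 400 of its 1000 recorded faults (plus the
 * nearby-node score) on node nid gets task_weight = 1000 * 400 / 1000
 * = 400, i.e. weights are expressed in tenths of a percent of the
 * total faults.
 */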
1118
1119 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1120 int src_nid, int dst_cpu)
1121 {
1122 struct numa_group *ng = p->numa_group;
1123 int dst_nid = cpu_to_node(dst_cpu);
1124 int last_cpupid, this_cpupid;
1125
1126 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1127
1128 /*
1129 * Multi-stage node selection is used in conjunction with a periodic
1130 * migration fault to build a temporal task<->page relation. By using
1131 * a two-stage filter we remove short/unlikely relations.
1132 *
1133 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1134 * a task's usage of a particular page (n_p) per total usage of this
1135 * page (n_t) (in a given time-span) to a probability.
1136 *
1137 * Our periodic faults will sample this probability and getting the
1138 * same result twice in a row, given these samples are fully
1139 * independent, is then given by P(n)^2, provided our sample period
1140 * is sufficiently short compared to the usage pattern.
1141 *
1142 * This quadratic squishes small probabilities, making it less likely we
1143 * act on an unlikely task<->page relation.
1144 */
1145 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1146 if (!cpupid_pid_unset(last_cpupid) &&
1147 cpupid_to_nid(last_cpupid) != dst_nid)
1148 return false;
1149
1150 /* Always allow migrate on private faults */
1151 if (cpupid_match_pid(p, last_cpupid))
1152 return true;
1153
1154 /* A shared fault, but p->numa_group has not been set up yet. */
1155 if (!ng)
1156 return true;
1157
1158 /*
1159 * Destination node is much more heavily used than the source
1160 * node? Allow migration.
1161 */
1162 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1163 ACTIVE_NODE_FRACTION)
1164 return true;
1165
1166 /*
1167 * Distribute memory according to CPU & memory use on each node,
1168 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1169 *
1170 * faults_cpu(dst) 3 faults_cpu(src)
1171 * --------------- * - > ---------------
1172 * faults_mem(dst) 4 faults_mem(src)
1173 */
1174 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1175 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1176 }
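/*
 * E.g. with faults_cpu(dst) = 40, faults_cpu(src) = 10 and 30 memory
 * faults recorded by p on each node: 40 * 30 * 3 = 3600 > 10 * 30 * 4
 * = 1200, so the page is allowed to follow the group's CPU activity
 * to dst_nid despite the equal memory-fault counts.
 */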
1177
1178 static unsigned long weighted_cpuload(const int cpu);
1179 static unsigned long source_load(int cpu, int type);
1180 static unsigned long target_load(int cpu, int type);
1181 static unsigned long capacity_of(int cpu);
1182 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1183
1184 /* Cached statistics for all CPUs within a node */
1185 struct numa_stats {
1186 unsigned long nr_running;
1187 unsigned long load;
1188
1189 /* Total compute capacity of CPUs on a node */
1190 unsigned long compute_capacity;
1191
1192 /* Approximate capacity in terms of runnable tasks on a node */
1193 unsigned long task_capacity;
1194 int has_free_capacity;
1195 };
1196
1197 /*
1198 * XXX borrowed from update_sg_lb_stats
1199 */
1200 static void update_numa_stats(struct numa_stats *ns, int nid)
1201 {
1202 int smt, cpu, cpus = 0;
1203 unsigned long capacity;
1204
1205 memset(ns, 0, sizeof(*ns));
1206 for_each_cpu(cpu, cpumask_of_node(nid)) {
1207 struct rq *rq = cpu_rq(cpu);
1208
1209 ns->nr_running += rq->nr_running;
1210 ns->load += weighted_cpuload(cpu);
1211 ns->compute_capacity += capacity_of(cpu);
1212
1213 cpus++;
1214 }
1215
1216 /*
1217 * If we raced with hotplug and there are no CPUs left in our mask
1218 * the @ns structure is NULL'ed and task_numa_compare() will
1219 * not find this node attractive.
1220 *
1221 * We'll either bail at !has_free_capacity, or we'll detect a huge
1222 * imbalance and bail there.
1223 */
1224 if (!cpus)
1225 return;
1226
1227 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1228 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1229 capacity = cpus / smt; /* cores */
1230
1231 ns->task_capacity = min_t(unsigned, capacity,
1232 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1233 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1234 }
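/*
 * E.g. a node with 8 SMT-2 siblings of capacity ~589 each (the usual
 * x86 smt_gain split) has compute_capacity = 4712; smt =
 * DIV_ROUND_UP(1024 * 8, 4712) = 2, giving capacity = 8 / 2 = 4 cores
 * and task_capacity = min(4, DIV_ROUND_CLOSEST(4712, 1024)) = 4, so
 * has_free_capacity is true while fewer than 4 tasks run on the node.
 */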
1235
1236 struct task_numa_env {
1237 struct task_struct *p;
1238
1239 int src_cpu, src_nid;
1240 int dst_cpu, dst_nid;
1241
1242 struct numa_stats src_stats, dst_stats;
1243
1244 int imbalance_pct;
1245 int dist;
1246
1247 struct task_struct *best_task;
1248 long best_imp;
1249 int best_cpu;
1250 };
1251
1252 static void task_numa_assign(struct task_numa_env *env,
1253 struct task_struct *p, long imp)
1254 {
1255 if (env->best_task)
1256 put_task_struct(env->best_task);
1257
1258 env->best_task = p;
1259 env->best_imp = imp;
1260 env->best_cpu = env->dst_cpu;
1261 }
1262
1263 static bool load_too_imbalanced(long src_load, long dst_load,
1264 struct task_numa_env *env)
1265 {
1266 long imb, old_imb;
1267 long orig_src_load, orig_dst_load;
1268 long src_capacity, dst_capacity;
1269
1270 /*
1271 * The load is corrected for the CPU capacity available on each node.
1272 *
1273 * src_load dst_load
1274 * ------------ vs ---------
1275 * src_capacity dst_capacity
1276 */
1277 src_capacity = env->src_stats.compute_capacity;
1278 dst_capacity = env->dst_stats.compute_capacity;
1279
1280 /* We care about the slope of the imbalance, not the direction. */
1281 if (dst_load < src_load)
1282 swap(dst_load, src_load);
1283
1284 /* Is the difference below the threshold? */
1285 imb = dst_load * src_capacity * 100 -
1286 src_load * dst_capacity * env->imbalance_pct;
1287 if (imb <= 0)
1288 return false;
1289
1290 /*
1291 * The imbalance is above the allowed threshold.
1292 * Compare it with the old imbalance.
1293 */
1294 orig_src_load = env->src_stats.load;
1295 orig_dst_load = env->dst_stats.load;
1296
1297 if (orig_dst_load < orig_src_load)
1298 swap(orig_dst_load, orig_src_load);
1299
1300 old_imb = orig_dst_load * src_capacity * 100 -
1301 orig_src_load * dst_capacity * env->imbalance_pct;
1302
1303 /* Would this change make things worse? */
1304 return (imb > old_imb);
1305 }
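/*
 * E.g. with equal node capacities (1024) and imbalance_pct = 112:
 * a move that makes src_load = 1000 and dst_load = 1300 gives
 * imb = 1300*1024*100 - 1000*1024*112 > 0, above the 12% threshold.
 * If the pre-move loads were 1100 vs 1200, old_imb is negative, so
 * imb > old_imb and the move is rejected as making things worse.
 */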
1306
1307 /*
1308 * This checks if the overall compute and NUMA accesses of the system would
1309 * be improved if the source task were migrated to the target dst_cpu, taking
1310 * into account that it might be best to exchange the task running on the
1311 * dst_cpu with the source task.
1312 */
1313 static void task_numa_compare(struct task_numa_env *env,
1314 long taskimp, long groupimp)
1315 {
1316 struct rq *src_rq = cpu_rq(env->src_cpu);
1317 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1318 struct task_struct *cur;
1319 long src_load, dst_load;
1320 long load;
1321 long imp = env->p->numa_group ? groupimp : taskimp;
1322 long moveimp = imp;
1323 int dist = env->dist;
1324 bool assigned = false;
1325
1326 rcu_read_lock();
1327
1328 raw_spin_lock_irq(&dst_rq->lock);
1329 cur = dst_rq->curr;
1330 /*
1331 * No need to move the exiting task or idle task.
1332 */
1333 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
1334 cur = NULL;
1335 else {
1336 /*
1337 * The task_struct must be protected here to protect the
1338 * p->numa_faults access in the task_weight since the
1339 * numa_faults could already be freed in the following path:
1340 * finish_task_switch()
1341 * --> put_task_struct()
1342 * --> __put_task_struct()
1343 * --> task_numa_free()
1344 */
1345 get_task_struct(cur);
1346 }
1347
1348 raw_spin_unlock_irq(&dst_rq->lock);
1349
1350 /*
1351 * Because we have preemption enabled we can get migrated around and
1352 * end up trying to select ourselves (current == env->p) as a swap candidate.
1353 */
1354 if (cur == env->p)
1355 goto unlock;
1356
1357 /*
1358 * "imp" is the fault differential for the source task between the
1359 * source and destination node. Calculate the total differential for
1360 * the source task and potential destination task. The more negative
1361 * the value is, the more remote accesses that would be expected to
1362 * be incurred if the tasks were swapped.
1363 */
1364 if (cur) {
1365 /* Skip this swap candidate if cannot move to the source cpu */
1366 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1367 goto unlock;
1368
1369 /*
1370 * If dst and source tasks are in the same NUMA group, or not
1371 * in any group then look only at task weights.
1372 */
1373 if (cur->numa_group == env->p->numa_group) {
1374 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1375 task_weight(cur, env->dst_nid, dist);
1376 /*
1377 * Add some hysteresis to prevent swapping the
1378 * tasks within a group over tiny differences.
1379 */
1380 if (cur->numa_group)
1381 imp -= imp/16;
1382 } else {
1383 /*
1384 * Compare the group weights. If a task is all by
1385 * itself (not part of a group), use the task weight
1386 * instead.
1387 */
1388 if (cur->numa_group)
1389 imp += group_weight(cur, env->src_nid, dist) -
1390 group_weight(cur, env->dst_nid, dist);
1391 else
1392 imp += task_weight(cur, env->src_nid, dist) -
1393 task_weight(cur, env->dst_nid, dist);
1394 }
1395 }
1396
1397 if (imp <= env->best_imp && moveimp <= env->best_imp)
1398 goto unlock;
1399
1400 if (!cur) {
1401 /* Is there capacity at our destination? */
1402 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1403 !env->dst_stats.has_free_capacity)
1404 goto unlock;
1405
1406 goto balance;
1407 }
1408
1409 /* Balance doesn't matter much if we're running a task per cpu */
1410 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1411 dst_rq->nr_running == 1)
1412 goto assign;
1413
1414 /*
1415 * In the overloaded case, try and keep the load balanced.
1416 */
1417 balance:
1418 load = task_h_load(env->p);
1419 dst_load = env->dst_stats.load + load;
1420 src_load = env->src_stats.load - load;
1421
1422 if (moveimp > imp && moveimp > env->best_imp) {
1423 /*
1424 * If the improvement from just moving env->p direction is
1425 * better than swapping tasks around, check if a move is
1426 * possible. Store a slightly smaller score than moveimp,
1427 * so an actually idle CPU will win.
1428 */
1429 if (!load_too_imbalanced(src_load, dst_load, env)) {
1430 imp = moveimp - 1;
1431 put_task_struct(cur);
1432 cur = NULL;
1433 goto assign;
1434 }
1435 }
1436
1437 if (imp <= env->best_imp)
1438 goto unlock;
1439
1440 if (cur) {
1441 load = task_h_load(cur);
1442 dst_load -= load;
1443 src_load += load;
1444 }
1445
1446 if (load_too_imbalanced(src_load, dst_load, env))
1447 goto unlock;
1448
1449 /*
1450 * One idle CPU per node is evaluated for a task numa move.
1451 * Call select_idle_sibling to maybe find a better one.
1452 */
1453 if (!cur)
1454 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1455
1456 assign:
1457 assigned = true;
1458 task_numa_assign(env, cur, imp);
1459 unlock:
1460 rcu_read_unlock();
1461 /*
1462 * If dst_rq->curr was not assigned as the best task, the reference that
1463 * protected its task_struct is no longer needed; drop it.
1464 */
1465 if (cur && !assigned)
1466 put_task_struct(cur);
1467 }
1468
1469 static void task_numa_find_cpu(struct task_numa_env *env,
1470 long taskimp, long groupimp)
1471 {
1472 int cpu;
1473
1474 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1475 /* Skip this CPU if the source task cannot migrate */
1476 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1477 continue;
1478
1479 env->dst_cpu = cpu;
1480 task_numa_compare(env, taskimp, groupimp);
1481 }
1482 }
1483
1484 /* Only move tasks to a NUMA node less busy than the current node. */
1485 static bool numa_has_capacity(struct task_numa_env *env)
1486 {
1487 struct numa_stats *src = &env->src_stats;
1488 struct numa_stats *dst = &env->dst_stats;
1489
1490 if (src->has_free_capacity && !dst->has_free_capacity)
1491 return false;
1492
1493 /*
1494 * Only consider a task move if the source has a higher load
1495 * than the destination, corrected for CPU capacity on each node.
1496 *
1497 * src->load dst->load
1498 * --------------------- vs ---------------------
1499 * src->compute_capacity dst->compute_capacity
1500 */
1501 if (src->load * dst->compute_capacity * env->imbalance_pct >
1502     dst->load * src->compute_capacity * 100)
1504 return true;
1505
1506 return false;
1507 }
1508
1509 static int task_numa_migrate(struct task_struct *p)
1510 {
1511 struct task_numa_env env = {
1512 .p = p,
1513
1514 .src_cpu = task_cpu(p),
1515 .src_nid = task_node(p),
1516
1517 .imbalance_pct = 112,
1518
1519 .best_task = NULL,
1520 .best_imp = 0,
1521 .best_cpu = -1,
1522 };
1523 struct sched_domain *sd;
1524 unsigned long taskweight, groupweight;
1525 int nid, ret, dist;
1526 long taskimp, groupimp;
1527
1528 /*
1529 * Pick the lowest SD_NUMA domain, as that would have the smallest
1530 * imbalance and would be the first to start moving tasks about.
1531 *
1532 * And we want to avoid any moving of tasks about, as that would create
1533 * random movement of tasks -- counter the numa conditions we're trying
1534 * to satisfy here.
1535 */
1536 rcu_read_lock();
1537 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1538 if (sd)
1539 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1540 rcu_read_unlock();
1541
1542 /*
1543 * Cpusets can break the scheduler domain tree into smaller
1544 * balance domains, some of which do not cross NUMA boundaries.
1545 * Tasks that are "trapped" in such domains cannot be migrated
1546 * elsewhere, so there is no point in (re)trying.
1547 */
1548 if (unlikely(!sd)) {
1549 p->numa_preferred_nid = task_node(p);
1550 return -EINVAL;
1551 }
1552
1553 env.dst_nid = p->numa_preferred_nid;
1554 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1555 taskweight = task_weight(p, env.src_nid, dist);
1556 groupweight = group_weight(p, env.src_nid, dist);
1557 update_numa_stats(&env.src_stats, env.src_nid);
1558 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1559 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1560 update_numa_stats(&env.dst_stats, env.dst_nid);
1561
1562 /* Try to find a spot on the preferred nid. */
1563 if (numa_has_capacity(&env))
1564 task_numa_find_cpu(&env, taskimp, groupimp);
1565
1566 /*
1567 * Look at other nodes in these cases:
1568 * - there is no space available on the preferred_nid
1569 * - the task is part of a numa_group that is interleaved across
1570 * multiple NUMA nodes; in order to better consolidate the group,
1571 * we need to check other locations.
1572 */
1573 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1574 for_each_online_node(nid) {
1575 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1576 continue;
1577
1578 dist = node_distance(env.src_nid, nid);
1579 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1580 dist != env.dist) {
1581 taskweight = task_weight(p, env.src_nid, dist);
1582 groupweight = group_weight(p, env.src_nid, dist);
1583 }
1584
1585 /* Only consider nodes where both task and groups benefit */
1586 taskimp = task_weight(p, nid, dist) - taskweight;
1587 groupimp = group_weight(p, nid, dist) - groupweight;
1588 if (taskimp < 0 && groupimp < 0)
1589 continue;
1590
1591 env.dist = dist;
1592 env.dst_nid = nid;
1593 update_numa_stats(&env.dst_stats, env.dst_nid);
1594 if (numa_has_capacity(&env))
1595 task_numa_find_cpu(&env, taskimp, groupimp);
1596 }
1597 }
1598
1599 /*
1600 * If the task is part of a workload that spans multiple NUMA nodes,
1601 * and is migrating into one of the workload's active nodes, remember
1602 * this node as the task's preferred numa node, so the workload can
1603 * settle down.
1604 * A task that migrated to a second choice node will be better off
1605 * trying for a better one later. Do not set the preferred node here.
1606 */
1607 if (p->numa_group) {
1608 struct numa_group *ng = p->numa_group;
1609
1610 if (env.best_cpu == -1)
1611 nid = env.src_nid;
1612 else
1613 nid = env.dst_nid;
1614
1615 if (ng->active_nodes > 1 && numa_is_active_node(nid, ng))
1616 sched_setnuma(p, nid);
1617 }
1618
1619 /* No better CPU than the current one was found. */
1620 if (env.best_cpu == -1)
1621 return -EAGAIN;
1622
1623 /*
1624 * Reset the scan period if the task is being rescheduled on an
1625 * alternative node to recheck if the tasks is now properly placed.
1626 */
1627 p->numa_scan_period = task_scan_min(p);
1628
1629 if (env.best_task == NULL) {
1630 ret = migrate_task_to(p, env.best_cpu);
1631 if (ret != 0)
1632 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1633 return ret;
1634 }
1635
1636 ret = migrate_swap(p, env.best_task);
1637 if (ret != 0)
1638 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1639 put_task_struct(env.best_task);
1640 return ret;
1641 }
1642
1643 /* Attempt to migrate a task to a CPU on the preferred node. */
1644 static void numa_migrate_preferred(struct task_struct *p)
1645 {
1646 unsigned long interval = HZ;
1647
1648 /* This task has no NUMA fault statistics yet */
1649 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1650 return;
1651
1652 /* Periodically retry migrating the task to the preferred node */
1653 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1654 p->numa_migrate_retry = jiffies + interval;
1655
1656 /* Success if task is already running on preferred CPU */
1657 if (task_node(p) == p->numa_preferred_nid)
1658 return;
1659
1660 /* Otherwise, try migrate to a CPU on the preferred node */
1661 task_numa_migrate(p);
1662 }
1663
1664 /*
1665 * Find out how many nodes the workload is actively running on. Do this by
1666 * tracking the nodes from which NUMA hinting faults are triggered. This can
1667 * be different from the set of nodes where the workload's memory is currently
1668 * located.
1669 */
1670 static void numa_group_count_active_nodes(struct numa_group *numa_group)
1671 {
1672 unsigned long faults, max_faults = 0;
1673 int nid, active_nodes = 0;
1674
1675 for_each_online_node(nid) {
1676 faults = group_faults_cpu(numa_group, nid);
1677 if (faults > max_faults)
1678 max_faults = faults;
1679 }
1680
1681 for_each_online_node(nid) {
1682 faults = group_faults_cpu(numa_group, nid);
1683 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1684 active_nodes++;
1685 }
1686
1687 numa_group->max_faults_cpu = max_faults;
1688 numa_group->active_nodes = active_nodes;
1689 }
1690
1691 /*
1692 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1693 * increments. The more local the fault statistics are, the higher the scan
1694 * period will be for the next scan window. If local/(local+remote) ratio is
1695 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1696 * the scan period will decrease. Aim for 70% local accesses.
1697 */
1698 #define NUMA_PERIOD_SLOTS 10
1699 #define NUMA_PERIOD_THRESHOLD 7
1700
1701 /*
1702 * Increase the scan period (slow down scanning) if the majority of
1703 * our memory is already on our local node, or if the majority of
1704 * the page accesses are shared with other processes.
1705 * Otherwise, decrease the scan period.
1706 */
1707 static void update_task_scan_period(struct task_struct *p,
1708 unsigned long shared, unsigned long private)
1709 {
1710 unsigned int period_slot;
1711 int ratio;
1712 int diff;
1713
1714 unsigned long remote = p->numa_faults_locality[0];
1715 unsigned long local = p->numa_faults_locality[1];
1716
1717 /*
1718 * If there were no recorded hinting faults then either the task is
1719 * completely idle or all activity is in areas that are not of interest
1720 * to automatic numa balancing. Related to that, if there were failed
1721 * migrations then it implies we are migrating too quickly or the local
1722 * node is overloaded. In either case, scan slower.
1723 */
1724 if (local + shared == 0 || p->numa_faults_locality[2]) {
1725 p->numa_scan_period = min(p->numa_scan_period_max,
1726 p->numa_scan_period << 1);
1727
1728 p->mm->numa_next_scan = jiffies +
1729 msecs_to_jiffies(p->numa_scan_period);
1730
1731 return;
1732 }
1733
1734 /*
1735 * Prepare to scale scan period relative to the current period.
1736 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1737 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1738 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1739 */
1740 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1741 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1742 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1743 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1744 if (!slot)
1745 slot = 1;
1746 diff = slot * period_slot;
1747 } else {
1748 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1749
1750 /*
1751 * Scale scan rate increases based on sharing. There is an
1752 * inverse relationship between the degree of sharing and
1753 * the adjustment made to the scanning period. Broadly
1754 * speaking the intent is that there is little point
1755 * scanning faster if shared accesses dominate as it may
1756 * simply bounce migrations uselessly.
1757 */
1758 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1759 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1760 }
1761
1762 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1763 task_scan_min(p), task_scan_max(p));
1764 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1765 }
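/*
 * E.g. with a current scan period of 1000ms (period_slot = 100ms) and
 * 30% local faults, ratio = 3 and diff = -(7 - 3) * 100ms = -400ms;
 * if 90% of the faults were private, the -400ms is scaled by 9/10 to
 * -360ms, shortening the next scan period to 640ms (subject to the
 * min/max clamp).
 */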
1766
1767 /*
1768 * Get the fraction of time the task has been running since the last
1769 * NUMA placement cycle. The scheduler keeps similar statistics, but
1770 * decays those on a 32ms period, which is orders of magnitude off
1771 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1772 * stats only if the task is so new there are no NUMA statistics yet.
1773 */
1774 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1775 {
1776 u64 runtime, delta, now;
1777 /* Use the start of this time slice to avoid calculations. */
1778 now = p->se.exec_start;
1779 runtime = p->se.sum_exec_runtime;
1780
1781 if (p->last_task_numa_placement) {
1782 delta = runtime - p->last_sum_exec_runtime;
1783 *period = now - p->last_task_numa_placement;
1784 } else {
1785 delta = p->se.avg.load_sum / p->se.load.weight;
1786 *period = LOAD_AVG_MAX;
1787 }
1788
1789 p->last_sum_exec_runtime = runtime;
1790 p->last_task_numa_placement = now;
1791
1792 return delta;
1793 }
1794
1795 /*
1796 * Determine the preferred nid for a task in a numa_group. This needs to
1797 * be done in a way that produces consistent results with group_weight,
1798 * otherwise workloads might not converge.
1799 */
1800 static int preferred_group_nid(struct task_struct *p, int nid)
1801 {
1802 nodemask_t nodes;
1803 int dist;
1804
1805 /* Direct connections between all NUMA nodes. */
1806 if (sched_numa_topology_type == NUMA_DIRECT)
1807 return nid;
1808
1809 /*
1810 * On a system with glueless mesh NUMA topology, group_weight
1811 * scores nodes according to the number of NUMA hinting faults on
1812 * both the node itself, and on nearby nodes.
1813 */
1814 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1815 unsigned long score, max_score = 0;
1816 int node, max_node = nid;
1817
1818 dist = sched_max_numa_distance;
1819
1820 for_each_online_node(node) {
1821 score = group_weight(p, node, dist);
1822 if (score > max_score) {
1823 max_score = score;
1824 max_node = node;
1825 }
1826 }
1827 return max_node;
1828 }
1829
1830 /*
1831 * Finding the preferred nid in a system with NUMA backplane
1832 * interconnect topology is more involved. The goal is to locate
1833 * tasks from numa_groups near each other in the system, and
1834 * untangle workloads from different sides of the system. This requires
1835 * searching down the hierarchy of node groups, recursively searching
1836 * inside the highest scoring group of nodes. The nodemask tricks
1837 * keep the complexity of the search down.
1838 */
1839 nodes = node_online_map;
1840 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1841 unsigned long max_faults = 0;
1842 nodemask_t max_group = NODE_MASK_NONE;
1843 int a, b;
1844
1845 /* Are there nodes at this distance from each other? */
1846 if (!find_numa_distance(dist))
1847 continue;
1848
1849 for_each_node_mask(a, nodes) {
1850 unsigned long faults = 0;
1851 nodemask_t this_group;
1852 nodes_clear(this_group);
1853
1854 /* Sum group's NUMA faults; includes a==b case. */
1855 for_each_node_mask(b, nodes) {
1856 if (node_distance(a, b) < dist) {
1857 faults += group_faults(p, b);
1858 node_set(b, this_group);
1859 node_clear(b, nodes);
1860 }
1861 }
1862
1863 /* Remember the top group. */
1864 if (faults > max_faults) {
1865 max_faults = faults;
1866 max_group = this_group;
1867 /*
1868 * subtle: at the smallest distance there is
1869 * just one node left in each "group", the
1870 * winner is the preferred nid.
1871 */
1872 nid = a;
1873 }
1874 }
1875 /* Next round, evaluate the nodes within max_group. */
1876 if (!max_faults)
1877 break;
1878 nodes = max_group;
1879 }
1880 return nid;
1881 }
1882
1883 static void task_numa_placement(struct task_struct *p)
1884 {
1885 int seq, nid, max_nid = -1, max_group_nid = -1;
1886 unsigned long max_faults = 0, max_group_faults = 0;
1887 unsigned long fault_types[2] = { 0, 0 };
1888 unsigned long total_faults;
1889 u64 runtime, period;
1890 spinlock_t *group_lock = NULL;
1891
1892 /*
1893 * The p->mm->numa_scan_seq field gets updated without
1894 * exclusive access. Use READ_ONCE() here to ensure
1895 * that the field is read in a single access:
1896 */
1897 seq = READ_ONCE(p->mm->numa_scan_seq);
1898 if (p->numa_scan_seq == seq)
1899 return;
1900 p->numa_scan_seq = seq;
1901 p->numa_scan_period_max = task_scan_max(p);
1902
1903 total_faults = p->numa_faults_locality[0] +
1904 p->numa_faults_locality[1];
1905 runtime = numa_get_avg_runtime(p, &period);
1906
1907 /* If the task is part of a group prevent parallel updates to group stats */
1908 if (p->numa_group) {
1909 group_lock = &p->numa_group->lock;
1910 spin_lock_irq(group_lock);
1911 }
1912
1913 /* Find the node with the highest number of faults */
1914 for_each_online_node(nid) {
1915 /* Keep track of the offsets in numa_faults array */
1916 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
1917 unsigned long faults = 0, group_faults = 0;
1918 int priv;
1919
1920 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1921 long diff, f_diff, f_weight;
1922
1923 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
1924 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
1925 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
1926 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
1927
1928 /* Decay existing window, copy faults since last scan */
1929 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
1930 fault_types[priv] += p->numa_faults[membuf_idx];
1931 p->numa_faults[membuf_idx] = 0;
1932
1933 /*
1934 * Normalize the faults_from, so all tasks in a group
1935 * count according to CPU use, instead of by the raw
1936 * number of faults. Tasks with little runtime have
1937 * little over-all impact on throughput, and thus their
1938 * faults are less important.
1939 */
1940 f_weight = div64_u64(runtime << 16, period + 1);
1941 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
1942 (total_faults + 1);
1943 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
1944 p->numa_faults[cpubuf_idx] = 0;
1945
1946 p->numa_faults[mem_idx] += diff;
1947 p->numa_faults[cpu_idx] += f_diff;
1948 faults += p->numa_faults[mem_idx];
1949 p->total_numa_faults += diff;
1950 if (p->numa_group) {
1951 /*
1952 * safe because we can only change our own group
1953 *
1954 * mem_idx represents the offset for a given
1955 * nid and priv in a specific region because it
1956 * is at the beginning of the numa_faults array.
1957 */
1958 p->numa_group->faults[mem_idx] += diff;
1959 p->numa_group->faults_cpu[mem_idx] += f_diff;
1960 p->numa_group->total_faults += diff;
1961 group_faults += p->numa_group->faults[mem_idx];
1962 }
1963 }
1964
1965 if (faults > max_faults) {
1966 max_faults = faults;
1967 max_nid = nid;
1968 }
1969
1970 if (group_faults > max_group_faults) {
1971 max_group_faults = group_faults;
1972 max_group_nid = nid;
1973 }
1974 }
1975
1976 update_task_scan_period(p, fault_types[0], fault_types[1]);
1977
1978 if (p->numa_group) {
1979 numa_group_count_active_nodes(p->numa_group);
1980 spin_unlock_irq(group_lock);
1981 max_nid = preferred_group_nid(p, max_group_nid);
1982 }
1983
1984 if (max_faults) {
1985 /* Set the new preferred node */
1986 if (max_nid != p->numa_preferred_nid)
1987 sched_setnuma(p, max_nid);
1988
1989 if (task_node(p) != p->numa_preferred_nid)
1990 numa_migrate_preferred(p);
1991 }
1992 }
1993
1994 static inline int get_numa_group(struct numa_group *grp)
1995 {
1996 return atomic_inc_not_zero(&grp->refcount);
1997 }
1998
1999 static inline void put_numa_group(struct numa_group *grp)
2000 {
2001 if (atomic_dec_and_test(&grp->refcount))
2002 kfree_rcu(grp, rcu);
2003 }
2004
2005 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2006 int *priv)
2007 {
2008 struct numa_group *grp, *my_grp;
2009 struct task_struct *tsk;
2010 bool join = false;
2011 int cpu = cpupid_to_cpu(cpupid);
2012 int i;
2013
2014 if (unlikely(!p->numa_group)) {
2015 unsigned int size = sizeof(struct numa_group) +
2016 4*nr_node_ids*sizeof(unsigned long);
2017
2018 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2019 if (!grp)
2020 return;
2021
2022 atomic_set(&grp->refcount, 1);
2023 grp->active_nodes = 1;
2024 grp->max_faults_cpu = 0;
2025 spin_lock_init(&grp->lock);
2026 grp->gid = p->pid;
2027 /* Second half of the array tracks nids where faults happen */
2028 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2029 nr_node_ids;
2030
2031 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2032 grp->faults[i] = p->numa_faults[i];
2033
2034 grp->total_faults = p->total_numa_faults;
2035
2036 grp->nr_tasks++;
2037 rcu_assign_pointer(p->numa_group, grp);
2038 }
2039
2040 rcu_read_lock();
2041 tsk = READ_ONCE(cpu_rq(cpu)->curr);
2042
2043 if (!cpupid_match_pid(tsk, cpupid))
2044 goto no_join;
2045
2046 grp = rcu_dereference(tsk->numa_group);
2047 if (!grp)
2048 goto no_join;
2049
2050 my_grp = p->numa_group;
2051 if (grp == my_grp)
2052 goto no_join;
2053
2054 /*
2055 	 * Only join the other group if it's bigger; if we're the bigger group,
2056 * the other task will join us.
2057 */
2058 if (my_grp->nr_tasks > grp->nr_tasks)
2059 goto no_join;
2060
2061 /*
2062 * Tie-break on the grp address.
2063 */
2064 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2065 goto no_join;
2066
2067 /* Always join threads in the same process. */
2068 if (tsk->mm == current->mm)
2069 join = true;
2070
2071 /* Simple filter to avoid false positives due to PID collisions */
2072 if (flags & TNF_SHARED)
2073 join = true;
2074
2075 /* Update priv based on whether false sharing was detected */
2076 *priv = !join;
2077
2078 if (join && !get_numa_group(grp))
2079 goto no_join;
2080
2081 rcu_read_unlock();
2082
2083 if (!join)
2084 return;
2085
2086 BUG_ON(irqs_disabled());
2087 double_lock_irq(&my_grp->lock, &grp->lock);
2088
2089 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2090 my_grp->faults[i] -= p->numa_faults[i];
2091 grp->faults[i] += p->numa_faults[i];
2092 }
2093 my_grp->total_faults -= p->total_numa_faults;
2094 grp->total_faults += p->total_numa_faults;
2095
2096 my_grp->nr_tasks--;
2097 grp->nr_tasks++;
2098
2099 spin_unlock(&my_grp->lock);
2100 spin_unlock_irq(&grp->lock);
2101
2102 rcu_assign_pointer(p->numa_group, grp);
2103
2104 put_numa_group(my_grp);
2105 return;
2106
2107 no_join:
2108 rcu_read_unlock();
2109 return;
2110 }
2111
2112 void task_numa_free(struct task_struct *p)
2113 {
2114 struct numa_group *grp = p->numa_group;
2115 void *numa_faults = p->numa_faults;
2116 unsigned long flags;
2117 int i;
2118
2119 if (grp) {
2120 spin_lock_irqsave(&grp->lock, flags);
2121 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2122 grp->faults[i] -= p->numa_faults[i];
2123 grp->total_faults -= p->total_numa_faults;
2124
2125 grp->nr_tasks--;
2126 spin_unlock_irqrestore(&grp->lock, flags);
2127 RCU_INIT_POINTER(p->numa_group, NULL);
2128 put_numa_group(grp);
2129 }
2130
2131 p->numa_faults = NULL;
2132 kfree(numa_faults);
2133 }
2134
2135 /*
2136 * Got a PROT_NONE fault for a page on @node.
2137 */
2138 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2139 {
2140 struct task_struct *p = current;
2141 bool migrated = flags & TNF_MIGRATED;
2142 int cpu_node = task_node(current);
2143 int local = !!(flags & TNF_FAULT_LOCAL);
2144 struct numa_group *ng;
2145 int priv;
2146
2147 if (!static_branch_likely(&sched_numa_balancing))
2148 return;
2149
2150 /* for example, ksmd faulting in a user's mm */
2151 if (!p->mm)
2152 return;
2153
2154 /* Allocate buffer to track faults on a per-node basis */
2155 if (unlikely(!p->numa_faults)) {
2156 int size = sizeof(*p->numa_faults) *
2157 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2158
2159 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2160 if (!p->numa_faults)
2161 return;
2162
2163 p->total_numa_faults = 0;
2164 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2165 }
2166
2167 /*
2168 * First accesses are treated as private, otherwise consider accesses
2169 * to be private if the accessing pid has not changed
2170 */
2171 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2172 priv = 1;
2173 } else {
2174 priv = cpupid_match_pid(p, last_cpupid);
2175 if (!priv && !(flags & TNF_NO_GROUP))
2176 task_numa_group(p, last_cpupid, flags, &priv);
2177 }
2178
2179 /*
2180 * If a workload spans multiple NUMA nodes, a shared fault that
2181 * occurs wholly within the set of nodes that the workload is
2182 * actively using should be counted as local. This allows the
2183 * scan rate to slow down when a workload has settled down.
2184 */
2185 ng = p->numa_group;
2186 if (!priv && !local && ng && ng->active_nodes > 1 &&
2187 numa_is_active_node(cpu_node, ng) &&
2188 numa_is_active_node(mem_node, ng))
2189 local = 1;
2190
2191 task_numa_placement(p);
2192
2193 /*
2194 	 * Periodically retry migrating the task to its preferred node, in
2195 	 * case it previously failed, or the scheduler moved us.
2196 */
2197 if (time_after(jiffies, p->numa_migrate_retry))
2198 numa_migrate_preferred(p);
2199
2200 if (migrated)
2201 p->numa_pages_migrated += pages;
2202 if (flags & TNF_MIGRATE_FAIL)
2203 p->numa_faults_locality[2] += pages;
2204
2205 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2206 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2207 p->numa_faults_locality[local] += pages;
2208 }
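
/*
 * Example of the bookkeeping above (illustrative): a task running on
 * node 0 that faults on one page resident on node 1 bumps
 * numa_faults[NUMA_MEMBUF, node 1] and numa_faults[NUMA_CPUBUF, node 0]
 * by 1. These buffers are only folded into the decayed NUMA_MEM /
 * NUMA_CPU windows (and cleared) by the next task_numa_placement() run.
 */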
2209
2210 static void reset_ptenuma_scan(struct task_struct *p)
2211 {
2212 /*
2213 * We only did a read acquisition of the mmap sem, so
2214 * p->mm->numa_scan_seq is written to without exclusive access
2215 * and the update is not guaranteed to be atomic. That's not
2216 * much of an issue though, since this is just used for
2217 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2218 * expensive, to avoid any form of compiler optimizations:
2219 */
2220 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2221 p->mm->numa_scan_offset = 0;
2222 }
2223
2224 /*
2225 * The expensive part of numa migration is done from task_work context.
2226 * Triggered from task_tick_numa().
2227 */
2228 void task_numa_work(struct callback_head *work)
2229 {
2230 unsigned long migrate, next_scan, now = jiffies;
2231 struct task_struct *p = current;
2232 struct mm_struct *mm = p->mm;
2233 u64 runtime = p->se.sum_exec_runtime;
2234 struct vm_area_struct *vma;
2235 unsigned long start, end;
2236 unsigned long nr_pte_updates = 0;
2237 long pages, virtpages;
2238
2239 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2240
2241 work->next = work; /* protect against double add */
2242 /*
2243 * Who cares about NUMA placement when they're dying.
2244 *
2245 * NOTE: make sure not to dereference p->mm before this check,
2246 * exit_task_work() happens _after_ exit_mm() so we could be called
2247 * without p->mm even though we still had it when we enqueued this
2248 * work.
2249 */
2250 if (p->flags & PF_EXITING)
2251 return;
2252
2253 if (!mm->numa_next_scan) {
2254 mm->numa_next_scan = now +
2255 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2256 }
2257
2258 /*
2259 * Enforce maximal scan/migration frequency..
2260 */
2261 migrate = mm->numa_next_scan;
2262 if (time_before(now, migrate))
2263 return;
2264
2265 if (p->numa_scan_period == 0) {
2266 p->numa_scan_period_max = task_scan_max(p);
2267 p->numa_scan_period = task_scan_min(p);
2268 }
2269
2270 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2271 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2272 return;
2273
2274 /*
2275 * Delay this task enough that another task of this mm will likely win
2276 * the next time around.
2277 */
2278 p->node_stamp += 2 * TICK_NSEC;
2279
2280 start = mm->numa_scan_offset;
2281 pages = sysctl_numa_balancing_scan_size;
2282 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2283 virtpages = pages * 8; /* Scan up to this much virtual space */
2284 if (!pages)
2285 return;
2286
2287
2288 down_read(&mm->mmap_sem);
2289 vma = find_vma(mm, start);
2290 if (!vma) {
2291 reset_ptenuma_scan(p);
2292 start = 0;
2293 vma = mm->mmap;
2294 }
2295 for (; vma; vma = vma->vm_next) {
2296 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2297 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2298 continue;
2299 }
2300
2301 /*
2302 * Shared library pages mapped by multiple processes are not
2303 * migrated as it is expected they are cache replicated. Avoid
2304 * hinting faults in read-only file-backed mappings or the vdso
2305 * as migrating the pages will be of marginal benefit.
2306 */
2307 if (!vma->vm_mm ||
2308 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2309 continue;
2310
2311 /*
2312 * Skip inaccessible VMAs to avoid any confusion between
2313 * PROT_NONE and NUMA hinting ptes
2314 */
2315 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2316 continue;
2317
2318 do {
2319 start = max(start, vma->vm_start);
2320 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2321 end = min(end, vma->vm_end);
2322 nr_pte_updates = change_prot_numa(vma, start, end);
2323
2324 /*
2325 		 * Try to scan sysctl_numa_balancing_scan_size worth of
2326 * hpages that have at least one present PTE that
2327 * is not already pte-numa. If the VMA contains
2328 * areas that are unused or already full of prot_numa
2329 * PTEs, scan up to virtpages, to skip through those
2330 * areas faster.
2331 */
2332 if (nr_pte_updates)
2333 pages -= (end - start) >> PAGE_SHIFT;
2334 virtpages -= (end - start) >> PAGE_SHIFT;
2335
2336 start = end;
2337 if (pages <= 0 || virtpages <= 0)
2338 goto out;
2339
2340 cond_resched();
2341 } while (end != vma->vm_end);
2342 }
2343
2344 out:
2345 /*
2346 * It is possible to reach the end of the VMA list but the last few
2347 	 * VMAs are not guaranteed to be migratable. If they are not, we
2348 * would find the !migratable VMA on the next scan but not reset the
2349 * scanner to the start so check it now.
2350 */
2351 if (vma)
2352 mm->numa_scan_offset = start;
2353 else
2354 reset_ptenuma_scan(p);
2355 up_read(&mm->mmap_sem);
2356
2357 /*
2358 * Make sure tasks use at least 32x as much time to run other code
2359 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2360 * Usually update_task_scan_period slows down scanning enough; on an
2361 * overloaded system we need to limit overhead on a per task basis.
2362 */
2363 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2364 u64 diff = p->se.sum_exec_runtime - runtime;
2365 p->node_stamp += 32 * diff;
2366 }
2367 }
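
/*
 * Example of the backoff above: if this scan consumed diff = 1ms of
 * task runtime, node_stamp advances by 32ms, so the task must run
 * roughly 32ms more before the next scan can trigger; scanning is
 * thereby capped at ~1/33 ~= 3% of the task's CPU time.
 */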
2368
2369 /*
2370 * Drive the periodic memory faults..
2371 */
2372 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2373 {
2374 struct callback_head *work = &curr->numa_work;
2375 u64 period, now;
2376
2377 /*
2378 * We don't care about NUMA placement if we don't have memory.
2379 */
2380 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2381 return;
2382
2383 /*
2384 * Using runtime rather than walltime has the dual advantage that
2385 * we (mostly) drive the selection from busy threads and that the
2386 * task needs to have done some actual work before we bother with
2387 * NUMA placement.
2388 */
2389 now = curr->se.sum_exec_runtime;
2390 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2391
2392 if (now > curr->node_stamp + period) {
2393 if (!curr->node_stamp)
2394 curr->numa_scan_period = task_scan_min(curr);
2395 curr->node_stamp += period;
2396
2397 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2398 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2399 task_work_add(curr, work, true);
2400 }
2401 }
2402 }
2403 #else
2404 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2405 {
2406 }
2407
2408 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2409 {
2410 }
2411
2412 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2413 {
2414 }
2415 #endif /* CONFIG_NUMA_BALANCING */
2416
2417 static void
2418 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2419 {
2420 update_load_add(&cfs_rq->load, se->load.weight);
2421 if (!parent_entity(se))
2422 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2423 #ifdef CONFIG_SMP
2424 if (entity_is_task(se)) {
2425 struct rq *rq = rq_of(cfs_rq);
2426
2427 account_numa_enqueue(rq, task_of(se));
2428 list_add(&se->group_node, &rq->cfs_tasks);
2429 }
2430 #endif
2431 cfs_rq->nr_running++;
2432 }
2433
2434 static void
2435 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2436 {
2437 update_load_sub(&cfs_rq->load, se->load.weight);
2438 if (!parent_entity(se))
2439 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2440 if (entity_is_task(se)) {
2441 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2442 list_del_init(&se->group_node);
2443 }
2444 cfs_rq->nr_running--;
2445 }
2446
2447 #ifdef CONFIG_FAIR_GROUP_SCHED
2448 # ifdef CONFIG_SMP
2449 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2450 {
2451 long tg_weight;
2452
2453 /*
2454 * Use this CPU's real-time load instead of the last load contribution
2455 	 * as the updating of the contribution is delayed, and we will use
2456 	 * the real-time load to calc the shares. See update_tg_load_avg().
2457 */
2458 tg_weight = atomic_long_read(&tg->load_avg);
2459 tg_weight -= cfs_rq->tg_load_avg_contrib;
2460 tg_weight += cfs_rq->load.weight;
2461
2462 return tg_weight;
2463 }
2464
2465 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2466 {
2467 long tg_weight, load, shares;
2468
2469 tg_weight = calc_tg_weight(tg, cfs_rq);
2470 load = cfs_rq->load.weight;
2471
2472 shares = (tg->shares * load);
2473 if (tg_weight)
2474 shares /= tg_weight;
2475
2476 if (shares < MIN_SHARES)
2477 shares = MIN_SHARES;
2478 if (shares > tg->shares)
2479 shares = tg->shares;
2480
2481 return shares;
2482 }
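
/*
 * Worked example (illustrative numbers): with tg->shares = 2048, a
 * local queue load of 1024 and a group-wide tg_weight of 2048 (say,
 * equal load on one other CPU), shares = 2048 * 1024 / 2048 = 1024:
 * half of the group's shares land on this CPU's group entity. The
 * result is always clamped to [MIN_SHARES, tg->shares].
 */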
2483 # else /* CONFIG_SMP */
2484 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2485 {
2486 return tg->shares;
2487 }
2488 # endif /* CONFIG_SMP */
2489 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2490 unsigned long weight)
2491 {
2492 if (se->on_rq) {
2493 /* commit outstanding execution time */
2494 if (cfs_rq->curr == se)
2495 update_curr(cfs_rq);
2496 account_entity_dequeue(cfs_rq, se);
2497 }
2498
2499 update_load_set(&se->load, weight);
2500
2501 if (se->on_rq)
2502 account_entity_enqueue(cfs_rq, se);
2503 }
2504
2505 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2506
2507 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2508 {
2509 struct task_group *tg;
2510 struct sched_entity *se;
2511 long shares;
2512
2513 tg = cfs_rq->tg;
2514 se = tg->se[cpu_of(rq_of(cfs_rq))];
2515 if (!se || throttled_hierarchy(cfs_rq))
2516 return;
2517 #ifndef CONFIG_SMP
2518 if (likely(se->load.weight == tg->shares))
2519 return;
2520 #endif
2521 shares = calc_cfs_shares(cfs_rq, tg);
2522
2523 reweight_entity(cfs_rq_of(se), se, shares);
2524 }
2525 #else /* CONFIG_FAIR_GROUP_SCHED */
2526 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2527 {
2528 }
2529 #endif /* CONFIG_FAIR_GROUP_SCHED */
2530
2531 #ifdef CONFIG_SMP
2532 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2533 static const u32 runnable_avg_yN_inv[] = {
2534 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2535 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2536 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2537 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2538 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2539 0x85aac367, 0x82cd8698,
2540 };
2541
2542 /*
2543 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2544 * over-estimates when re-combining.
2545 */
2546 static const u32 runnable_avg_yN_sum[] = {
2547 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2548 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2549 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2550 };
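
/*
 * Sanity check on the tables (y = 2^(-1/32) ~= 0.97857):
 * runnable_avg_yN_inv[n] ~= floor(y^n * 2^32), e.g. inv[16] =
 * 0xb504f333 ~= 0.70711 * 2^32 = 2^32/sqrt(2); and
 * runnable_avg_yN_sum[1] = 1002 ~= floor(1024 * y).
 */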
2551
2552 /*
2553 * Approximate:
2554 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2555 */
2556 static __always_inline u64 decay_load(u64 val, u64 n)
2557 {
2558 unsigned int local_n;
2559
2560 if (!n)
2561 return val;
2562 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2563 return 0;
2564
2565 /* after bounds checking we can collapse to 32-bit */
2566 local_n = n;
2567
2568 /*
2569 * As y^PERIOD = 1/2, we can combine
2570 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2571 	 * with a look-up table which covers y^n (n < PERIOD),
2572 	 *
2573 	 * to achieve a constant-time decay_load.
2574 */
2575 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2576 val >>= local_n / LOAD_AVG_PERIOD;
2577 local_n %= LOAD_AVG_PERIOD;
2578 }
2579
2580 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2581 return val;
2582 }
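
/*
 * Worked example: decay_load(1024, 40) with LOAD_AVG_PERIOD = 32.
 * 40 / 32 = 1 full half-life, so val >>= 1 leaves 512; the remaining
 * n = 8 periods come from the table, runnable_avg_yN_inv[8] =
 * 0xd744fcc9 ~= 0.8409 * 2^32 (= y^8), giving 512 * 0.8409 ~= 430.
 * That matches computing 1024 * y^40 = 1024 * 2^(-40/32) ~= 430
 * directly.
 */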
2583
2584 /*
2585 * For updates fully spanning n periods, the contribution to runnable
2586 * average will be: \Sum 1024*y^n
2587 *
2588 * We can compute this reasonably efficiently by combining:
2589  * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n < PERIOD}
2590 */
2591 static u32 __compute_runnable_contrib(u64 n)
2592 {
2593 u32 contrib = 0;
2594
2595 if (likely(n <= LOAD_AVG_PERIOD))
2596 return runnable_avg_yN_sum[n];
2597 else if (unlikely(n >= LOAD_AVG_MAX_N))
2598 return LOAD_AVG_MAX;
2599
2600 	/* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
2601 do {
2602 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2603 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2604
2605 n -= LOAD_AVG_PERIOD;
2606 } while (n > LOAD_AVG_PERIOD);
2607
2608 contrib = decay_load(contrib, n);
2609 return contrib + runnable_avg_yN_sum[n];
2610 }
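
/*
 * Worked example: __compute_runnable_contrib(40). One loop iteration
 * leaves contrib = runnable_avg_yN_sum[32] = 23371 and n = 8; then
 * decay_load(23371, 8) ~= 19652, plus runnable_avg_yN_sum[8] = 7437,
 * returns ~27089 -- comfortably below LOAD_AVG_MAX (47742), the value
 * an always-runnable entity converges to.
 */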
2611
2612 #if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
2613 #error "load tracking assumes 2^10 as unit"
2614 #endif
2615
2616 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
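
/*
 * E.g. on a CPU currently at half capacity or frequency (s = 512 out
 * of 1024), cap_scale(1024, 512) = 512: a segment that would
 * contribute 1024 at full speed is accrued as 512.
 */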
2617
2618 /*
2619 * We can represent the historical contribution to runnable average as the
2620 * coefficients of a geometric series. To do this we sub-divide our runnable
2621 * history into segments of approximately 1ms (1024us); label the segment that
2622 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2623 *
2624 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2625 * p0 p1 p2
2626 * (now) (~1ms ago) (~2ms ago)
2627 *
2628 * Let u_i denote the fraction of p_i that the entity was runnable.
2629 *
2630 * We then designate the fractions u_i as our co-efficients, yielding the
2631 * following representation of historical load:
2632 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2633 *
2634  * We choose y based on the width of a reasonable scheduling period, fixing:
2635 * y^32 = 0.5
2636 *
2637 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2638 * approximately half as much as the contribution to load within the last ms
2639 * (u_0).
2640 *
2641 * When a period "rolls over" and we have new u_0`, multiplying the previous
2642 * sum again by y is sufficient to update:
2643 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2644 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2645 */
2646 static __always_inline int
2647 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2648 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2649 {
2650 u64 delta, scaled_delta, periods;
2651 u32 contrib;
2652 unsigned int delta_w, scaled_delta_w, decayed = 0;
2653 unsigned long scale_freq, scale_cpu;
2654
2655 delta = now - sa->last_update_time;
2656 /*
2657 * This should only happen when time goes backwards, which it
2658 * unfortunately does during sched clock init when we swap over to TSC.
2659 */
2660 if ((s64)delta < 0) {
2661 sa->last_update_time = now;
2662 return 0;
2663 }
2664
2665 /*
2666 * Use 1024ns as the unit of measurement since it's a reasonable
2667 * approximation of 1us and fast to compute.
2668 */
2669 delta >>= 10;
2670 if (!delta)
2671 return 0;
2672 sa->last_update_time = now;
2673
2674 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2675 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2676
2677 /* delta_w is the amount already accumulated against our next period */
2678 delta_w = sa->period_contrib;
2679 if (delta + delta_w >= 1024) {
2680 decayed = 1;
2681
2682 		/* how much is left for the next period starts over; we don't know it yet */
2683 sa->period_contrib = 0;
2684
2685 /*
2686 * Now that we know we're crossing a period boundary, figure
2687 * out how much from delta we need to complete the current
2688 * period and accrue it.
2689 */
2690 delta_w = 1024 - delta_w;
2691 scaled_delta_w = cap_scale(delta_w, scale_freq);
2692 if (weight) {
2693 sa->load_sum += weight * scaled_delta_w;
2694 if (cfs_rq) {
2695 cfs_rq->runnable_load_sum +=
2696 weight * scaled_delta_w;
2697 }
2698 }
2699 if (running)
2700 sa->util_sum += scaled_delta_w * scale_cpu;
2701
2702 delta -= delta_w;
2703
2704 /* Figure out how many additional periods this update spans */
2705 periods = delta / 1024;
2706 delta %= 1024;
2707
2708 sa->load_sum = decay_load(sa->load_sum, periods + 1);
2709 if (cfs_rq) {
2710 cfs_rq->runnable_load_sum =
2711 decay_load(cfs_rq->runnable_load_sum, periods + 1);
2712 }
2713 sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
2714
2715 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2716 contrib = __compute_runnable_contrib(periods);
2717 contrib = cap_scale(contrib, scale_freq);
2718 if (weight) {
2719 sa->load_sum += weight * contrib;
2720 if (cfs_rq)
2721 cfs_rq->runnable_load_sum += weight * contrib;
2722 }
2723 if (running)
2724 sa->util_sum += contrib * scale_cpu;
2725 }
2726
2727 /* Remainder of delta accrued against u_0` */
2728 scaled_delta = cap_scale(delta, scale_freq);
2729 if (weight) {
2730 sa->load_sum += weight * scaled_delta;
2731 if (cfs_rq)
2732 cfs_rq->runnable_load_sum += weight * scaled_delta;
2733 }
2734 if (running)
2735 sa->util_sum += scaled_delta * scale_cpu;
2736
2737 sa->period_contrib += delta;
2738
2739 if (decayed) {
2740 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2741 if (cfs_rq) {
2742 cfs_rq->runnable_load_avg =
2743 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2744 }
2745 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2746 }
2747
2748 return decayed;
2749 }
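
/*
 * Worked example of the accrual above (illustrative numbers): with
 * period_contrib = 300 and delta = 2000 (~1us units), delta_w = 724
 * first closes the current period; periods = (2000 - 724) / 1024 = 1
 * full period then contributes __compute_runnable_contrib(1) = 1002
 * per unit weight; the accumulated sums (including the segment that
 * just closed the old period) decay by y^(periods + 1) = y^2; and the
 * trailing 252 accrues undecayed into the new u_0, leaving
 * period_contrib = 252.
 */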
2750
2751 #ifdef CONFIG_FAIR_GROUP_SCHED
2752 /*
2753  * Updating tg's load_avg is necessary before update_cfs_shares() (which is done)
2754 * and effective_load (which is not done because it is too costly).
2755 */
2756 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
2757 {
2758 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
2759
2760 /*
2761 * No need to update load_avg for root_task_group as it is not used.
2762 */
2763 if (cfs_rq->tg == &root_task_group)
2764 return;
2765
2766 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2767 atomic_long_add(delta, &cfs_rq->tg->load_avg);
2768 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
2769 }
2770 }
2771
2772 /*
2773 * Called within set_task_rq() right before setting a task's cpu. The
2774 * caller only guarantees p->pi_lock is held; no other assumptions,
2775 * including the state of rq->lock, should be made.
2776 */
2777 void set_task_rq_fair(struct sched_entity *se,
2778 struct cfs_rq *prev, struct cfs_rq *next)
2779 {
2780 if (!sched_feat(ATTACH_AGE_LOAD))
2781 return;
2782
2783 /*
2784 	 * We are supposed to update the task to "current" time, so that it is up
2785 	 * to date and ready to go to a new CPU/cfs_rq. But we have difficulty in
2786 	 * getting what the current time is, so simply throw away the out-of-date
2787 	 * time. This results in the wakee task being less decayed, but giving
2788 	 * the wakee more load is not a bad thing.
2789 */
2790 if (se->avg.last_update_time && prev) {
2791 u64 p_last_update_time;
2792 u64 n_last_update_time;
2793
2794 #ifndef CONFIG_64BIT
2795 u64 p_last_update_time_copy;
2796 u64 n_last_update_time_copy;
2797
2798 do {
2799 p_last_update_time_copy = prev->load_last_update_time_copy;
2800 n_last_update_time_copy = next->load_last_update_time_copy;
2801
2802 smp_rmb();
2803
2804 p_last_update_time = prev->avg.last_update_time;
2805 n_last_update_time = next->avg.last_update_time;
2806
2807 } while (p_last_update_time != p_last_update_time_copy ||
2808 n_last_update_time != n_last_update_time_copy);
2809 #else
2810 p_last_update_time = prev->avg.last_update_time;
2811 n_last_update_time = next->avg.last_update_time;
2812 #endif
2813 __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
2814 &se->avg, 0, 0, NULL);
2815 se->avg.last_update_time = n_last_update_time;
2816 }
2817 }
2818 #else /* CONFIG_FAIR_GROUP_SCHED */
2819 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
2820 #endif /* CONFIG_FAIR_GROUP_SCHED */
2821
2822 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2823
2824 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_shares */
2825 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2826 {
2827 struct sched_avg *sa = &cfs_rq->avg;
2828 int decayed, removed = 0;
2829
2830 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2831 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2832 sa->load_avg = max_t(long, sa->load_avg - r, 0);
2833 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
2834 removed = 1;
2835 }
2836
2837 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2838 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2839 sa->util_avg = max_t(long, sa->util_avg - r, 0);
2840 sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
2841 }
2842
2843 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2844 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
2845
2846 #ifndef CONFIG_64BIT
2847 smp_wmb();
2848 cfs_rq->load_last_update_time_copy = sa->last_update_time;
2849 #endif
2850
2851 return decayed || removed;
2852 }
2853
2854 /* Update task and its cfs_rq load average */
2855 static inline void update_load_avg(struct sched_entity *se, int update_tg)
2856 {
2857 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2858 u64 now = cfs_rq_clock_task(cfs_rq);
2859 struct rq *rq = rq_of(cfs_rq);
2860 int cpu = cpu_of(rq);
2861
2862 /*
2863 	 * Track task load average for carrying it to a new CPU after migration,
2864 	 * and track group sched_entity load average for task_h_load calc in migration
2865 */
2866 __update_load_avg(now, cpu, &se->avg,
2867 se->on_rq * scale_load_down(se->load.weight),
2868 cfs_rq->curr == se, NULL);
2869
2870 if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
2871 update_tg_load_avg(cfs_rq, 0);
2872
2873 if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
2874 unsigned long max = rq->cpu_capacity_orig;
2875
2876 /*
2877 * There are a few boundary cases this might miss but it should
2878 		 * get called often enough that it should (hopefully) not be
2879 * a real problem -- added to that it only calls on the local
2880 * CPU, so if we enqueue remotely we'll miss an update, but
2881 * the next tick/schedule should update.
2882 *
2883 * It will not get called when we go idle, because the idle
2884 * thread is a different class (!fair), nor will the utilization
2885 * number include things like RT tasks.
2886 *
2887 * As is, the util number is not freq-invariant (we'd have to
2888 * implement arch_scale_freq_capacity() for that).
2889 *
2890 * See cpu_util().
2891 */
2892 cpufreq_update_util(rq_clock(rq),
2893 min(cfs_rq->avg.util_avg, max), max);
2894 }
2895 }
2896
2897 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2898 {
2899 if (!sched_feat(ATTACH_AGE_LOAD))
2900 goto skip_aging;
2901
2902 /*
2903 * If we got migrated (either between CPUs or between cgroups) we'll
2904 * have aged the average right before clearing @last_update_time.
2905 */
2906 if (se->avg.last_update_time) {
2907 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2908 &se->avg, 0, 0, NULL);
2909
2910 /*
2911 * XXX: we could have just aged the entire load away if we've been
2912 * absent from the fair class for too long.
2913 */
2914 }
2915
2916 skip_aging:
2917 se->avg.last_update_time = cfs_rq->avg.last_update_time;
2918 cfs_rq->avg.load_avg += se->avg.load_avg;
2919 cfs_rq->avg.load_sum += se->avg.load_sum;
2920 cfs_rq->avg.util_avg += se->avg.util_avg;
2921 cfs_rq->avg.util_sum += se->avg.util_sum;
2922 }
2923
2924 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2925 {
2926 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2927 &se->avg, se->on_rq * scale_load_down(se->load.weight),
2928 cfs_rq->curr == se, NULL);
2929
2930 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
2931 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
2932 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
2933 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
2934 }
2935
2936 /* Add the load generated by se into cfs_rq's load average */
2937 static inline void
2938 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2939 {
2940 struct sched_avg *sa = &se->avg;
2941 u64 now = cfs_rq_clock_task(cfs_rq);
2942 int migrated, decayed;
2943
2944 migrated = !sa->last_update_time;
2945 if (!migrated) {
2946 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2947 se->on_rq * scale_load_down(se->load.weight),
2948 cfs_rq->curr == se, NULL);
2949 }
2950
2951 decayed = update_cfs_rq_load_avg(now, cfs_rq);
2952
2953 cfs_rq->runnable_load_avg += sa->load_avg;
2954 cfs_rq->runnable_load_sum += sa->load_sum;
2955
2956 if (migrated)
2957 attach_entity_load_avg(cfs_rq, se);
2958
2959 if (decayed || migrated)
2960 update_tg_load_avg(cfs_rq, 0);
2961 }
2962
2963 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
2964 static inline void
2965 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2966 {
2967 update_load_avg(se, 1);
2968
2969 cfs_rq->runnable_load_avg =
2970 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
2971 cfs_rq->runnable_load_sum =
2972 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
2973 }
2974
2975 #ifndef CONFIG_64BIT
2976 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
2977 {
2978 u64 last_update_time_copy;
2979 u64 last_update_time;
2980
2981 do {
2982 last_update_time_copy = cfs_rq->load_last_update_time_copy;
2983 smp_rmb();
2984 last_update_time = cfs_rq->avg.last_update_time;
2985 } while (last_update_time != last_update_time_copy);
2986
2987 return last_update_time;
2988 }
2989 #else
2990 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
2991 {
2992 return cfs_rq->avg.last_update_time;
2993 }
2994 #endif
2995
2996 /*
2997  * Task first catches up with cfs_rq, and then subtracts
2998 * itself from the cfs_rq (task must be off the queue now).
2999 */
3000 void remove_entity_load_avg(struct sched_entity *se)
3001 {
3002 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3003 u64 last_update_time;
3004
3005 /*
3006 	 * A newly created task or a never-used group entity should not be removed
3007 	 * from its (source) cfs_rq
3008 */
3009 if (se->avg.last_update_time == 0)
3010 return;
3011
3012 last_update_time = cfs_rq_last_update_time(cfs_rq);
3013
3014 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
3015 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3016 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3017 }
3018
3019 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3020 {
3021 return cfs_rq->runnable_load_avg;
3022 }
3023
3024 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3025 {
3026 return cfs_rq->avg.load_avg;
3027 }
3028
3029 static int idle_balance(struct rq *this_rq);
3030
3031 #else /* CONFIG_SMP */
3032
3033 static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
3034 static inline void
3035 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3036 static inline void
3037 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3038 static inline void remove_entity_load_avg(struct sched_entity *se) {}
3039
3040 static inline void
3041 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3042 static inline void
3043 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3044
3045 static inline int idle_balance(struct rq *rq)
3046 {
3047 return 0;
3048 }
3049
3050 #endif /* CONFIG_SMP */
3051
3052 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
3053 {
3054 #ifdef CONFIG_SCHEDSTATS
3055 struct task_struct *tsk = NULL;
3056
3057 if (entity_is_task(se))
3058 tsk = task_of(se);
3059
3060 if (se->statistics.sleep_start) {
3061 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
3062
3063 if ((s64)delta < 0)
3064 delta = 0;
3065
3066 if (unlikely(delta > se->statistics.sleep_max))
3067 se->statistics.sleep_max = delta;
3068
3069 se->statistics.sleep_start = 0;
3070 se->statistics.sum_sleep_runtime += delta;
3071
3072 if (tsk) {
3073 account_scheduler_latency(tsk, delta >> 10, 1);
3074 trace_sched_stat_sleep(tsk, delta);
3075 }
3076 }
3077 if (se->statistics.block_start) {
3078 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
3079
3080 if ((s64)delta < 0)
3081 delta = 0;
3082
3083 if (unlikely(delta > se->statistics.block_max))
3084 se->statistics.block_max = delta;
3085
3086 se->statistics.block_start = 0;
3087 se->statistics.sum_sleep_runtime += delta;
3088
3089 if (tsk) {
3090 if (tsk->in_iowait) {
3091 se->statistics.iowait_sum += delta;
3092 se->statistics.iowait_count++;
3093 trace_sched_stat_iowait(tsk, delta);
3094 }
3095
3096 trace_sched_stat_blocked(tsk, delta);
3097
3098 /*
3099 * Blocking time is in units of nanosecs, so shift by
3100 * 20 to get a milliseconds-range estimation of the
3101 * amount of time that the task spent sleeping:
3102 */
3103 if (unlikely(prof_on == SLEEP_PROFILING)) {
3104 profile_hits(SLEEP_PROFILING,
3105 (void *)get_wchan(tsk),
3106 delta >> 20);
3107 }
3108 account_scheduler_latency(tsk, delta >> 10, 0);
3109 }
3110 }
3111 #endif
3112 }
3113
3114 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3115 {
3116 #ifdef CONFIG_SCHED_DEBUG
3117 s64 d = se->vruntime - cfs_rq->min_vruntime;
3118
3119 if (d < 0)
3120 d = -d;
3121
3122 if (d > 3*sysctl_sched_latency)
3123 schedstat_inc(cfs_rq, nr_spread_over);
3124 #endif
3125 }
3126
3127 static void
3128 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3129 {
3130 u64 vruntime = cfs_rq->min_vruntime;
3131
3132 /*
3133 	 * The 'current' period is already promised to the current tasks;
3134 	 * however, the extra weight of the new task will slow them down a
3135 	 * little. Place the new task so that it fits in the slot that
3136 	 * stays open at the end.
3137 */
3138 if (initial && sched_feat(START_DEBIT))
3139 vruntime += sched_vslice(cfs_rq, se);
3140
3141 /* sleeps up to a single latency don't count. */
3142 if (!initial) {
3143 unsigned long thresh = sysctl_sched_latency;
3144
3145 /*
3146 * Halve their sleep time's effect, to allow
3147 * for a gentler effect of sleepers:
3148 */
3149 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3150 thresh >>= 1;
3151
3152 vruntime -= thresh;
3153 }
3154
3155 /* ensure we never gain time by being placed backwards. */
3156 se->vruntime = max_vruntime(se->vruntime, vruntime);
3157 }
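
/*
 * Example: with the default 6ms sysctl_sched_latency and
 * GENTLE_FAIR_SLEEPERS set, a waking nice-0 task is placed at
 * min_vruntime - 3ms. If its old vruntime is already past that point
 * (a short sleep), the max_vruntime() clamp keeps the old value, so a
 * sleeper is credited at most one halved latency period.
 */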
3158
3159 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3160
3161 static inline void check_schedstat_required(void)
3162 {
3163 #ifdef CONFIG_SCHEDSTATS
3164 if (schedstat_enabled())
3165 return;
3166
3167 /* Force schedstat enabled if a dependent tracepoint is active */
3168 if (trace_sched_stat_wait_enabled() ||
3169 trace_sched_stat_sleep_enabled() ||
3170 trace_sched_stat_iowait_enabled() ||
3171 trace_sched_stat_blocked_enabled() ||
3172 trace_sched_stat_runtime_enabled()) {
3173 pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3174 "stat_blocked and stat_runtime require the "
3175 "kernel parameter schedstats=enabled or "
3176 "kernel.sched_schedstats=1\n");
3177 }
3178 #endif
3179 }
3180
3181 static void
3182 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3183 {
3184 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
3185 bool curr = cfs_rq->curr == se;
3186
3187 /*
3188 * If we're the current task, we must renormalise before calling
3189 * update_curr().
3190 */
3191 if (renorm && curr)
3192 se->vruntime += cfs_rq->min_vruntime;
3193
3194 update_curr(cfs_rq);
3195
3196 /*
3197 * Otherwise, renormalise after, such that we're placed at the current
3198 * moment in time, instead of some random moment in the past.
3199 */
3200 if (renorm && !curr)
3201 se->vruntime += cfs_rq->min_vruntime;
3202
3203 enqueue_entity_load_avg(cfs_rq, se);
3204 account_entity_enqueue(cfs_rq, se);
3205 update_cfs_shares(cfs_rq);
3206
3207 if (flags & ENQUEUE_WAKEUP) {
3208 place_entity(cfs_rq, se, 0);
3209 if (schedstat_enabled())
3210 enqueue_sleeper(cfs_rq, se);
3211 }
3212
3213 check_schedstat_required();
3214 if (schedstat_enabled()) {
3215 update_stats_enqueue(cfs_rq, se);
3216 check_spread(cfs_rq, se);
3217 }
3218 if (!curr)
3219 __enqueue_entity(cfs_rq, se);
3220 se->on_rq = 1;
3221
3222 if (cfs_rq->nr_running == 1) {
3223 list_add_leaf_cfs_rq(cfs_rq);
3224 check_enqueue_throttle(cfs_rq);
3225 }
3226 }
3227
3228 static void __clear_buddies_last(struct sched_entity *se)
3229 {
3230 for_each_sched_entity(se) {
3231 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3232 if (cfs_rq->last != se)
3233 break;
3234
3235 cfs_rq->last = NULL;
3236 }
3237 }
3238
3239 static void __clear_buddies_next(struct sched_entity *se)
3240 {
3241 for_each_sched_entity(se) {
3242 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3243 if (cfs_rq->next != se)
3244 break;
3245
3246 cfs_rq->next = NULL;
3247 }
3248 }
3249
3250 static void __clear_buddies_skip(struct sched_entity *se)
3251 {
3252 for_each_sched_entity(se) {
3253 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3254 if (cfs_rq->skip != se)
3255 break;
3256
3257 cfs_rq->skip = NULL;
3258 }
3259 }
3260
3261 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3262 {
3263 if (cfs_rq->last == se)
3264 __clear_buddies_last(se);
3265
3266 if (cfs_rq->next == se)
3267 __clear_buddies_next(se);
3268
3269 if (cfs_rq->skip == se)
3270 __clear_buddies_skip(se);
3271 }
3272
3273 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3274
3275 static void
3276 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3277 {
3278 /*
3279 * Update run-time statistics of the 'current'.
3280 */
3281 update_curr(cfs_rq);
3282 dequeue_entity_load_avg(cfs_rq, se);
3283
3284 if (schedstat_enabled())
3285 update_stats_dequeue(cfs_rq, se, flags);
3286
3287 clear_buddies(cfs_rq, se);
3288
3289 if (se != cfs_rq->curr)
3290 __dequeue_entity(cfs_rq, se);
3291 se->on_rq = 0;
3292 account_entity_dequeue(cfs_rq, se);
3293
3294 /*
3295 * Normalize the entity after updating the min_vruntime because the
3296 * update can refer to the ->curr item and we need to reflect this
3297 * movement in our normalized position.
3298 */
3299 if (!(flags & DEQUEUE_SLEEP))
3300 se->vruntime -= cfs_rq->min_vruntime;
3301
3302 /* return excess runtime on last dequeue */
3303 return_cfs_rq_runtime(cfs_rq);
3304
3305 update_min_vruntime(cfs_rq);
3306 update_cfs_shares(cfs_rq);
3307 }
3308
3309 /*
3310 * Preempt the current task with a newly woken task if needed:
3311 */
3312 static void
3313 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3314 {
3315 unsigned long ideal_runtime, delta_exec;
3316 struct sched_entity *se;
3317 s64 delta;
3318
3319 ideal_runtime = sched_slice(cfs_rq, curr);
3320 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3321 if (delta_exec > ideal_runtime) {
3322 resched_curr(rq_of(cfs_rq));
3323 /*
3324 * The current task ran long enough, ensure it doesn't get
3325 * re-elected due to buddy favours.
3326 */
3327 clear_buddies(cfs_rq, curr);
3328 return;
3329 }
3330
3331 /*
3332 * Ensure that a task that missed wakeup preemption by a
3333 * narrow margin doesn't have to wait for a full slice.
3334 * This also mitigates buddy induced latencies under load.
3335 */
3336 if (delta_exec < sysctl_sched_min_granularity)
3337 return;
3338
3339 se = __pick_first_entity(cfs_rq);
3340 delta = curr->vruntime - se->vruntime;
3341
3342 if (delta < 0)
3343 return;
3344
3345 if (delta > ideal_runtime)
3346 resched_curr(rq_of(cfs_rq));
3347 }
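
/*
 * Worked example (default tunables): three equal-weight tasks share a
 * 6ms sysctl_sched_latency, so sched_slice() gives each ~2ms. Once
 * curr has run 2ms past prev_sum_exec_runtime it is rescheduled, and
 * the vruntime-based preemption check is skipped entirely for runs
 * shorter than the 0.75ms sysctl_sched_min_granularity.
 */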
3348
3349 static void
3350 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3351 {
3352 /* 'current' is not kept within the tree. */
3353 if (se->on_rq) {
3354 /*
3355 		 * Any task has to be enqueued before it gets to execute on
3356 * a CPU. So account for the time it spent waiting on the
3357 * runqueue.
3358 */
3359 if (schedstat_enabled())
3360 update_stats_wait_end(cfs_rq, se);
3361 __dequeue_entity(cfs_rq, se);
3362 update_load_avg(se, 1);
3363 }
3364
3365 update_stats_curr_start(cfs_rq, se);
3366 cfs_rq->curr = se;
3367 #ifdef CONFIG_SCHEDSTATS
3368 /*
3369 * Track our maximum slice length, if the CPU's load is at
3370 	 * least twice that of our own weight (i.e. don't track it
3371 * when there are only lesser-weight tasks around):
3372 */
3373 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3374 se->statistics.slice_max = max(se->statistics.slice_max,
3375 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3376 }
3377 #endif
3378 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3379 }
3380
3381 static int
3382 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3383
3384 /*
3385 * Pick the next process, keeping these things in mind, in this order:
3386 * 1) keep things fair between processes/task groups
3387 * 2) pick the "next" process, since someone really wants that to run
3388 * 3) pick the "last" process, for cache locality
3389 * 4) do not run the "skip" process, if something else is available
3390 */
3391 static struct sched_entity *
3392 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3393 {
3394 struct sched_entity *left = __pick_first_entity(cfs_rq);
3395 struct sched_entity *se;
3396
3397 /*
3398 	 * If curr is set we have to see if it's left of the leftmost entity
3399 * still in the tree, provided there was anything in the tree at all.
3400 */
3401 if (!left || (curr && entity_before(curr, left)))
3402 left = curr;
3403
3404 se = left; /* ideally we run the leftmost entity */
3405
3406 /*
3407 * Avoid running the skip buddy, if running something else can
3408 * be done without getting too unfair.
3409 */
3410 if (cfs_rq->skip == se) {
3411 struct sched_entity *second;
3412
3413 if (se == curr) {
3414 second = __pick_first_entity(cfs_rq);
3415 } else {
3416 second = __pick_next_entity(se);
3417 if (!second || (curr && entity_before(curr, second)))
3418 second = curr;
3419 }
3420
3421 if (second && wakeup_preempt_entity(second, left) < 1)
3422 se = second;
3423 }
3424
3425 /*
3426 * Prefer last buddy, try to return the CPU to a preempted task.
3427 */
3428 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3429 se = cfs_rq->last;
3430
3431 /*
3432 * Someone really wants this to run. If it's not unfair, run it.
3433 */
3434 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3435 se = cfs_rq->next;
3436
3437 clear_buddies(cfs_rq, se);
3438
3439 return se;
3440 }
3441
3442 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3443
3444 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3445 {
3446 /*
3447 * If still on the runqueue then deactivate_task()
3448 * was not called and update_curr() has to be done:
3449 */
3450 if (prev->on_rq)
3451 update_curr(cfs_rq);
3452
3453 /* throttle cfs_rqs exceeding runtime */
3454 check_cfs_rq_runtime(cfs_rq);
3455
3456 if (schedstat_enabled()) {
3457 check_spread(cfs_rq, prev);
3458 if (prev->on_rq)
3459 update_stats_wait_start(cfs_rq, prev);
3460 }
3461
3462 if (prev->on_rq) {
3463 /* Put 'current' back into the tree. */
3464 __enqueue_entity(cfs_rq, prev);
3465 /* in !on_rq case, update occurred at dequeue */
3466 update_load_avg(prev, 0);
3467 }
3468 cfs_rq->curr = NULL;
3469 }
3470
3471 static void
3472 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3473 {
3474 /*
3475 * Update run-time statistics of the 'current'.
3476 */
3477 update_curr(cfs_rq);
3478
3479 /*
3480 * Ensure that runnable average is periodically updated.
3481 */
3482 update_load_avg(curr, 1);
3483 update_cfs_shares(cfs_rq);
3484
3485 #ifdef CONFIG_SCHED_HRTICK
3486 /*
3487 * queued ticks are scheduled to match the slice, so don't bother
3488 * validating it and just reschedule.
3489 */
3490 if (queued) {
3491 resched_curr(rq_of(cfs_rq));
3492 return;
3493 }
3494 /*
3495 * don't let the period tick interfere with the hrtick preemption
3496 */
3497 if (!sched_feat(DOUBLE_TICK) &&
3498 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3499 return;
3500 #endif
3501
3502 if (cfs_rq->nr_running > 1)
3503 check_preempt_tick(cfs_rq, curr);
3504 }
3505
3506
3507 /**************************************************
3508 * CFS bandwidth control machinery
3509 */
3510
3511 #ifdef CONFIG_CFS_BANDWIDTH
3512
3513 #ifdef HAVE_JUMP_LABEL
3514 static struct static_key __cfs_bandwidth_used;
3515
3516 static inline bool cfs_bandwidth_used(void)
3517 {
3518 return static_key_false(&__cfs_bandwidth_used);
3519 }
3520
3521 void cfs_bandwidth_usage_inc(void)
3522 {
3523 static_key_slow_inc(&__cfs_bandwidth_used);
3524 }
3525
3526 void cfs_bandwidth_usage_dec(void)
3527 {
3528 static_key_slow_dec(&__cfs_bandwidth_used);
3529 }
3530 #else /* HAVE_JUMP_LABEL */
3531 static bool cfs_bandwidth_used(void)
3532 {
3533 return true;
3534 }
3535
3536 void cfs_bandwidth_usage_inc(void) {}
3537 void cfs_bandwidth_usage_dec(void) {}
3538 #endif /* HAVE_JUMP_LABEL */
3539
3540 /*
3541 * default period for cfs group bandwidth.
3542 * default: 0.1s, units: nanoseconds
3543 */
3544 static inline u64 default_cfs_period(void)
3545 {
3546 return 100000000ULL;
3547 }
3548
3549 static inline u64 sched_cfs_bandwidth_slice(void)
3550 {
3551 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3552 }
3553
3554 /*
3555 * Replenish runtime according to assigned quota and update expiration time.
3556 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3557 * additional synchronization around rq->lock.
3558 *
3559 * requires cfs_b->lock
3560 */
3561 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3562 {
3563 u64 now;
3564
3565 if (cfs_b->quota == RUNTIME_INF)
3566 return;
3567
3568 now = sched_clock_cpu(smp_processor_id());
3569 cfs_b->runtime = cfs_b->quota;
3570 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3571 }
3572
3573 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3574 {
3575 return &tg->cfs_bandwidth;
3576 }
3577
3578 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */
3579 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3580 {
3581 if (unlikely(cfs_rq->throttle_count))
3582 return cfs_rq->throttled_clock_task;
3583
3584 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3585 }
3586
3587 /* returns 0 on failure to allocate runtime */
3588 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3589 {
3590 struct task_group *tg = cfs_rq->tg;
3591 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3592 u64 amount = 0, min_amount, expires;
3593
3594 /* note: this is a positive sum as runtime_remaining <= 0 */
3595 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3596
3597 raw_spin_lock(&cfs_b->lock);
3598 if (cfs_b->quota == RUNTIME_INF)
3599 amount = min_amount;
3600 else {
3601 start_cfs_bandwidth(cfs_b);
3602
3603 if (cfs_b->runtime > 0) {
3604 amount = min(cfs_b->runtime, min_amount);
3605 cfs_b->runtime -= amount;
3606 cfs_b->idle = 0;
3607 }
3608 }
3609 expires = cfs_b->runtime_expires;
3610 raw_spin_unlock(&cfs_b->lock);
3611
3612 cfs_rq->runtime_remaining += amount;
3613 /*
3614 * we may have advanced our local expiration to account for allowed
3615 * spread between our sched_clock and the one on which runtime was
3616 * issued.
3617 */
3618 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3619 cfs_rq->runtime_expires = expires;
3620
3621 return cfs_rq->runtime_remaining > 0;
3622 }
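
/*
 * Worked example (assuming the default 5ms sysctl_sched_cfs_bandwidth_slice):
 * if the cfs_rq overran by 1ms (runtime_remaining = -1ms), min_amount = 6ms.
 * With only 4ms left in the global pool, amount = 4ms and runtime_remaining
 * becomes 3ms, so this returns 1 and the queue may keep running until that
 * local slice is consumed.
 */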
3623
3624 /*
3625 * Note: This depends on the synchronization provided by sched_clock and the
3626 * fact that rq->clock snapshots this value.
3627 */
3628 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3629 {
3630 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3631
3632 /* if the deadline is ahead of our clock, nothing to do */
3633 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3634 return;
3635
3636 if (cfs_rq->runtime_remaining < 0)
3637 return;
3638
3639 /*
3640 * If the local deadline has passed we have to consider the
3641 * possibility that our sched_clock is 'fast' and the global deadline
3642 * has not truly expired.
3643 *
3644 	 * Fortunately we can determine whether this is the case by checking
3645 * whether the global deadline has advanced. It is valid to compare
3646 * cfs_b->runtime_expires without any locks since we only care about
3647 * exact equality, so a partial write will still work.
3648 */
3649
3650 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3651 /* extend local deadline, drift is bounded above by 2 ticks */
3652 cfs_rq->runtime_expires += TICK_NSEC;
3653 } else {
3654 /* global deadline is ahead, expiration has passed */
3655 cfs_rq->runtime_remaining = 0;
3656 }
3657 }
3658
3659 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3660 {
3661 /* dock delta_exec before expiring quota (as it could span periods) */
3662 cfs_rq->runtime_remaining -= delta_exec;
3663 expire_cfs_rq_runtime(cfs_rq);
3664
3665 if (likely(cfs_rq->runtime_remaining > 0))
3666 return;
3667
3668 /*
3669 * if we're unable to extend our runtime we resched so that the active
3670 * hierarchy can be throttled
3671 */
3672 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3673 resched_curr(rq_of(cfs_rq));
3674 }
3675
3676 static __always_inline
3677 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3678 {
3679 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3680 return;
3681
3682 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3683 }
3684
3685 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3686 {
3687 return cfs_bandwidth_used() && cfs_rq->throttled;
3688 }
3689
3690 /* check whether cfs_rq, or any parent, is throttled */
3691 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3692 {
3693 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3694 }
3695
3696 /*
3697 * Ensure that neither of the group entities corresponding to src_cpu or
3698 * dest_cpu are members of a throttled hierarchy when performing group
3699 * load-balance operations.
3700 */
3701 static inline int throttled_lb_pair(struct task_group *tg,
3702 int src_cpu, int dest_cpu)
3703 {
3704 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3705
3706 src_cfs_rq = tg->cfs_rq[src_cpu];
3707 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3708
3709 return throttled_hierarchy(src_cfs_rq) ||
3710 throttled_hierarchy(dest_cfs_rq);
3711 }
3712
3713 /* updated child weight may affect parent so we have to do this bottom up */
3714 static int tg_unthrottle_up(struct task_group *tg, void *data)
3715 {
3716 struct rq *rq = data;
3717 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3718
3719 cfs_rq->throttle_count--;
3720 #ifdef CONFIG_SMP
3721 if (!cfs_rq->throttle_count) {
3722 /* adjust cfs_rq_clock_task() */
3723 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3724 cfs_rq->throttled_clock_task;
3725 }
3726 #endif
3727
3728 return 0;
3729 }
3730
3731 static int tg_throttle_down(struct task_group *tg, void *data)
3732 {
3733 struct rq *rq = data;
3734 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3735
3736 /* group is entering throttled state, stop time */
3737 if (!cfs_rq->throttle_count)
3738 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3739 cfs_rq->throttle_count++;
3740
3741 return 0;
3742 }
3743
3744 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3745 {
3746 struct rq *rq = rq_of(cfs_rq);
3747 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3748 struct sched_entity *se;
3749 long task_delta, dequeue = 1;
3750 bool empty;
3751
3752 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3753
3754 /* freeze hierarchy runnable averages while throttled */
3755 rcu_read_lock();
3756 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3757 rcu_read_unlock();
3758
3759 task_delta = cfs_rq->h_nr_running;
3760 for_each_sched_entity(se) {
3761 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3762 /* throttled entity or throttle-on-deactivate */
3763 if (!se->on_rq)
3764 break;
3765
3766 if (dequeue)
3767 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3768 qcfs_rq->h_nr_running -= task_delta;
3769
3770 if (qcfs_rq->load.weight)
3771 dequeue = 0;
3772 }
3773
3774 if (!se)
3775 sub_nr_running(rq, task_delta);
3776
3777 cfs_rq->throttled = 1;
3778 cfs_rq->throttled_clock = rq_clock(rq);
3779 raw_spin_lock(&cfs_b->lock);
3780 empty = list_empty(&cfs_b->throttled_cfs_rq);
3781
3782 /*
3783 * Add to the _head_ of the list, so that an already-started
3784 * distribute_cfs_runtime will not see us
3785 */
3786 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3787
3788 /*
3789 * If we're the first throttled task, make sure the bandwidth
3790 * timer is running.
3791 */
3792 if (empty)
3793 start_cfs_bandwidth(cfs_b);
3794
3795 raw_spin_unlock(&cfs_b->lock);
3796 }
3797
3798 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3799 {
3800 struct rq *rq = rq_of(cfs_rq);
3801 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3802 struct sched_entity *se;
3803 int enqueue = 1;
3804 long task_delta;
3805
3806 se = cfs_rq->tg->se[cpu_of(rq)];
3807
3808 cfs_rq->throttled = 0;
3809
3810 update_rq_clock(rq);
3811
3812 raw_spin_lock(&cfs_b->lock);
3813 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3814 list_del_rcu(&cfs_rq->throttled_list);
3815 raw_spin_unlock(&cfs_b->lock);
3816
3817 /* update hierarchical throttle state */
3818 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3819
3820 if (!cfs_rq->load.weight)
3821 return;
3822
3823 task_delta = cfs_rq->h_nr_running;
3824 for_each_sched_entity(se) {
3825 if (se->on_rq)
3826 enqueue = 0;
3827
3828 cfs_rq = cfs_rq_of(se);
3829 if (enqueue)
3830 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3831 cfs_rq->h_nr_running += task_delta;
3832
3833 if (cfs_rq_throttled(cfs_rq))
3834 break;
3835 }
3836
3837 if (!se)
3838 add_nr_running(rq, task_delta);
3839
3840 /* determine whether we need to wake up potentially idle cpu */
3841 if (rq->curr == rq->idle && rq->cfs.nr_running)
3842 resched_curr(rq);
3843 }
3844
3845 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3846 u64 remaining, u64 expires)
3847 {
3848 struct cfs_rq *cfs_rq;
3849 u64 runtime;
3850 u64 starting_runtime = remaining;
3851
3852 rcu_read_lock();
3853 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3854 throttled_list) {
3855 struct rq *rq = rq_of(cfs_rq);
3856
3857 raw_spin_lock(&rq->lock);
3858 if (!cfs_rq_throttled(cfs_rq))
3859 goto next;
3860
3861 runtime = -cfs_rq->runtime_remaining + 1;
3862 if (runtime > remaining)
3863 runtime = remaining;
3864 remaining -= runtime;
3865
3866 cfs_rq->runtime_remaining += runtime;
3867 cfs_rq->runtime_expires = expires;
3868
3869 /* we check whether we're throttled above */
3870 if (cfs_rq->runtime_remaining > 0)
3871 unthrottle_cfs_rq(cfs_rq);
3872
3873 next:
3874 raw_spin_unlock(&rq->lock);
3875
3876 if (!remaining)
3877 break;
3878 }
3879 rcu_read_unlock();
3880
3881 return starting_runtime - remaining;
3882 }
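
/*
 * A standalone sketch of the distribution policy above, assuming a plain
 * array of runtime_remaining values in place of the RCU throttled list:
 * each throttled queue gets just enough to reach a remaining runtime of 1
 * (the smallest schedulable amount), never more, until the pool is empty.
 */
#if 0
#include <stdint.h>

static uint64_t distribute_sketch(int64_t *runtime_remaining, int nr,
				  uint64_t pool)
{
	uint64_t start = pool;
	int i;

	for (i = 0; i < nr && pool; i++) {
		uint64_t want, give;

		if (runtime_remaining[i] > 0)	/* not throttled, skip */
			continue;
		want = (uint64_t)(-runtime_remaining[i]) + 1;
		give = want > pool ? pool : want;
		runtime_remaining[i] += give;
		pool -= give;
	}
	return start - pool;		/* runtime actually handed out */
}
#endif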
3883
3884 /*
3885 * Responsible for refilling a task_group's bandwidth and unthrottling its
3886 * cfs_rqs as appropriate. If there has been no activity within the last
3887 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3888 * used to track this state.
3889 */
3890 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3891 {
3892 u64 runtime, runtime_expires;
3893 int throttled;
3894
3895 /* no need to continue the timer with no bandwidth constraint */
3896 if (cfs_b->quota == RUNTIME_INF)
3897 goto out_deactivate;
3898
3899 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3900 cfs_b->nr_periods += overrun;
3901
3902 /*
3903 * idle depends on !throttled (for the case of a large deficit), and if
3904 * we're going inactive then everything else can be deferred
3905 */
3906 if (cfs_b->idle && !throttled)
3907 goto out_deactivate;
3908
3909 __refill_cfs_bandwidth_runtime(cfs_b);
3910
3911 if (!throttled) {
3912 /* mark as potentially idle for the upcoming period */
3913 cfs_b->idle = 1;
3914 return 0;
3915 }
3916
3917 /* account preceding periods in which throttling occurred */
3918 cfs_b->nr_throttled += overrun;
3919
3920 runtime_expires = cfs_b->runtime_expires;
3921
3922 /*
3923 * This check is repeated as we are holding onto the new bandwidth while
3924 * we unthrottle. This can potentially race with an unthrottled group
3925 * trying to acquire new bandwidth from the global pool. This can result
3926 * in us over-using our runtime if it is all used during this loop, but
3927 * only by limited amounts in that extreme case.
3928 */
3929 while (throttled && cfs_b->runtime > 0) {
3930 runtime = cfs_b->runtime;
3931 raw_spin_unlock(&cfs_b->lock);
3932 /* we can't nest cfs_b->lock while distributing bandwidth */
3933 runtime = distribute_cfs_runtime(cfs_b, runtime,
3934 runtime_expires);
3935 raw_spin_lock(&cfs_b->lock);
3936
3937 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3938
3939 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3940 }
3941
3942 /*
3943 * While we are ensured activity in the period following an
3944 * unthrottle, this also covers the case in which the new bandwidth is
3945 * insufficient to cover the existing bandwidth deficit. (Forcing the
3946 * timer to remain active while there are any throttled entities.)
3947 */
3948 cfs_b->idle = 0;
3949
3950 return 0;
3951
3952 out_deactivate:
3953 return 1;
3954 }
3955
3956 /* a cfs_rq won't donate quota below this amount */
3957 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3958 /* minimum remaining period time to redistribute slack quota */
3959 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3960 /* how long we wait to gather additional slack before distributing */
3961 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3962
3963 /*
3964 * Are we near the end of the current quota period?
3965 *
3966 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3967 * hrtimer base being cleared by hrtimer_start. In the case of
3968 * migrate_hrtimers, base is never cleared, so we are fine.
3969 */
3970 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3971 {
3972 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3973 u64 remaining;
3974
3975 /* if the call-back is running a quota refresh is already occurring */
3976 if (hrtimer_callback_running(refresh_timer))
3977 return 1;
3978
3979 /* is a quota refresh about to occur? */
3980 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3981 if (remaining < min_expire)
3982 return 1;
3983
3984 return 0;
3985 }
3986
3987 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3988 {
3989 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3990
3991 /* if there's a quota refresh soon don't bother with slack */
3992 if (runtime_refresh_within(cfs_b, min_left))
3993 return;
3994
3995 hrtimer_start(&cfs_b->slack_timer,
3996 ns_to_ktime(cfs_bandwidth_slack_period),
3997 HRTIMER_MODE_REL);
3998 }
3999
4000 /* we know any runtime found here is valid as update_curr() precedes return */
4001 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4002 {
4003 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4004 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4005
4006 if (slack_runtime <= 0)
4007 return;
4008
4009 raw_spin_lock(&cfs_b->lock);
4010 if (cfs_b->quota != RUNTIME_INF &&
4011 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4012 cfs_b->runtime += slack_runtime;
4013
4014 /* we are under rq->lock, defer unthrottling using a timer */
4015 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4016 !list_empty(&cfs_b->throttled_cfs_rq))
4017 start_cfs_slack_bandwidth(cfs_b);
4018 }
4019 raw_spin_unlock(&cfs_b->lock);
4020
4021 /* even if it's not valid for return we don't want to try again */
4022 cfs_rq->runtime_remaining -= slack_runtime;
4023 }
4024
4025 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4026 {
4027 if (!cfs_bandwidth_used())
4028 return;
4029
4030 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4031 return;
4032
4033 __return_cfs_rq_runtime(cfs_rq);
4034 }
4035
4036 /*
4037 * This is done with a timer (instead of inline with bandwidth return) since
4038 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4039 */
4040 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4041 {
4042 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4043 u64 expires;
4044
4045 /* confirm we're still not at a refresh boundary */
4046 raw_spin_lock(&cfs_b->lock);
4047 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4048 raw_spin_unlock(&cfs_b->lock);
4049 return;
4050 }
4051
4052 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4053 runtime = cfs_b->runtime;
4054
4055 expires = cfs_b->runtime_expires;
4056 raw_spin_unlock(&cfs_b->lock);
4057
4058 if (!runtime)
4059 return;
4060
4061 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4062
4063 raw_spin_lock(&cfs_b->lock);
4064 if (expires == cfs_b->runtime_expires)
4065 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4066 raw_spin_unlock(&cfs_b->lock);
4067 }
4068
4069 /*
4070 * When a group wakes up we want to make sure that its quota is not already
4071 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4072 * runtime as update_curr() throttling can not trigger until it's on-rq.
4073 */
4074 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4075 {
4076 if (!cfs_bandwidth_used())
4077 return;
4078
4079 /* an active group must be handled by the update_curr()->put() path */
4080 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4081 return;
4082
4083 /* ensure the group is not already throttled */
4084 if (cfs_rq_throttled(cfs_rq))
4085 return;
4086
4087 /* update runtime allocation */
4088 account_cfs_rq_runtime(cfs_rq, 0);
4089 if (cfs_rq->runtime_remaining <= 0)
4090 throttle_cfs_rq(cfs_rq);
4091 }
4092
4093 /* conditionally throttle active cfs_rq's from put_prev_entity() */
4094 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4095 {
4096 if (!cfs_bandwidth_used())
4097 return false;
4098
4099 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4100 return false;
4101
4102 /*
4103 * it's possible for a throttled entity to be forced into a running
4104 * state (e.g. set_curr_task), in this case we're finished.
4105 */
4106 if (cfs_rq_throttled(cfs_rq))
4107 return true;
4108
4109 throttle_cfs_rq(cfs_rq);
4110 return true;
4111 }
4112
4113 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4114 {
4115 struct cfs_bandwidth *cfs_b =
4116 container_of(timer, struct cfs_bandwidth, slack_timer);
4117
4118 do_sched_cfs_slack_timer(cfs_b);
4119
4120 return HRTIMER_NORESTART;
4121 }
4122
4123 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4124 {
4125 struct cfs_bandwidth *cfs_b =
4126 container_of(timer, struct cfs_bandwidth, period_timer);
4127 int overrun;
4128 int idle = 0;
4129
4130 raw_spin_lock(&cfs_b->lock);
4131 for (;;) {
4132 overrun = hrtimer_forward_now(timer, cfs_b->period);
4133 if (!overrun)
4134 break;
4135
4136 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4137 }
4138 if (idle)
4139 cfs_b->period_active = 0;
4140 raw_spin_unlock(&cfs_b->lock);
4141
4142 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4143 }
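
/*
 * Sketch of the overrun accounting in the loop above, using a naive
 * linear walk in place of hrtimer_forward_now() (the real helper computes
 * this without looping): the timer is advanced by whole periods and each
 * missed period is reported exactly once, so do_sched_cfs_period_timer()
 * can refill quota and bump nr_periods correctly even after a long delay.
 */
#if 0
#include <stdint.h>

static int forward_now_sketch(uint64_t *expires, uint64_t now, uint64_t period)
{
	int overrun = 0;

	while (*expires <= now) {	/* expired period boundary */
		*expires += period;
		overrun++;
	}
	return overrun;			/* 0 terminates the loop above */
}
#endif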
4144
4145 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4146 {
4147 raw_spin_lock_init(&cfs_b->lock);
4148 cfs_b->runtime = 0;
4149 cfs_b->quota = RUNTIME_INF;
4150 cfs_b->period = ns_to_ktime(default_cfs_period());
4151
4152 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4153 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4154 cfs_b->period_timer.function = sched_cfs_period_timer;
4155 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4156 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4157 }
4158
4159 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4160 {
4161 cfs_rq->runtime_enabled = 0;
4162 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4163 }
4164
4165 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4166 {
4167 lockdep_assert_held(&cfs_b->lock);
4168
4169 if (!cfs_b->period_active) {
4170 cfs_b->period_active = 1;
4171 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4172 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4173 }
4174 }
4175
4176 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4177 {
4178 /* init_cfs_bandwidth() was not called */
4179 if (!cfs_b->throttled_cfs_rq.next)
4180 return;
4181
4182 hrtimer_cancel(&cfs_b->period_timer);
4183 hrtimer_cancel(&cfs_b->slack_timer);
4184 }
4185
4186 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4187 {
4188 struct cfs_rq *cfs_rq;
4189
4190 for_each_leaf_cfs_rq(rq, cfs_rq) {
4191 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4192
4193 raw_spin_lock(&cfs_b->lock);
4194 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4195 raw_spin_unlock(&cfs_b->lock);
4196 }
4197 }
4198
4199 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4200 {
4201 struct cfs_rq *cfs_rq;
4202
4203 for_each_leaf_cfs_rq(rq, cfs_rq) {
4204 if (!cfs_rq->runtime_enabled)
4205 continue;
4206
4207 /*
4208 * clock_task is not advancing so we just need to make sure
4209 * there's some valid quota amount
4210 */
4211 cfs_rq->runtime_remaining = 1;
4212 /*
4213 * Offline rq is schedulable till cpu is completely disabled
4214 * in take_cpu_down(), so we prevent new cfs throttling here.
4215 */
4216 cfs_rq->runtime_enabled = 0;
4217
4218 if (cfs_rq_throttled(cfs_rq))
4219 unthrottle_cfs_rq(cfs_rq);
4220 }
4221 }
4222
4223 #else /* CONFIG_CFS_BANDWIDTH */
4224 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4225 {
4226 return rq_clock_task(rq_of(cfs_rq));
4227 }
4228
4229 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4230 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4231 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4232 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4233
4234 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4235 {
4236 return 0;
4237 }
4238
4239 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4240 {
4241 return 0;
4242 }
4243
4244 static inline int throttled_lb_pair(struct task_group *tg,
4245 int src_cpu, int dest_cpu)
4246 {
4247 return 0;
4248 }
4249
4250 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4251
4252 #ifdef CONFIG_FAIR_GROUP_SCHED
4253 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4254 #endif
4255
4256 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4257 {
4258 return NULL;
4259 }
4260 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4261 static inline void update_runtime_enabled(struct rq *rq) {}
4262 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4263
4264 #endif /* CONFIG_CFS_BANDWIDTH */
4265
4266 /**************************************************
4267 * CFS operations on tasks:
4268 */
4269
4270 #ifdef CONFIG_SCHED_HRTICK
4271 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4272 {
4273 struct sched_entity *se = &p->se;
4274 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4275
4276 WARN_ON(task_rq(p) != rq);
4277
4278 if (cfs_rq->nr_running > 1) {
4279 u64 slice = sched_slice(cfs_rq, se);
4280 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4281 s64 delta = slice - ran;
4282
4283 if (delta < 0) {
4284 if (rq->curr == p)
4285 resched_curr(rq);
4286 return;
4287 }
4288 hrtick_start(rq, delta);
4289 }
4290 }
4291
4292 /*
4293 * called from enqueue/dequeue and updates the hrtick when the
4294 * current task is from our class and nr_running is low enough
4295 * to matter.
4296 */
4297 static void hrtick_update(struct rq *rq)
4298 {
4299 struct task_struct *curr = rq->curr;
4300
4301 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4302 return;
4303
4304 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4305 hrtick_start_fair(rq, curr);
4306 }
4307 #else /* !CONFIG_SCHED_HRTICK */
4308 static inline void
4309 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4310 {
4311 }
4312
4313 static inline void hrtick_update(struct rq *rq)
4314 {
4315 }
4316 #endif
4317
4318 /*
4319 * The enqueue_task method is called before nr_running is
4320 * increased. Here we update the fair scheduling stats and
4321 * then put the task into the rbtree:
4322 */
4323 static void
4324 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4325 {
4326 struct cfs_rq *cfs_rq;
4327 struct sched_entity *se = &p->se;
4328
4329 for_each_sched_entity(se) {
4330 if (se->on_rq)
4331 break;
4332 cfs_rq = cfs_rq_of(se);
4333 enqueue_entity(cfs_rq, se, flags);
4334
4335 /*
4336 * end evaluation on encountering a throttled cfs_rq
4337 *
4338 * note: in the case of encountering a throttled cfs_rq we will
4339 * post the final h_nr_running increment below.
4340 */
4341 if (cfs_rq_throttled(cfs_rq))
4342 break;
4343 cfs_rq->h_nr_running++;
4344
4345 flags = ENQUEUE_WAKEUP;
4346 }
4347
4348 for_each_sched_entity(se) {
4349 cfs_rq = cfs_rq_of(se);
4350 cfs_rq->h_nr_running++;
4351
4352 if (cfs_rq_throttled(cfs_rq))
4353 break;
4354
4355 update_load_avg(se, 1);
4356 update_cfs_shares(cfs_rq);
4357 }
4358
4359 if (!se)
4360 add_nr_running(rq, 1);
4361
4362 hrtick_update(rq);
4363 }
4364
4365 static void set_next_buddy(struct sched_entity *se);
4366
4367 /*
4368 * The dequeue_task method is called before nr_running is
4369 * decreased. We remove the task from the rbtree and
4370 * update the fair scheduling stats:
4371 */
4372 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4373 {
4374 struct cfs_rq *cfs_rq;
4375 struct sched_entity *se = &p->se;
4376 int task_sleep = flags & DEQUEUE_SLEEP;
4377
4378 for_each_sched_entity(se) {
4379 cfs_rq = cfs_rq_of(se);
4380 dequeue_entity(cfs_rq, se, flags);
4381
4382 /*
4383 * end evaluation on encountering a throttled cfs_rq
4384 *
4385 * note: in the case of encountering a throttled cfs_rq we will
4386 * post the final h_nr_running decrement below.
4387 */
4388 if (cfs_rq_throttled(cfs_rq))
4389 break;
4390 cfs_rq->h_nr_running--;
4391
4392 /* Don't dequeue parent if it has other entities besides us */
4393 if (cfs_rq->load.weight) {
4394 /*
4395 * Bias pick_next to pick a task from this cfs_rq, as
4396 * p is sleeping when it is within its sched_slice.
4397 */
4398 if (task_sleep && parent_entity(se))
4399 set_next_buddy(parent_entity(se));
4400
4401 /* avoid re-evaluating load for this entity */
4402 se = parent_entity(se);
4403 break;
4404 }
4405 flags |= DEQUEUE_SLEEP;
4406 }
4407
4408 for_each_sched_entity(se) {
4409 cfs_rq = cfs_rq_of(se);
4410 cfs_rq->h_nr_running--;
4411
4412 if (cfs_rq_throttled(cfs_rq))
4413 break;
4414
4415 update_load_avg(se, 1);
4416 update_cfs_shares(cfs_rq);
4417 }
4418
4419 if (!se)
4420 sub_nr_running(rq, 1);
4421
4422 hrtick_update(rq);
4423 }
4424
4425 #ifdef CONFIG_SMP
4426
4427 /*
4428 * per rq 'load' array crap; XXX kill this.
4429 */
4430
4431 /*
4432 * The exact cpuload calculated at every tick would be:
4433 *
4434 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4435 *
4436 * If a cpu misses updates for n ticks (as it was idle) and update gets
4437 * called on the n+1-th tick when cpu may be busy, then we have:
4438 *
4439 * load_n = (1 - 1/2^i)^n * load_0
4440 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
4441 *
4442 * decay_load_missed() below does efficient calculation of
4443 *
4444 * load' = (1 - 1/2^i)^n * load
4445 *
4446 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
4447 * This allows us to precompute the above in said factors, thereby allowing the
4448 * reduction of an arbitrary n in O(log_2 n) steps. (See also
4449 * fixed_power_int())
4450 *
4451 * The calculation is approximated on a 128 point scale.
4452 */
4453 #define DEGRADE_SHIFT 7
4454
4455 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4456 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4457 { 0, 0, 0, 0, 0, 0, 0, 0 },
4458 { 64, 32, 8, 0, 0, 0, 0, 0 },
4459 { 96, 72, 40, 12, 1, 0, 0, 0 },
4460 { 112, 98, 75, 43, 15, 1, 0, 0 },
4461 { 120, 112, 98, 76, 45, 16, 2, 0 }
4462 };
4463
4464 /*
4465 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4466 * would be when CPU is idle and so we just decay the old load without
4467 * adding any new load.
4468 */
4469 static unsigned long
4470 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4471 {
4472 int j = 0;
4473
4474 if (!missed_updates)
4475 return load;
4476
4477 if (missed_updates >= degrade_zero_ticks[idx])
4478 return 0;
4479
4480 if (idx == 1)
4481 return load >> missed_updates;
4482
4483 while (missed_updates) {
4484 if (missed_updates % 2)
4485 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4486
4487 missed_updates >>= 1;
4488 j++;
4489 }
4490 return load;
4491 }
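
/*
 * Sketch validating the decomposition above against a naive reference,
 * not kernel code: one tick of decay at index idx is load -= load >> idx,
 * i.e. load * (1 - 1/2^idx), which is what degrade_factor[idx][0]
 * approximates on the 128-point scale. Walking the bits of 'missed' with
 * the precomputed squared factors must agree with applying the per-tick
 * factor 'missed' times.
 */
#if 0
static unsigned long decay_naive(unsigned long load, unsigned long missed,
				 int idx)
{
	while (missed--)
		load -= load >> idx;	/* load *= (1 - 1/2^idx) */
	return load;
}
#endif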
4492
4493 /**
4494 * __update_cpu_load - update the rq->cpu_load[] statistics
4495 * @this_rq: The rq to update statistics for
4496 * @this_load: The current load
4497 * @pending_updates: The number of missed updates
4498 * @active: !0 for NOHZ_FULL
4499 *
4500 * Update rq->cpu_load[] statistics. This function is usually called every
4501 * scheduler tick (TICK_NSEC).
4502 *
4503 * This function computes a decaying average:
4504 *
4505 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
4506 *
4507 * Because of NOHZ it might not get called on every tick which gives need for
4508 * the @pending_updates argument.
4509 *
4510 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
4511 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
4512 * = A * (A * load[i]_n-2 + B) + B
4513 * = A * (A * (A * load[i]_n-3 + B) + B) + B
4514 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
4515 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
4516 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
4517 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
4518 *
4519 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
4520 * any change in load would have resulted in the tick being turned back on.
4521 *
4522 * For regular NOHZ, this reduces to:
4523 *
4524 * load[i]_n = (1 - 1/2^i)^n * load[i]_0
4525 *
4526 * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
4527 * term. See the @active parameter.
4528 */
4529 static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
4530 unsigned long pending_updates, int active)
4531 {
4532 unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
4533 int i, scale;
4534
4535 this_rq->nr_load_updates++;
4536
4537 /* Update our load: */
4538 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
4539 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4540 unsigned long old_load, new_load;
4541
4542 /* scale is effectively 1 << i now, and >> i divides by scale */
4543
4544 old_load = this_rq->cpu_load[i];
4545 old_load = decay_load_missed(old_load, pending_updates - 1, i);
4546 if (tickless_load) {
4547 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
4548 /*
4549 * old_load can never be a negative value because a
4550 * decayed tickless_load cannot be greater than the
4551 * original tickless_load.
4552 */
4553 old_load += tickless_load;
4554 }
4555 new_load = this_load;
4556 /*
4557 * Round up the averaging division if load is increasing. This
4558 * prevents us from getting stuck on 9 if the load is 10, for
4559 * example.
4560 */
4561 if (new_load > old_load)
4562 new_load += scale - 1;
4563
4564 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4565 }
4566
4567 sched_avg_update(this_rq);
4568 }
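
/*
 * A floating-point sketch (for illustration only) of the closed form
 * derived in the comment above: iterating load' = A*load + B for n ticks
 * equals A^n * load_0 + ((1 - A^n) / (1 - A)) * B, which is the quantity
 * the NOHZ_FULL path reconstructs from tickless_load after missed ticks.
 */
#if 0
#include <assert.h>
#include <math.h>

static void check_closed_form(double load0, double B, int i, int n)
{
	double A = 1.0 - 1.0 / (double)(1 << i);
	double iterated = load0;
	int k;

	for (k = 0; k < n; k++)
		iterated = A * iterated + B;	/* per-tick update */

	assert(fabs(iterated -
		    (pow(A, n) * load0 +
		     (1.0 - pow(A, n)) / (1.0 - A) * B)) < 1e-6);
}
#endif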
4569
4570 /* Used instead of source_load when we know the type == 0 */
4571 static unsigned long weighted_cpuload(const int cpu)
4572 {
4573 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4574 }
4575
4576 #ifdef CONFIG_NO_HZ_COMMON
4577 static void __update_cpu_load_nohz(struct rq *this_rq,
4578 unsigned long curr_jiffies,
4579 unsigned long load,
4580 int active)
4581 {
4582 unsigned long pending_updates;
4583
4584 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4585 if (pending_updates) {
4586 this_rq->last_load_update_tick = curr_jiffies;
4587 /*
4588 * In the regular NOHZ case, we were idle, this means load 0.
4589 * In the NOHZ_FULL case, we were non-idle, we should consider
4590 * its weighted load.
4591 */
4592 __update_cpu_load(this_rq, load, pending_updates, active);
4593 }
4594 }
4595
4596 /*
4597 * There is no sane way to deal with nohz on smp when using jiffies because the
4598 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
4599 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
4600 *
4601 * Therefore we cannot use the delta approach from the regular tick since that
4602 * would seriously skew the load calculation. However we'll make do for those
4603 * updates happening while idle (nohz_idle_balance) or coming out of idle
4604 * (tick_nohz_idle_exit).
4605 *
4606 * This means we might still be one tick off for nohz periods.
4607 */
4608
4609 /*
4610 * Called from nohz_idle_balance() to update the load ratings before doing the
4611 * idle balance.
4612 */
4613 static void update_cpu_load_idle(struct rq *this_rq)
4614 {
4615 /*
4616 * bail if there's load or we're actually up-to-date.
4617 */
4618 if (weighted_cpuload(cpu_of(this_rq)))
4619 return;
4620
4621 __update_cpu_load_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
4622 }
4623
4624 /*
4625 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
4626 */
4627 void update_cpu_load_nohz(int active)
4628 {
4629 struct rq *this_rq = this_rq();
4630 unsigned long curr_jiffies = READ_ONCE(jiffies);
4631 unsigned long load = active ? weighted_cpuload(cpu_of(this_rq)) : 0;
4632
4633 if (curr_jiffies == this_rq->last_load_update_tick)
4634 return;
4635
4636 raw_spin_lock(&this_rq->lock);
4637 __update_cpu_load_nohz(this_rq, curr_jiffies, load, active);
4638 raw_spin_unlock(&this_rq->lock);
4639 }
4640 #endif /* CONFIG_NO_HZ_COMMON */
4641
4642 /*
4643 * Called from scheduler_tick()
4644 */
4645 void update_cpu_load_active(struct rq *this_rq)
4646 {
4647 unsigned long load = weighted_cpuload(cpu_of(this_rq));
4648 /*
4649 * See the mess around update_cpu_load_idle() / update_cpu_load_nohz().
4650 */
4651 this_rq->last_load_update_tick = jiffies;
4652 __update_cpu_load(this_rq, load, 1, 1);
4653 }
4654
4655 /*
4656 * Return a low guess at the load of a migration-source cpu weighted
4657 * according to the scheduling class and "nice" value.
4658 *
4659 * We want to under-estimate the load of migration sources, to
4660 * balance conservatively.
4661 */
4662 static unsigned long source_load(int cpu, int type)
4663 {
4664 struct rq *rq = cpu_rq(cpu);
4665 unsigned long total = weighted_cpuload(cpu);
4666
4667 if (type == 0 || !sched_feat(LB_BIAS))
4668 return total;
4669
4670 return min(rq->cpu_load[type-1], total);
4671 }
4672
4673 /*
4674 * Return a high guess at the load of a migration-target cpu weighted
4675 * according to the scheduling class and "nice" value.
4676 */
4677 static unsigned long target_load(int cpu, int type)
4678 {
4679 struct rq *rq = cpu_rq(cpu);
4680 unsigned long total = weighted_cpuload(cpu);
4681
4682 if (type == 0 || !sched_feat(LB_BIAS))
4683 return total;
4684
4685 return max(rq->cpu_load[type-1], total);
4686 }
4687
4688 static unsigned long capacity_of(int cpu)
4689 {
4690 return cpu_rq(cpu)->cpu_capacity;
4691 }
4692
4693 static unsigned long capacity_orig_of(int cpu)
4694 {
4695 return cpu_rq(cpu)->cpu_capacity_orig;
4696 }
4697
4698 static unsigned long cpu_avg_load_per_task(int cpu)
4699 {
4700 struct rq *rq = cpu_rq(cpu);
4701 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
4702 unsigned long load_avg = weighted_cpuload(cpu);
4703
4704 if (nr_running)
4705 return load_avg / nr_running;
4706
4707 return 0;
4708 }
4709
4710 static void record_wakee(struct task_struct *p)
4711 {
4712 /*
4713 * Rough decay (wiping) for cost saving, don't worry
4714 * about the boundary, really active task won't care
4715 * about the loss.
4716 */
4717 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
4718 current->wakee_flips >>= 1;
4719 current->wakee_flip_decay_ts = jiffies;
4720 }
4721
4722 if (current->last_wakee != p) {
4723 current->last_wakee = p;
4724 current->wakee_flips++;
4725 }
4726 }
4727
4728 static void task_waking_fair(struct task_struct *p)
4729 {
4730 struct sched_entity *se = &p->se;
4731 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4732 u64 min_vruntime;
4733
4734 #ifndef CONFIG_64BIT
4735 u64 min_vruntime_copy;
4736
4737 do {
4738 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4739 smp_rmb();
4740 min_vruntime = cfs_rq->min_vruntime;
4741 } while (min_vruntime != min_vruntime_copy);
4742 #else
4743 min_vruntime = cfs_rq->min_vruntime;
4744 #endif
4745
4746 se->vruntime -= min_vruntime;
4747 record_wakee(p);
4748 }
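
/*
 * Sketch of the 32-bit torn-read guard above, using C11 atomics in place
 * of the kernel's smp_rmb()-paired update (names here are illustrative):
 * the writer stores min_vruntime, a barrier, then the copy; a reader that
 * observes copy == val therefore saw a consistent 64-bit value.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

struct vruntime_pair {
	_Atomic uint64_t val;
	_Atomic uint64_t copy;
};

static uint64_t read_min_vruntime(struct vruntime_pair *v)
{
	uint64_t copy, val;

	do {
		copy = atomic_load_explicit(&v->copy, memory_order_acquire);
		val = atomic_load_explicit(&v->val, memory_order_acquire);
	} while (val != copy);		/* retry on a torn read */

	return val;
}
#endif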
4749
4750 #ifdef CONFIG_FAIR_GROUP_SCHED
4751 /*
4752 * effective_load() calculates the load change as seen from the root_task_group
4753 *
4754 * Adding load to a group doesn't make a group heavier, but can cause movement
4755 * of group shares between cpus. Assuming the shares were perfectly aligned one
4756 * can calculate the shift in shares.
4757 *
4758 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4759 * on this @cpu and results in a total addition (subtraction) of @wg to the
4760 * total group weight.
4761 *
4762 * Given a runqueue weight distribution (rw_i) we can compute a shares
4763 * distribution (s_i) using:
4764 *
4765 * s_i = rw_i / \Sum rw_j (1)
4766 *
4767 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4768 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4769 * shares distribution (s_i):
4770 *
4771 * rw_i = { 2, 4, 1, 0 }
4772 * s_i = { 2/7, 4/7, 1/7, 0 }
4773 *
4774 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4775 * task used to run on and the CPU the waker is running on), we need to
4776 * compute the effect of waking a task on either CPU and, in case of a sync
4777 * wakeup, compute the effect of the current task going to sleep.
4778 *
4779 * So for a change of @wl to the local @cpu with an overall group weight change
4780 * of @wl we can compute the new shares distribution (s'_i) using:
4781 *
4782 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4783 *
4784 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4785 * differences in waking a task to CPU 0. The additional task changes the
4786 * weight and shares distributions like:
4787 *
4788 * rw'_i = { 3, 4, 1, 0 }
4789 * s'_i = { 3/8, 4/8, 1/8, 0 }
4790 *
4791 * We can then compute the difference in effective weight by using:
4792 *
4793 * dw_i = S * (s'_i - s_i) (3)
4794 *
4795 * Where 'S' is the group weight as seen by its parent.
4796 *
4797 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4798 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4799 * 4/7) times the weight of the group.
4800 */
4801 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4802 {
4803 struct sched_entity *se = tg->se[cpu];
4804
4805 if (!tg->parent) /* the trivial, non-cgroup case */
4806 return wl;
4807
4808 for_each_sched_entity(se) {
4809 long w, W;
4810
4811 tg = se->my_q->tg;
4812
4813 /*
4814 * W = @wg + \Sum rw_j
4815 */
4816 W = wg + calc_tg_weight(tg, se->my_q);
4817
4818 /*
4819 * w = rw_i + @wl
4820 */
4821 w = cfs_rq_load_avg(se->my_q) + wl;
4822
4823 /*
4824 * wl = S * s'_i; see (2)
4825 */
4826 if (W > 0 && w < W)
4827 wl = (w * (long)tg->shares) / W;
4828 else
4829 wl = tg->shares;
4830
4831 /*
4832 * Per the above, wl is the new se->load.weight value; since
4833 * those are clipped to [MIN_SHARES, ...) do so now. See
4834 * calc_cfs_shares().
4835 */
4836 if (wl < MIN_SHARES)
4837 wl = MIN_SHARES;
4838
4839 /*
4840 * wl = dw_i = S * (s'_i - s_i); see (3)
4841 */
4842 wl -= se->avg.load_avg;
4843
4844 /*
4845 * Recursively apply this logic to all parent groups to compute
4846 * the final effective load change on the root group. Since
4847 * only the @tg group gets extra weight, all parent groups can
4848 * only redistribute existing shares. @wl is the shift in shares
4849 * resulting from this level per the above.
4850 */
4851 wg = 0;
4852 }
4853
4854 return wl;
4855 }
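
/*
 * Sketch reproducing the worked example in the comment above, with all
 * values as plain integers: for rw = {2, 4, 1, 0} (sum 7), adding one
 * unit of weight to CPU 0 shifts its share by s'_0 - s_0 = 3/8 - 2/7 =
 * 5/56 of the group weight S, so effective_delta_sketch(56, 2, 7, 1, 1)
 * returns 5.
 */
#if 0
static long effective_delta_sketch(long S, long rw_i, long sum_rw,
				   long wl, long wg)
{
	/* dw_i = S * (s'_i - s_i); see (2) and (3) above */
	return S * (rw_i + wl) / (sum_rw + wg) - S * rw_i / sum_rw;
}
#endif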
4856 #else
4857
4858 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4859 {
4860 return wl;
4861 }
4862
4863 #endif
4864
4865 /*
4866 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
4867 * A waker of many should wake a different task than the one last awakened
4868 * at a frequency roughly N times higher than one of its wakees. In order
4869 * to determine whether we should let the load spread vs consolidating to
4870 * shared cache, we look for a minimum 'flip' frequency of llc_size in one
4871 * partner, and a factor of llc_size higher frequency in the other. With
4872 * both conditions met, we can be relatively sure that the relationship is
4873 * non-monogamous, with partner count exceeding socket size. Waker/wakee
4874 * being client/server, worker/dispatcher, interrupt source or whatever is
4875 * irrelevant, spread criteria is apparent partner count exceeds socket size.
4876 */
4877 static int wake_wide(struct task_struct *p)
4878 {
4879 unsigned int master = current->wakee_flips;
4880 unsigned int slave = p->wakee_flips;
4881 int factor = this_cpu_read(sd_llc_size);
4882
4883 if (master < slave)
4884 swap(master, slave);
4885 if (slave < factor || master < slave * factor)
4886 return 0;
4887 return 1;
4888 }
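
/*
 * Sketch with example numbers, assuming llc_size == 8: a dispatcher with
 * 72 wakee flips waking a worker with 9 flips gives master = 72,
 * slave = 9; slave >= factor and master >= slave * factor both hold, so
 * the pair is judged non-monogamous and allowed to spread. At 70 flips
 * (70 < 9 * 8) it would still be treated as affine.
 */
#if 0
static int wake_wide_sketch(unsigned int waker_flips,
			    unsigned int wakee_flips, int llc_size)
{
	unsigned int master = waker_flips, slave = wakee_flips;

	if (master < slave) {		/* order the pair */
		unsigned int tmp = master;
		master = slave;
		slave = tmp;
	}
	return !(slave < (unsigned int)llc_size ||
		 master < slave * (unsigned int)llc_size);
}
#endif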
4889
4890 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4891 {
4892 s64 this_load, load;
4893 s64 this_eff_load, prev_eff_load;
4894 int idx, this_cpu, prev_cpu;
4895 struct task_group *tg;
4896 unsigned long weight;
4897 int balanced;
4898
4899 idx = sd->wake_idx;
4900 this_cpu = smp_processor_id();
4901 prev_cpu = task_cpu(p);
4902 load = source_load(prev_cpu, idx);
4903 this_load = target_load(this_cpu, idx);
4904
4905 /*
4906 * If sync wakeup then subtract the (maximum possible)
4907 * effect of the currently running task from the load
4908 * of the current CPU:
4909 */
4910 if (sync) {
4911 tg = task_group(current);
4912 weight = current->se.avg.load_avg;
4913
4914 this_load += effective_load(tg, this_cpu, -weight, -weight);
4915 load += effective_load(tg, prev_cpu, 0, -weight);
4916 }
4917
4918 tg = task_group(p);
4919 weight = p->se.avg.load_avg;
4920
4921 /*
4922 * In low-load situations, where prev_cpu is idle and this_cpu is idle
4923 * due to the sync cause above having dropped this_load to 0, we'll
4924 * always have an imbalance, but there's really nothing you can do
4925 * about that, so that's good too.
4926 *
4927 * Otherwise check if either cpus are near enough in load to allow this
4928 * task to be woken on this_cpu.
4929 */
4930 this_eff_load = 100;
4931 this_eff_load *= capacity_of(prev_cpu);
4932
4933 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4934 prev_eff_load *= capacity_of(this_cpu);
4935
4936 if (this_load > 0) {
4937 this_eff_load *= this_load +
4938 effective_load(tg, this_cpu, weight, weight);
4939
4940 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4941 }
4942
4943 balanced = this_eff_load <= prev_eff_load;
4944
4945 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4946
4947 if (!balanced)
4948 return 0;
4949
4950 schedstat_inc(sd, ttwu_move_affine);
4951 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4952
4953 return 1;
4954 }
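
/*
 * Sketch of the final comparison above with the effective_load() terms
 * dropped for clarity: the affine target (this_cpu) may carry more load
 * than prev_cpu, weighted by the respective capacities, by up to half of
 * the domain's imbalance_pct before the wakeup is judged unbalanced.
 */
#if 0
static int wake_affine_balanced_sketch(long this_load, long prev_load,
				       long cap_this, long cap_prev,
				       unsigned int imbalance_pct)
{
	long this_eff = 100L * cap_prev * this_load;
	long prev_eff = (100L + (imbalance_pct - 100) / 2) *
			cap_this * prev_load;

	return this_eff <= prev_eff;	/* ok to wake on this_cpu */
}
#endif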
4955
4956 /*
4957 * find_idlest_group finds and returns the least busy CPU group within the
4958 * domain.
4959 */
4960 static struct sched_group *
4961 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4962 int this_cpu, int sd_flag)
4963 {
4964 struct sched_group *idlest = NULL, *group = sd->groups;
4965 unsigned long min_load = ULONG_MAX, this_load = 0;
4966 int load_idx = sd->forkexec_idx;
4967 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4968
4969 if (sd_flag & SD_BALANCE_WAKE)
4970 load_idx = sd->wake_idx;
4971
4972 do {
4973 unsigned long load, avg_load;
4974 int local_group;
4975 int i;
4976
4977 /* Skip over this group if it has no CPUs allowed */
4978 if (!cpumask_intersects(sched_group_cpus(group),
4979 tsk_cpus_allowed(p)))
4980 continue;
4981
4982 local_group = cpumask_test_cpu(this_cpu,
4983 sched_group_cpus(group));
4984
4985 /* Tally up the load of all CPUs in the group */
4986 avg_load = 0;
4987
4988 for_each_cpu(i, sched_group_cpus(group)) {
4989 /* Bias balancing toward cpus of our domain */
4990 if (local_group)
4991 load = source_load(i, load_idx);
4992 else
4993 load = target_load(i, load_idx);
4994
4995 avg_load += load;
4996 }
4997
4998 /* Adjust by relative CPU capacity of the group */
4999 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
5000
5001 if (local_group) {
5002 this_load = avg_load;
5003 } else if (avg_load < min_load) {
5004 min_load = avg_load;
5005 idlest = group;
5006 }
5007 } while (group = group->next, group != sd->groups);
5008
5009 if (!idlest || 100*this_load < imbalance*min_load)
5010 return NULL;
5011 return idlest;
5012 }
5013
5014 /*
5015 * find_idlest_cpu - find the idlest cpu among the cpus in group.
5016 */
5017 static int
5018 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5019 {
5020 unsigned long load, min_load = ULONG_MAX;
5021 unsigned int min_exit_latency = UINT_MAX;
5022 u64 latest_idle_timestamp = 0;
5023 int least_loaded_cpu = this_cpu;
5024 int shallowest_idle_cpu = -1;
5025 int i;
5026
5027 /* Traverse only the allowed CPUs */
5028 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
5029 if (idle_cpu(i)) {
5030 struct rq *rq = cpu_rq(i);
5031 struct cpuidle_state *idle = idle_get_state(rq);
5032 if (idle && idle->exit_latency < min_exit_latency) {
5033 /*
5034 * We give priority to a CPU whose idle state
5035 * has the smallest exit latency irrespective
5036 * of any idle timestamp.
5037 */
5038 min_exit_latency = idle->exit_latency;
5039 latest_idle_timestamp = rq->idle_stamp;
5040 shallowest_idle_cpu = i;
5041 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5042 rq->idle_stamp > latest_idle_timestamp) {
5043 /*
5044 * If equal or no active idle state, then
5045 * the most recently idled CPU might have
5046 * a warmer cache.
5047 */
5048 latest_idle_timestamp = rq->idle_stamp;
5049 shallowest_idle_cpu = i;
5050 }
5051 } else if (shallowest_idle_cpu == -1) {
5052 load = weighted_cpuload(i);
5053 if (load < min_load || (load == min_load && i == this_cpu)) {
5054 min_load = load;
5055 least_loaded_cpu = i;
5056 }
5057 }
5058 }
5059
5060 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5061 }
5062
5063 /*
5064 * Try and locate an idle CPU in the sched_domain.
5065 */
5066 static int select_idle_sibling(struct task_struct *p, int target)
5067 {
5068 struct sched_domain *sd;
5069 struct sched_group *sg;
5070 int i = task_cpu(p);
5071
5072 if (idle_cpu(target))
5073 return target;
5074
5075 /*
5076 * If the previous cpu is cache affine and idle, don't be stupid.
5077 */
5078 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
5079 return i;
5080
5081 /*
5082 * Otherwise, iterate the domains and find an eligible idle cpu.
5083 *
5084 * A completely idle sched group at higher domains is more
5085 * desirable than an idle group at a lower level, because lower
5086 * domains have smaller groups and usually share hardware
5087 * resources which causes tasks to contend on them, e.g. x86
5088 * hyperthread siblings in the lowest domain (SMT) can contend
5089 * on the shared cpu pipeline.
5090 *
5091 * However, while we prefer idle groups at higher domains,
5092 * finding an idle cpu at the lowest domain is still better than
5093 * returning 'target', which we've already established isn't
5094 * idle.
5095 */
5096 sd = rcu_dereference(per_cpu(sd_llc, target));
5097 for_each_lower_domain(sd) {
5098 sg = sd->groups;
5099 do {
5100 if (!cpumask_intersects(sched_group_cpus(sg),
5101 tsk_cpus_allowed(p)))
5102 goto next;
5103
5104 /* Ensure the entire group is idle */
5105 for_each_cpu(i, sched_group_cpus(sg)) {
5106 if (i == target || !idle_cpu(i))
5107 goto next;
5108 }
5109
5110 /*
5111 * It doesn't matter which cpu we pick, the
5112 * whole group is idle.
5113 */
5114 target = cpumask_first_and(sched_group_cpus(sg),
5115 tsk_cpus_allowed(p));
5116 goto done;
5117 next:
5118 sg = sg->next;
5119 } while (sg != sd->groups);
5120 }
5121 done:
5122 return target;
5123 }
5124
5125 /*
5126 * cpu_util returns the amount of capacity of a CPU that is used by CFS
5127 * tasks. The unit of the return value must be the one of capacity so we can
5128 * compare the utilization with the capacity of the CPU that is available for
5129 * CFS task (ie cpu_capacity).
5130 *
5131 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5132 * recent utilization of currently non-runnable tasks on a CPU. It represents
5133 * the amount of utilization of a CPU in the range [0..capacity_orig] where
5134 * capacity_orig is the cpu_capacity available at the highest frequency
5135 * (arch_scale_freq_capacity()).
5136 * The utilization of a CPU converges towards a sum equal to or less than the
5137 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5138 * the running time on this CPU scaled by capacity_curr.
5139 *
5140 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5141 * higher than capacity_orig because of unfortunate rounding in
5142 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5143 * the average stabilizes with the new running time. We need to check that the
5144 * utilization stays within the range of [0..capacity_orig] and cap it if
5145 * necessary. Without utilization capping, a group could be seen as overloaded
5146 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5147 * available capacity. We allow utilization to overshoot capacity_curr (but not
5148 * capacity_orig) as it is useful for predicting the capacity required after task
5149 * migrations (scheduler-driven DVFS).
5150 */
5151 static int cpu_util(int cpu)
5152 {
5153 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5154 unsigned long capacity = capacity_orig_of(cpu);
5155
5156 return (util >= capacity) ? capacity : util;
5157 }
5158
5159 /*
5160 * select_task_rq_fair: Select target runqueue for the waking task in domains
5161 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5162 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
5163 *
5164 * Balances load by selecting the idlest cpu in the idlest group, or under
5165 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
5166 *
5167 * Returns the target cpu number.
5168 *
5169 * preempt must be disabled.
5170 */
5171 static int
5172 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5173 {
5174 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5175 int cpu = smp_processor_id();
5176 int new_cpu = prev_cpu;
5177 int want_affine = 0;
5178 int sync = wake_flags & WF_SYNC;
5179
5180 if (sd_flag & SD_BALANCE_WAKE)
5181 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
5182
5183 rcu_read_lock();
5184 for_each_domain(cpu, tmp) {
5185 if (!(tmp->flags & SD_LOAD_BALANCE))
5186 break;
5187
5188 /*
5189 * If both cpu and prev_cpu are part of this domain,
5190 * cpu is a valid SD_WAKE_AFFINE target.
5191 */
5192 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5193 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5194 affine_sd = tmp;
5195 break;
5196 }
5197
5198 if (tmp->flags & sd_flag)
5199 sd = tmp;
5200 else if (!want_affine)
5201 break;
5202 }
5203
5204 if (affine_sd) {
5205 sd = NULL; /* Prefer wake_affine over balance flags */
5206 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
5207 new_cpu = cpu;
5208 }
5209
5210 if (!sd) {
5211 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
5212 new_cpu = select_idle_sibling(p, new_cpu);
5213
5214 } else while (sd) {
5215 struct sched_group *group;
5216 int weight;
5217
5218 if (!(sd->flags & sd_flag)) {
5219 sd = sd->child;
5220 continue;
5221 }
5222
5223 group = find_idlest_group(sd, p, cpu, sd_flag);
5224 if (!group) {
5225 sd = sd->child;
5226 continue;
5227 }
5228
5229 new_cpu = find_idlest_cpu(group, p, cpu);
5230 if (new_cpu == -1 || new_cpu == cpu) {
5231 /* Now try balancing at a lower domain level of cpu */
5232 sd = sd->child;
5233 continue;
5234 }
5235
5236 /* Now try balancing at a lower domain level of new_cpu */
5237 cpu = new_cpu;
5238 weight = sd->span_weight;
5239 sd = NULL;
5240 for_each_domain(cpu, tmp) {
5241 if (weight <= tmp->span_weight)
5242 break;
5243 if (tmp->flags & sd_flag)
5244 sd = tmp;
5245 }
5246 /* while loop will break here if sd == NULL */
5247 }
5248 rcu_read_unlock();
5249
5250 return new_cpu;
5251 }
5252
5253 /*
5254 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
5255 * cfs_rq_of(p) references at time of call are still valid and identify the
5256 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
5257 */
5258 static void migrate_task_rq_fair(struct task_struct *p)
5259 {
5260 /*
5261 * We are supposed to update the task to "current" time, so that it is up to
5262 * date and ready to go to the new CPU/cfs_rq. But we have difficulty in
5263 * getting what the current time is, so simply throw away the out-of-date
5264 * time. This will result in the wakee task being less decayed, but giving
5265 * the wakee more load does not sound bad.
5266 */
5267 remove_entity_load_avg(&p->se);
5268
5269 /* Tell new CPU we are migrated */
5270 p->se.avg.last_update_time = 0;
5271
5272 /* We have migrated, no longer consider this task hot */
5273 p->se.exec_start = 0;
5274 }
5275
5276 static void task_dead_fair(struct task_struct *p)
5277 {
5278 remove_entity_load_avg(&p->se);
5279 }
5280 #endif /* CONFIG_SMP */
5281
5282 static unsigned long
5283 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
5284 {
5285 unsigned long gran = sysctl_sched_wakeup_granularity;
5286
5287 /*
5288 * Since it's curr running now, convert the gran from real-time
5289 * to virtual-time in its units.
5290 *
5291 * By using 'se' instead of 'curr' we penalize light tasks, so
5292 * they get preempted easier. That is, if 'se' < 'curr' then
5293 * the resulting gran will be larger, therefore penalizing the
5294 * lighter, if otoh 'se' > 'curr' then the resulting gran will
5295 * be smaller, again penalizing the lighter task.
5296 *
5297 * This is especially important for buddies when the leftmost
5298 * task is higher priority than the buddy.
5299 */
5300 return calc_delta_fair(gran, se);
5301 }
5302
5303 /*
5304 * Should 'se' preempt 'curr'.
5305 *
5306 * |s1
5307 * |s2
5308 * |s3
5309 * g
5310 * |<--->|c
5311 *
5312 * w(c, s1) = -1
5313 * w(c, s2) = 0
5314 * w(c, s3) = 1
5315 *
5316 */
5317 static int
5318 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5319 {
5320 s64 gran, vdiff = curr->vruntime - se->vruntime;
5321
5322 if (vdiff <= 0)
5323 return -1;
5324
5325 gran = wakeup_gran(curr, se);
5326 if (vdiff > gran)
5327 return 1;
5328
5329 return 0;
5330 }
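
/*
 * Sketch of the three cases drawn above, assuming a fixed gran g: the
 * position of vdiff = curr->vruntime - se->vruntime relative to (0, g]
 * selects the return value, and only the +1 case triggers preemption in
 * check_preempt_wakeup().
 */
#if 0
#include <assert.h>

static int preempt_sketch(long vdiff, long gran)
{
	if (vdiff <= 0)
		return -1;		/* s1: se is not behind curr */
	if (vdiff > gran)
		return 1;		/* s3: far enough behind, preempt */
	return 0;			/* s2: within the granularity */
}

static void preempt_cases(void)
{
	assert(preempt_sketch(-5, 10) == -1);	/* s1 */
	assert(preempt_sketch(7, 10) == 0);	/* s2 */
	assert(preempt_sketch(15, 10) == 1);	/* s3 */
}
#endif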
5331
5332 static void set_last_buddy(struct sched_entity *se)
5333 {
5334 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5335 return;
5336
5337 for_each_sched_entity(se)
5338 cfs_rq_of(se)->last = se;
5339 }
5340
5341 static void set_next_buddy(struct sched_entity *se)
5342 {
5343 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5344 return;
5345
5346 for_each_sched_entity(se)
5347 cfs_rq_of(se)->next = se;
5348 }
5349
5350 static void set_skip_buddy(struct sched_entity *se)
5351 {
5352 for_each_sched_entity(se)
5353 cfs_rq_of(se)->skip = se;
5354 }
5355
5356 /*
5357 * Preempt the current task with a newly woken task if needed:
5358 */
5359 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
5360 {
5361 struct task_struct *curr = rq->curr;
5362 struct sched_entity *se = &curr->se, *pse = &p->se;
5363 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5364 int scale = cfs_rq->nr_running >= sched_nr_latency;
5365 int next_buddy_marked = 0;
5366
5367 if (unlikely(se == pse))
5368 return;
5369
5370 /*
5371 * This is possible from callers such as attach_tasks(), in which we
5372 * unconditionally check_preempt_curr() after an enqueue (which may have
5373 * lead to a throttle). This both saves work and prevents false
5374 * next-buddy nomination below.
5375 */
5376 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5377 return;
5378
5379 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
5380 set_next_buddy(pse);
5381 next_buddy_marked = 1;
5382 }
5383
5384 /*
5385 * We can come here with TIF_NEED_RESCHED already set from new task
5386 * wake up path.
5387 *
5388 * Note: this also catches the edge-case of curr being in a throttled
5389 * group (e.g. via set_curr_task), since update_curr() (in the
5390 * enqueue of curr) will have resulted in resched being set. This
5391 * prevents us from potentially nominating it as a false LAST_BUDDY
5392 * below.
5393 */
5394 if (test_tsk_need_resched(curr))
5395 return;
5396
5397 /* Idle tasks are by definition preempted by non-idle tasks. */
5398 if (unlikely(curr->policy == SCHED_IDLE) &&
5399 likely(p->policy != SCHED_IDLE))
5400 goto preempt;
5401
5402 /*
5403 * Batch and idle tasks do not preempt non-idle tasks (their preemption
5404 * is driven by the tick):
5405 */
5406 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
5407 return;
5408
5409 find_matching_se(&se, &pse);
5410 update_curr(cfs_rq_of(se));
5411 BUG_ON(!pse);
5412 if (wakeup_preempt_entity(se, pse) == 1) {
5413 /*
5414 * Bias pick_next to pick the sched entity that is
5415 * triggering this preemption.
5416 */
5417 if (!next_buddy_marked)
5418 set_next_buddy(pse);
5419 goto preempt;
5420 }
5421
5422 return;
5423
5424 preempt:
5425 resched_curr(rq);
5426 /*
5427 * Only set the backward buddy when the current task is still
5428 * on the rq. This can happen when a wakeup gets interleaved
5429 * with schedule on the ->pre_schedule() or idle_balance()
5430 * point, either of which can drop the rq lock.
5431 *
5432 * Also, during early boot the idle thread is in the fair class,
5433 * for obvious reasons its a bad idea to schedule back to it.
5434 */
5435 if (unlikely(!se->on_rq || curr == rq->idle))
5436 return;
5437
5438 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5439 set_last_buddy(se);
5440 }
5441
5442 static struct task_struct *
5443 pick_next_task_fair(struct rq *rq, struct task_struct *prev)
5444 {
5445 struct cfs_rq *cfs_rq = &rq->cfs;
5446 struct sched_entity *se;
5447 struct task_struct *p;
5448 int new_tasks;
5449
5450 again:
5451 #ifdef CONFIG_FAIR_GROUP_SCHED
5452 if (!cfs_rq->nr_running)
5453 goto idle;
5454
5455 if (prev->sched_class != &fair_sched_class)
5456 goto simple;
5457
5458 /*
5459 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5460 * likely that a next task is from the same cgroup as the current.
5461 *
5462 * Therefore attempt to avoid putting and setting the entire cgroup
5463 * hierarchy, only change the part that actually changes.
5464 */
5465
5466 do {
5467 struct sched_entity *curr = cfs_rq->curr;
5468
5469 /*
5470 * Since we got here without doing put_prev_entity() we also
5471 * have to consider cfs_rq->curr. If it is still a runnable
5472 * entity, update_curr() will update its vruntime, otherwise
5473 * forget we've ever seen it.
5474 */
5475 if (curr) {
5476 if (curr->on_rq)
5477 update_curr(cfs_rq);
5478 else
5479 curr = NULL;
5480
5481 /*
5482 * This call to check_cfs_rq_runtime() will do the
5483 * throttle and dequeue its entity in the parent(s).
5484 * Therefore the 'simple' nr_running test will indeed
5485 * be correct.
5486 */
5487 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5488 goto simple;
5489 }
5490
5491 se = pick_next_entity(cfs_rq, curr);
5492 cfs_rq = group_cfs_rq(se);
5493 } while (cfs_rq);
5494
5495 p = task_of(se);
5496
5497 /*
5498 * Since we haven't yet done put_prev_entity and if the selected task
5499 * is a different task than we started out with, try and touch the
5500 * least amount of cfs_rqs.
5501 */
5502 if (prev != p) {
5503 struct sched_entity *pse = &prev->se;
5504
5505 while (!(cfs_rq = is_same_group(se, pse))) {
5506 int se_depth = se->depth;
5507 int pse_depth = pse->depth;
5508
5509 if (se_depth <= pse_depth) {
5510 put_prev_entity(cfs_rq_of(pse), pse);
5511 pse = parent_entity(pse);
5512 }
5513 if (se_depth >= pse_depth) {
5514 set_next_entity(cfs_rq_of(se), se);
5515 se = parent_entity(se);
5516 }
5517 }
5518
5519 put_prev_entity(cfs_rq, pse);
5520 set_next_entity(cfs_rq, se);
5521 }
5522
5523 if (hrtick_enabled(rq))
5524 hrtick_start_fair(rq, p);
5525
5526 return p;
5527 simple:
5528 cfs_rq = &rq->cfs;
5529 #endif
5530
5531 if (!cfs_rq->nr_running)
5532 goto idle;
5533
5534 put_prev_task(rq, prev);
5535
5536 do {
5537 se = pick_next_entity(cfs_rq, NULL);
5538 set_next_entity(cfs_rq, se);
5539 cfs_rq = group_cfs_rq(se);
5540 } while (cfs_rq);
5541
5542 p = task_of(se);
5543
5544 if (hrtick_enabled(rq))
5545 hrtick_start_fair(rq, p);
5546
5547 return p;
5548
5549 idle:
5550 /*
5551 * This is OK, because current is on_cpu, which avoids it being picked
5552 * for load-balance and preemption/IRQs are still disabled avoiding
5553 * further scheduler activity on it and we're being very careful to
5554 * re-start the picking loop.
5555 */
5556 lockdep_unpin_lock(&rq->lock);
5557 new_tasks = idle_balance(rq);
5558 lockdep_pin_lock(&rq->lock);
5559 /*
5560 * Because idle_balance() releases (and re-acquires) rq->lock, it is
5561 * possible for any higher priority task to appear. In that case we
5562 * must re-start the pick_next_entity() loop.
5563 */
5564 if (new_tasks < 0)
5565 return RETRY_TASK;
5566
5567 if (new_tasks > 0)
5568 goto again;
5569
5570 return NULL;
5571 }
5572
5573 /*
5574 * Account for a descheduled task:
5575 */
5576 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
5577 {
5578 struct sched_entity *se = &prev->se;
5579 struct cfs_rq *cfs_rq;
5580
5581 for_each_sched_entity(se) {
5582 cfs_rq = cfs_rq_of(se);
5583 put_prev_entity(cfs_rq, se);
5584 }
5585 }
5586
5587 /*
5588 * sched_yield() is very simple
5589 *
5590 * The magic of dealing with the ->skip buddy is in pick_next_entity.
5591 */
5592 static void yield_task_fair(struct rq *rq)
5593 {
5594 struct task_struct *curr = rq->curr;
5595 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5596 struct sched_entity *se = &curr->se;
5597
5598 /*
5599 * Are we the only task in the tree?
5600 */
5601 if (unlikely(rq->nr_running == 1))
5602 return;
5603
5604 clear_buddies(cfs_rq, se);
5605
5606 if (curr->policy != SCHED_BATCH) {
5607 update_rq_clock(rq);
5608 /*
5609 * Update run-time statistics of the 'current'.
5610 */
5611 update_curr(cfs_rq);
5612 /*
5613 * Tell update_rq_clock() that we've just updated,
5614 * so we don't do microscopic update in schedule()
5615 * and double the fastpath cost.
5616 */
5617 rq_clock_skip_update(rq, true);
5618 }
5619
5620 set_skip_buddy(se);
5621 }
5622
5623 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5624 {
5625 struct sched_entity *se = &p->se;
5626
5627 /* throttled hierarchies are not runnable */
5628 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
5629 return false;
5630
5631 /* Tell the scheduler that we'd really like se to run next. */
5632 set_next_buddy(se);
5633
5634 yield_task_fair(rq);
5635
5636 return true;
5637 }
5638
5639 #ifdef CONFIG_SMP
5640 /**************************************************
5641 * Fair scheduling class load-balancing methods.
5642 *
5643 * BASICS
5644 *
5645 * The purpose of load-balancing is to achieve the same basic fairness the
5646 * per-cpu scheduler provides, namely provide a proportional amount of compute
5647 * time to each task. This is expressed in the following equation:
5648 *
5649 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
5650 *
5651 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
5652 * W_i,0 is defined as:
5653 *
5654 * W_i,0 = \Sum_j w_i,j (2)
5655 *
5656 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
5657 * is derived from the nice value as per prio_to_weight[].
5658 *
5659 * The weight average is an exponential decay average of the instantaneous
5660 * weight (a minimal C sketch of this recurrence follows this comment block):
5661 *
5662 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
5663 *
5664 * C_i is the compute capacity of cpu i; typically it is the
5665 * fraction of 'recent' time available for SCHED_OTHER task execution, but it
5666 * can also include other factors [XXX].
5667 *
5668 * To achieve this balance we define a measure of imbalance which follows
5669 * directly from (1):
5670 *
5671 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
5672 *
5673 * We then move tasks around to minimize the imbalance. In the continuous
5674 * function space it is obvious this converges; in the discrete case we get
5675 * a few fun cases generally called infeasible weight scenarios.
5676 *
5677 * [XXX expand on:
5678 * - infeasible weights;
5679 * - local vs global optima in the discrete case. ]
5680 *
5681 *
5682 * SCHED DOMAINS
5683 *
5684 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5685 * for all i,j solution, we create a tree of cpus that follows the hardware
5686 * topology where each level pairs two lower groups (or better). This results
5687 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5688 * tree to only the first of the previous level and we decrease the frequency
5689 * of load-balance at each level inversely proportional to the number of cpus in
5690 * the groups.
5691 *
5692 * This yields:
5693 *
5694 *    log_2 n      1      n
5695 *   \Sum      { ----- * ----- * 2^i } = O(n)                      (5)
5696 *    i = 0      2^i    2^i
5697 *                               `- size of each group
5698 *      |          |     `- number of cpus doing load-balance
5699 *      |          `- freq
5700 *      `- sum over all levels
5701 *
5702 * Coupled with a limit on how many tasks we can migrate every balance pass,
5703 * this makes (5) the runtime complexity of the balancer.
5704 *
5705 * An important property here is that each CPU is still (indirectly) connected
5706 * to every other cpu in at most O(log n) steps:
5707 *
5708 * The adjacency matrix of the resulting graph is given by:
5709 *
5710 * log_2 n
5711 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5712 * k = 0
5713 *
5714 * And you'll find that:
5715 *
5716 * A^(log_2 n)_i,j != 0 for all i,j (7)
5717 *
5718 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5719 * The task movement gives a factor of O(m), giving a convergence complexity
5720 * of:
5721 *
5722 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5723 *
5724 *
5725 * WORK CONSERVING
5726 *
5727 * In order to avoid CPUs going idle while there's still work to do, new idle
5728 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5729 * tree itself instead of relying on other CPUs to bring it work.
5730 *
5731 * This adds some complexity to both (5) and (8) but it reduces the total idle
5732 * time.
5733 *
5734 * [XXX more?]
5735 *
5736 *
5737 * CGROUPS
5738 *
5739 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5740 *
5741 *                                  s_k,i
5742 *   W_i,0 = \Sum_j \Prod_k w_k * -----                            (9)
5743 *                                   S_k
5744 *
5745 * Where
5746 *
5747 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5748 *
5749 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5750 *
5751 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
5752 * property.
5753 *
5754 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5755 * rewrite all of this once again.]
5756 */
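/*
 * Illustrative sketch (not kernel code): the decay recurrence of
 * equation (3) above, written out with hypothetical fixed-point
 * arguments. For n = 3 each step keeps 7/8 of the old average and
 * blends in 1/8 of the instantaneous weight.
 */
static inline unsigned long example_decay_weight_avg(unsigned long w_avg,
						     unsigned long w_inst,
						     unsigned int n)
{
	/* W'_i,n = (2^n - 1)/2^n * W_i,n + 1/2^n * W_i,0 */
	return (((1UL << n) - 1) * w_avg + w_inst) >> n;
}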
5757
5758 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5759
5760 enum fbq_type { regular, remote, all };
5761
5762 #define LBF_ALL_PINNED 0x01
5763 #define LBF_NEED_BREAK 0x02
5764 #define LBF_DST_PINNED 0x04
5765 #define LBF_SOME_PINNED 0x08
5766
5767 struct lb_env {
5768 struct sched_domain *sd;
5769
5770 struct rq *src_rq;
5771 int src_cpu;
5772
5773 int dst_cpu;
5774 struct rq *dst_rq;
5775
5776 struct cpumask *dst_grpmask;
5777 int new_dst_cpu;
5778 enum cpu_idle_type idle;
5779 long imbalance;
5780 /* The set of CPUs under consideration for load-balancing */
5781 struct cpumask *cpus;
5782
5783 unsigned int flags;
5784
5785 unsigned int loop;
5786 unsigned int loop_break;
5787 unsigned int loop_max;
5788
5789 enum fbq_type fbq_type;
5790 struct list_head tasks;
5791 };
5792
5793 /*
5794 * Is this task likely cache-hot:
5795 */
5796 static int task_hot(struct task_struct *p, struct lb_env *env)
5797 {
5798 s64 delta;
5799
5800 lockdep_assert_held(&env->src_rq->lock);
5801
5802 if (p->sched_class != &fair_sched_class)
5803 return 0;
5804
5805 if (unlikely(p->policy == SCHED_IDLE))
5806 return 0;
5807
5808 /*
5809 * Buddy candidates are cache hot:
5810 */
5811 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
5812 (&p->se == cfs_rq_of(&p->se)->next ||
5813 &p->se == cfs_rq_of(&p->se)->last))
5814 return 1;
5815
5816 if (sysctl_sched_migration_cost == -1)
5817 return 1;
5818 if (sysctl_sched_migration_cost == 0)
5819 return 0;
5820
5821 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
5822
5823 return delta < (s64)sysctl_sched_migration_cost;
5824 }
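/*
 * Illustrative worked example (not kernel code): with the default
 * sysctl_sched_migration_cost of 500000ns (0.5ms), a task whose
 * exec_start lies 200000ns in the past yields delta = 200000 < 500000,
 * so task_hot() returns 1 and the task is left alone unless balancing
 * has repeatedly failed.
 */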
5825
5826 #ifdef CONFIG_NUMA_BALANCING
5827 /*
5828 * Returns 1 if task migration degrades locality.
5829 * Returns 0 if task migration improves locality, i.e. migration is preferred.
5830 * Returns -1 if task migration is not affected by locality.
5831 */
5832 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5833 {
5834 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5835 unsigned long src_faults, dst_faults;
5836 int src_nid, dst_nid;
5837
5838 if (!static_branch_likely(&sched_numa_balancing))
5839 return -1;
5840
5841 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
5842 return -1;
5843
5844 src_nid = cpu_to_node(env->src_cpu);
5845 dst_nid = cpu_to_node(env->dst_cpu);
5846
5847 if (src_nid == dst_nid)
5848 return -1;
5849
5850 /* Migrating away from the preferred node is always bad. */
5851 if (src_nid == p->numa_preferred_nid) {
5852 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
5853 return 1;
5854 else
5855 return -1;
5856 }
5857
5858 /* Encourage migration to the preferred node. */
5859 if (dst_nid == p->numa_preferred_nid)
5860 return 0;
5861
5862 if (numa_group) {
5863 src_faults = group_faults(p, src_nid);
5864 dst_faults = group_faults(p, dst_nid);
5865 } else {
5866 src_faults = task_faults(p, src_nid);
5867 dst_faults = task_faults(p, dst_nid);
5868 }
5869
5870 return dst_faults < src_faults;
5871 }
5872
5873 #else
5874 static inline int migrate_degrades_locality(struct task_struct *p,
5875 struct lb_env *env)
5876 {
5877 return -1;
5878 }
5879 #endif
5880
5881 /*
5882 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5883 */
5884 static
5885 int can_migrate_task(struct task_struct *p, struct lb_env *env)
5886 {
5887 int tsk_cache_hot;
5888
5889 lockdep_assert_held(&env->src_rq->lock);
5890
5891 /*
5892 * We do not migrate tasks that are:
5893 * 1) throttled (see throttled_lb_pair()), or
5894 * 2) not allowed to run on this CPU due to cpus_allowed, or
5895 * 3) running (obviously), or
5896 * 4) cache-hot on their current CPU.
5897 */
5898 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5899 return 0;
5900
5901 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5902 int cpu;
5903
5904 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5905
5906 env->flags |= LBF_SOME_PINNED;
5907
5908 /*
5909 * Remember if this task can be migrated to any other cpu in
5910 * our sched_group. We may want to revisit it if we couldn't
5911 * meet load balance goals by pulling other tasks from src_cpu.
5912 *
5913 * Also avoid computing new_dst_cpu if we have already computed
5914 * one in current iteration.
5915 */
5916 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5917 return 0;
5918
5919 /* Prevent re-selecting dst_cpu via env's cpus */
5920 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5921 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5922 env->flags |= LBF_DST_PINNED;
5923 env->new_dst_cpu = cpu;
5924 break;
5925 }
5926 }
5927
5928 return 0;
5929 }
5930
5931 /* Record that we found at least one task that could run on dst_cpu */
5932 env->flags &= ~LBF_ALL_PINNED;
5933
5934 if (task_running(env->src_rq, p)) {
5935 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5936 return 0;
5937 }
5938
5939 /*
5940 * Aggressive migration if:
5941 * 1) the destination NUMA node is preferred, or
5942 * 2) the task is cache cold, or
5943 * 3) too many balance attempts have failed.
5944 */
5945 tsk_cache_hot = migrate_degrades_locality(p, env);
5946 if (tsk_cache_hot == -1)
5947 tsk_cache_hot = task_hot(p, env);
5948
5949 if (tsk_cache_hot <= 0 ||
5950 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5951 if (tsk_cache_hot == 1) {
5952 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5953 schedstat_inc(p, se.statistics.nr_forced_migrations);
5954 }
5955 return 1;
5956 }
5957
5958 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5959 return 0;
5960 }
5961
5962 /*
5963 * detach_task() -- detach the task for the migration specified in env
5964 */
5965 static void detach_task(struct task_struct *p, struct lb_env *env)
5966 {
5967 lockdep_assert_held(&env->src_rq->lock);
5968
5969 p->on_rq = TASK_ON_RQ_MIGRATING;
5970 deactivate_task(env->src_rq, p, 0);
5971 set_task_cpu(p, env->dst_cpu);
5972 }
5973
5974 /*
5975 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
5976 * part of active balancing operations within "domain".
5977 *
5978 * Returns a task if successful and NULL otherwise.
5979 */
5980 static struct task_struct *detach_one_task(struct lb_env *env)
5981 {
5982 struct task_struct *p, *n;
5983
5984 lockdep_assert_held(&env->src_rq->lock);
5985
5986 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5987 if (!can_migrate_task(p, env))
5988 continue;
5989
5990 detach_task(p, env);
5991
5992 /*
5993 * Right now, this is only the second place where
5994 * lb_gained[env->idle] is updated (other is detach_tasks)
5995 * so we can safely collect stats here rather than
5996 * inside detach_tasks().
5997 */
5998 schedstat_inc(env->sd, lb_gained[env->idle]);
5999 return p;
6000 }
6001 return NULL;
6002 }
6003
6004 static const unsigned int sched_nr_migrate_break = 32;
6005
6006 /*
6007 * detach_tasks() -- tries to detach up to imbalance weighted load from
6008 * busiest_rq, as part of a balancing operation within domain "sd".
6009 *
6010 * Returns number of detached tasks if successful and 0 otherwise.
6011 */
6012 static int detach_tasks(struct lb_env *env)
6013 {
6014 struct list_head *tasks = &env->src_rq->cfs_tasks;
6015 struct task_struct *p;
6016 unsigned long load;
6017 int detached = 0;
6018
6019 lockdep_assert_held(&env->src_rq->lock);
6020
6021 if (env->imbalance <= 0)
6022 return 0;
6023
6024 while (!list_empty(tasks)) {
6025 /*
6026 * We don't want to steal all the tasks, otherwise we may be treated
6027 * likewise, which could at worst lead to a livelock.
6028 */
6029 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6030 break;
6031
6032 p = list_first_entry(tasks, struct task_struct, se.group_node);
6033
6034 env->loop++;
6035 /* We've more or less seen every task there is, call it quits */
6036 if (env->loop > env->loop_max)
6037 break;
6038
6039 /* take a breather every nr_migrate tasks */
6040 if (env->loop > env->loop_break) {
6041 env->loop_break += sched_nr_migrate_break;
6042 env->flags |= LBF_NEED_BREAK;
6043 break;
6044 }
6045
6046 if (!can_migrate_task(p, env))
6047 goto next;
6048
6049 load = task_h_load(p);
6050
6051 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6052 goto next;
6053
6054 if ((load / 2) > env->imbalance)
6055 goto next;
6056
6057 detach_task(p, env);
6058 list_add(&p->se.group_node, &env->tasks);
6059
6060 detached++;
6061 env->imbalance -= load;
6062
6063 #ifdef CONFIG_PREEMPT
6064 /*
6065 * NEWIDLE balancing is a source of latency, so preemptible
6066 * kernels will stop after the first task is detached to minimize
6067 * the critical section.
6068 */
6069 if (env->idle == CPU_NEWLY_IDLE)
6070 break;
6071 #endif
6072
6073 /*
6074 * We only want to steal up to the prescribed amount of
6075 * weighted load.
6076 */
6077 if (env->imbalance <= 0)
6078 break;
6079
6080 continue;
6081 next:
6082 list_move_tail(&p->se.group_node, tasks);
6083 }
6084
6085 /*
6086 * Right now, this is one of only two places we collect this stat
6087 * so we can safely collect detach_one_task() stats here rather
6088 * than inside detach_one_task().
6089 */
6090 schedstat_add(env->sd, lb_gained[env->idle], detached);
6091
6092 return detached;
6093 }
6094
6095 /*
6096 * attach_task() -- attach the task detached by detach_task() to its new rq.
6097 */
6098 static void attach_task(struct rq *rq, struct task_struct *p)
6099 {
6100 lockdep_assert_held(&rq->lock);
6101
6102 BUG_ON(task_rq(p) != rq);
6103 activate_task(rq, p, 0);
6104 p->on_rq = TASK_ON_RQ_QUEUED;
6105 check_preempt_curr(rq, p, 0);
6106 }
6107
6108 /*
6109 * attach_one_task() -- attaches the task returned from detach_one_task() to
6110 * its new rq.
6111 */
6112 static void attach_one_task(struct rq *rq, struct task_struct *p)
6113 {
6114 raw_spin_lock(&rq->lock);
6115 attach_task(rq, p);
6116 raw_spin_unlock(&rq->lock);
6117 }
6118
6119 /*
6120 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
6121 * new rq.
6122 */
6123 static void attach_tasks(struct lb_env *env)
6124 {
6125 struct list_head *tasks = &env->tasks;
6126 struct task_struct *p;
6127
6128 raw_spin_lock(&env->dst_rq->lock);
6129
6130 while (!list_empty(tasks)) {
6131 p = list_first_entry(tasks, struct task_struct, se.group_node);
6132 list_del_init(&p->se.group_node);
6133
6134 attach_task(env->dst_rq, p);
6135 }
6136
6137 raw_spin_unlock(&env->dst_rq->lock);
6138 }
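/*
 * Illustrative sketch (not kernel code) of how the detach/attach
 * primitives above pair up, modelled on their use in load_balance():
 * tasks are detached under the source rq lock, the lock is dropped,
 * and the tasks are re-attached under the destination rq lock, with
 * TASK_ON_RQ_MIGRATING marking them in between. Interrupts are
 * assumed disabled by the caller, as in load_balance().
 */
static int example_move_tasks(struct lb_env *env)
{
	int moved;

	raw_spin_lock(&env->src_rq->lock);
	moved = detach_tasks(env);		/* fills env->tasks */
	raw_spin_unlock(&env->src_rq->lock);

	if (moved)
		attach_tasks(env);		/* takes dst_rq->lock itself */

	return moved;
}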
6139
6140 #ifdef CONFIG_FAIR_GROUP_SCHED
6141 static void update_blocked_averages(int cpu)
6142 {
6143 struct rq *rq = cpu_rq(cpu);
6144 struct cfs_rq *cfs_rq;
6145 unsigned long flags;
6146
6147 raw_spin_lock_irqsave(&rq->lock, flags);
6148 update_rq_clock(rq);
6149
6150 /*
6151 * Iterates the task_group tree in a bottom-up fashion; see
6152 * list_add_leaf_cfs_rq() for details.
6153 */
6154 for_each_leaf_cfs_rq(rq, cfs_rq) {
6155 /* throttled entities do not contribute to load */
6156 if (throttled_hierarchy(cfs_rq))
6157 continue;
6158
6159 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
6160 update_tg_load_avg(cfs_rq, 0);
6161 }
6162 raw_spin_unlock_irqrestore(&rq->lock, flags);
6163 }
6164
6165 /*
6166 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
6167 * This needs to be done in a top-down fashion because the load of a child
6168 * group is a fraction of its parent's load.
6169 */
6170 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
6171 {
6172 struct rq *rq = rq_of(cfs_rq);
6173 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
6174 unsigned long now = jiffies;
6175 unsigned long load;
6176
6177 if (cfs_rq->last_h_load_update == now)
6178 return;
6179
6180 cfs_rq->h_load_next = NULL;
6181 for_each_sched_entity(se) {
6182 cfs_rq = cfs_rq_of(se);
6183 cfs_rq->h_load_next = se;
6184 if (cfs_rq->last_h_load_update == now)
6185 break;
6186 }
6187
6188 if (!se) {
6189 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
6190 cfs_rq->last_h_load_update = now;
6191 }
6192
6193 while ((se = cfs_rq->h_load_next) != NULL) {
6194 load = cfs_rq->h_load;
6195 load = div64_ul(load * se->avg.load_avg,
6196 cfs_rq_load_avg(cfs_rq) + 1);
6197 cfs_rq = group_cfs_rq(se);
6198 cfs_rq->h_load = load;
6199 cfs_rq->last_h_load_update = now;
6200 }
6201 }
6202
6203 static unsigned long task_h_load(struct task_struct *p)
6204 {
6205 struct cfs_rq *cfs_rq = task_cfs_rq(p);
6206
6207 update_cfs_rq_h_load(cfs_rq);
6208 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
6209 cfs_rq_load_avg(cfs_rq) + 1);
6210 }
6211 #else
6212 static inline void update_blocked_averages(int cpu)
6213 {
6214 struct rq *rq = cpu_rq(cpu);
6215 struct cfs_rq *cfs_rq = &rq->cfs;
6216 unsigned long flags;
6217
6218 raw_spin_lock_irqsave(&rq->lock, flags);
6219 update_rq_clock(rq);
6220 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
6221 raw_spin_unlock_irqrestore(&rq->lock, flags);
6222 }
6223
6224 static unsigned long task_h_load(struct task_struct *p)
6225 {
6226 return p->se.avg.load_avg;
6227 }
6228 #endif
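/*
 * Illustrative sketch (not kernel code): one step of the h_load
 * recurrence used by update_cfs_rq_h_load() above. If a group entity
 * contributes half of its parent's load and a task contributes half
 * of the group's load, the task's hierarchical load works out to a
 * quarter of the parent's h_load.
 */
static inline unsigned long example_h_load_step(unsigned long parent_h_load,
						unsigned long se_load_avg,
						unsigned long cfs_rq_load_avg)
{
	/* the +1 mirrors the kernel's guard against a zero denominator */
	return div64_ul((u64)parent_h_load * se_load_avg,
			cfs_rq_load_avg + 1);
}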
6229
6230 /********** Helpers for find_busiest_group ************************/
6231
6232 enum group_type {
6233 group_other = 0,
6234 group_imbalanced,
6235 group_overloaded,
6236 };
6237
6238 /*
6239 * sg_lb_stats - stats of a sched_group required for load_balancing
6240 */
6241 struct sg_lb_stats {
6242 unsigned long avg_load; /* Avg load across the CPUs of the group */
6243 unsigned long group_load; /* Total load over the CPUs of the group */
6244 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
6245 unsigned long load_per_task;
6246 unsigned long group_capacity;
6247 unsigned long group_util; /* Total utilization of the group */
6248 unsigned int sum_nr_running; /* Nr tasks running in the group */
6249 unsigned int idle_cpus;
6250 unsigned int group_weight;
6251 enum group_type group_type;
6252 int group_no_capacity;
6253 #ifdef CONFIG_NUMA_BALANCING
6254 unsigned int nr_numa_running;
6255 unsigned int nr_preferred_running;
6256 #endif
6257 };
6258
6259 /*
6260 * sd_lb_stats - Structure to store the statistics of a sched_domain
6261 * during load balancing.
6262 */
6263 struct sd_lb_stats {
6264 struct sched_group *busiest; /* Busiest group in this sd */
6265 struct sched_group *local; /* Local group in this sd */
6266 unsigned long total_load; /* Total load of all groups in sd */
6267 unsigned long total_capacity; /* Total capacity of all groups in sd */
6268 unsigned long avg_load; /* Average load across all groups in sd */
6269
6270 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
6271 struct sg_lb_stats local_stat; /* Statistics of the local group */
6272 };
6273
6274 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
6275 {
6276 /*
6277 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
6278 * local_stat because update_sg_lb_stats() does a full clear/assignment.
6279 * We must however clear busiest_stat::avg_load because
6280 * update_sd_pick_busiest() reads this before assignment.
6281 */
6282 *sds = (struct sd_lb_stats){
6283 .busiest = NULL,
6284 .local = NULL,
6285 .total_load = 0UL,
6286 .total_capacity = 0UL,
6287 .busiest_stat = {
6288 .avg_load = 0UL,
6289 .sum_nr_running = 0,
6290 .group_type = group_other,
6291 },
6292 };
6293 }
6294
6295 /**
6296 * get_sd_load_idx - Obtain the load index for a given sched domain.
6297 * @sd: The sched_domain whose load_idx is to be obtained.
6298 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
6299 *
6300 * Return: The load index.
6301 */
6302 static inline int get_sd_load_idx(struct sched_domain *sd,
6303 enum cpu_idle_type idle)
6304 {
6305 int load_idx;
6306
6307 switch (idle) {
6308 case CPU_NOT_IDLE:
6309 load_idx = sd->busy_idx;
6310 break;
6311
6312 case CPU_NEWLY_IDLE:
6313 load_idx = sd->newidle_idx;
6314 break;
6315 default:
6316 load_idx = sd->idle_idx;
6317 break;
6318 }
6319
6320 return load_idx;
6321 }
6322
6323 static unsigned long scale_rt_capacity(int cpu)
6324 {
6325 struct rq *rq = cpu_rq(cpu);
6326 u64 total, used, age_stamp, avg;
6327 s64 delta;
6328
6329 /*
6330 * Since we're reading these variables without serialization make sure
6331 * we read them once before doing sanity checks on them.
6332 */
6333 age_stamp = READ_ONCE(rq->age_stamp);
6334 avg = READ_ONCE(rq->rt_avg);
6335 delta = __rq_clock_broken(rq) - age_stamp;
6336
6337 if (unlikely(delta < 0))
6338 delta = 0;
6339
6340 total = sched_avg_period() + delta;
6341
6342 used = div_u64(avg, total);
6343
6344 if (likely(used < SCHED_CAPACITY_SCALE))
6345 return SCHED_CAPACITY_SCALE - used;
6346
6347 return 1;
6348 }
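/*
 * Illustrative worked example (not kernel code): rq->rt_avg
 * accumulates RT/IRQ time pre-scaled by SCHED_CAPACITY_SCALE (1024),
 * so if such activity consumed 25% of the averaging period, used is
 * roughly 256 and scale_rt_capacity() returns 1024 - 256 = 768, i.e.
 * three quarters of the capacity remain for CFS tasks.
 */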
6349
6350 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6351 {
6352 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
6353 struct sched_group *sdg = sd->groups;
6354
6355 cpu_rq(cpu)->cpu_capacity_orig = capacity;
6356
6357 capacity *= scale_rt_capacity(cpu);
6358 capacity >>= SCHED_CAPACITY_SHIFT;
6359
6360 if (!capacity)
6361 capacity = 1;
6362
6363 cpu_rq(cpu)->cpu_capacity = capacity;
6364 sdg->sgc->capacity = capacity;
6365 }
6366
6367 void update_group_capacity(struct sched_domain *sd, int cpu)
6368 {
6369 struct sched_domain *child = sd->child;
6370 struct sched_group *group, *sdg = sd->groups;
6371 unsigned long capacity;
6372 unsigned long interval;
6373
6374 interval = msecs_to_jiffies(sd->balance_interval);
6375 interval = clamp(interval, 1UL, max_load_balance_interval);
6376 sdg->sgc->next_update = jiffies + interval;
6377
6378 if (!child) {
6379 update_cpu_capacity(sd, cpu);
6380 return;
6381 }
6382
6383 capacity = 0;
6384
6385 if (child->flags & SD_OVERLAP) {
6386 /*
6387 * SD_OVERLAP domains cannot assume that child groups
6388 * span the current group.
6389 */
6390
6391 for_each_cpu(cpu, sched_group_cpus(sdg)) {
6392 struct sched_group_capacity *sgc;
6393 struct rq *rq = cpu_rq(cpu);
6394
6395 /*
6396 * build_sched_domains() -> init_sched_groups_capacity()
6397 * gets here before we've attached the domains to the
6398 * runqueues.
6399 *
6400 * Use capacity_of(), which is set irrespective of domains
6401 * in update_cpu_capacity().
6402 *
6403 * This prevents capacity from being 0 and
6404 * causing divide-by-zero issues on boot.
6405 */
6406 if (unlikely(!rq->sd)) {
6407 capacity += capacity_of(cpu);
6408 continue;
6409 }
6410
6411 sgc = rq->sd->groups->sgc;
6412 capacity += sgc->capacity;
6413 }
6414 } else {
6415 /*
6416 * !SD_OVERLAP domains can assume that child groups
6417 * span the current group.
6418 */
6419
6420 group = child->groups;
6421 do {
6422 capacity += group->sgc->capacity;
6423 group = group->next;
6424 } while (group != child->groups);
6425 }
6426
6427 sdg->sgc->capacity = capacity;
6428 }
6429
6430 /*
6431 * Check whether the capacity of the rq has been noticeably reduced by side
6432 * activity. The imbalance_pct is used for the threshold.
6433 * Return true if the capacity is reduced.
6434 */
6435 static inline int
6436 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
6437 {
6438 return ((rq->cpu_capacity * sd->imbalance_pct) <
6439 (rq->cpu_capacity_orig * 100));
6440 }
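/*
 * Illustrative worked example (not kernel code): with a hypothetical
 * imbalance_pct of 117 and cpu_capacity_orig = 1024, the capacity is
 * considered reduced once side activity brings cpu_capacity down to
 * 875 or below, since 875 * 117 = 102375 < 1024 * 100 = 102400.
 */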
6441
6442 /*
6443 * Group imbalance indicates (and tries to solve) the problem where balancing
6444 * groups is inadequate due to tsk_cpus_allowed() constraints.
6445 *
6446 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6447 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6448 * Something like:
6449 *
6450 * { 0 1 2 3 } { 4 5 6 7 }
6451 * * * * *
6452 *
6453 * If we were to balance group-wise we'd place two tasks in the first group and
6454 * two tasks in the second group. Clearly this is undesired as it will overload
6455 * cpu 3 and leave one of the cpus in the second group unused.
6456 *
6457 * The current solution to this issue is detecting the skew in the first group
6458 * by noticing the lower domain failed to reach balance and had difficulty
6459 * moving tasks due to affinity constraints.
6460 *
6461 * When this is detected, the group becomes a candidate for busiest; see
6462 * update_sd_pick_busiest(). And calculate_imbalance() and
6463 * find_busiest_group() avoid some of the usual balance conditions to allow it
6464 * to create an effective group imbalance.
6465 *
6466 * This is a somewhat tricky proposition since the next run might not find the
6467 * group imbalance and decide the groups need to be balanced again. A most
6468 * subtle and fragile situation.
6469 */
6470
6471 static inline int sg_imbalanced(struct sched_group *group)
6472 {
6473 return group->sgc->imbalance;
6474 }
6475
6476 /*
6477 * group_has_capacity returns true if the group has spare capacity that could
6478 * be used by some tasks.
6479 * We consider that a group has spare capacity if the number of tasks is
6480 * smaller than the number of CPUs or if the utilization is lower than the
6481 * available capacity for CFS tasks.
6482 * For the latter, we use a threshold to stabilize the state, to take into
6483 * account the variance of the tasks' load and to return true only if the
6484 * available capacity is meaningful for the load balancer.
6485 * As an example, an available capacity of 1% can appear but it brings no
6486 * benefit for load balancing.
6487 */
6488 static inline bool
6489 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6490 {
6491 if (sgs->sum_nr_running < sgs->group_weight)
6492 return true;
6493
6494 if ((sgs->group_capacity * 100) >
6495 (sgs->group_util * env->sd->imbalance_pct))
6496 return true;
6497
6498 return false;
6499 }
6500
6501 /*
6502 * group_is_overloaded returns true if the group has more tasks than it can
6503 * handle.
6504 * group_is_overloaded is not equal to !group_has_capacity because a group
6505 * with exactly the right number of tasks has no spare capacity left but is
6506 * not overloaded, so both group_has_capacity and group_is_overloaded
6507 * return false.
6508 */
6509 static inline bool
6510 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6511 {
6512 if (sgs->sum_nr_running <= sgs->group_weight)
6513 return false;
6514
6515 if ((sgs->group_capacity * 100) <
6516 (sgs->group_util * env->sd->imbalance_pct))
6517 return true;
6518
6519 return false;
6520 }
6521
6522 static inline enum
6523 group_type group_classify(struct sched_group *group,
6524 struct sg_lb_stats *sgs)
6525 {
6526 if (sgs->group_no_capacity)
6527 return group_overloaded;
6528
6529 if (sg_imbalanced(group))
6530 return group_imbalanced;
6531
6532 return group_other;
6533 }
6534
6535 /**
6536 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
6537 * @env: The load balancing environment.
6538 * @group: sched_group whose statistics are to be updated.
6539 * @load_idx: Load index of sched_domain of this_cpu for load calc.
6540 * @local_group: Does group contain this_cpu.
6541 * @sgs: variable to hold the statistics for this group.
6542 * @overload: Indicate more than one runnable task for any CPU.
6543 */
6544 static inline void update_sg_lb_stats(struct lb_env *env,
6545 struct sched_group *group, int load_idx,
6546 int local_group, struct sg_lb_stats *sgs,
6547 bool *overload)
6548 {
6549 unsigned long load;
6550 int i, nr_running;
6551
6552 memset(sgs, 0, sizeof(*sgs));
6553
6554 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6555 struct rq *rq = cpu_rq(i);
6556
6557 /* Bias balancing toward cpus of our domain */
6558 if (local_group)
6559 load = target_load(i, load_idx);
6560 else
6561 load = source_load(i, load_idx);
6562
6563 sgs->group_load += load;
6564 sgs->group_util += cpu_util(i);
6565 sgs->sum_nr_running += rq->cfs.h_nr_running;
6566
6567 nr_running = rq->nr_running;
6568 if (nr_running > 1)
6569 *overload = true;
6570
6571 #ifdef CONFIG_NUMA_BALANCING
6572 sgs->nr_numa_running += rq->nr_numa_running;
6573 sgs->nr_preferred_running += rq->nr_preferred_running;
6574 #endif
6575 sgs->sum_weighted_load += weighted_cpuload(i);
6576 /*
6577 * No need to call idle_cpu() if nr_running is not 0
6578 */
6579 if (!nr_running && idle_cpu(i))
6580 sgs->idle_cpus++;
6581 }
6582
6583 /* Adjust by relative CPU capacity of the group */
6584 sgs->group_capacity = group->sgc->capacity;
6585 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
6586
6587 if (sgs->sum_nr_running)
6588 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
6589
6590 sgs->group_weight = group->group_weight;
6591
6592 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6593 sgs->group_type = group_classify(group, sgs);
6594 }
6595
6596 /**
6597 * update_sd_pick_busiest - return 1 on busiest group
6598 * @env: The load balancing environment.
6599 * @sds: sched_domain statistics
6600 * @sg: sched_group candidate to be checked for being the busiest
6601 * @sgs: sched_group statistics
6602 *
6603 * Determine if @sg is a busier group than the previously selected
6604 * busiest group.
6605 *
6606 * Return: %true if @sg is a busier group than the previously selected
6607 * busiest group. %false otherwise.
6608 */
6609 static bool update_sd_pick_busiest(struct lb_env *env,
6610 struct sd_lb_stats *sds,
6611 struct sched_group *sg,
6612 struct sg_lb_stats *sgs)
6613 {
6614 struct sg_lb_stats *busiest = &sds->busiest_stat;
6615
6616 if (sgs->group_type > busiest->group_type)
6617 return true;
6618
6619 if (sgs->group_type < busiest->group_type)
6620 return false;
6621
6622 if (sgs->avg_load <= busiest->avg_load)
6623 return false;
6624
6625 /* This is the busiest node in its class. */
6626 if (!(env->sd->flags & SD_ASYM_PACKING))
6627 return true;
6628
6629 /*
6630 * ASYM_PACKING needs to move all the work to the lowest
6631 * numbered CPUs in the group, therefore mark all groups
6632 * higher than ourself as busy.
6633 */
6634 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
6635 if (!sds->busiest)
6636 return true;
6637
6638 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6639 return true;
6640 }
6641
6642 return false;
6643 }
6644
6645 #ifdef CONFIG_NUMA_BALANCING
6646 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6647 {
6648 if (sgs->sum_nr_running > sgs->nr_numa_running)
6649 return regular;
6650 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6651 return remote;
6652 return all;
6653 }
6654
6655 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6656 {
6657 if (rq->nr_running > rq->nr_numa_running)
6658 return regular;
6659 if (rq->nr_running > rq->nr_preferred_running)
6660 return remote;
6661 return all;
6662 }
6663 #else
6664 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6665 {
6666 return all;
6667 }
6668
6669 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6670 {
6671 return regular;
6672 }
6673 #endif /* CONFIG_NUMA_BALANCING */
6674
6675 /**
6676 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
6677 * @env: The load balancing environment.
6678 * @sds: variable to hold the statistics for this sched_domain.
6679 */
6680 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
6681 {
6682 struct sched_domain *child = env->sd->child;
6683 struct sched_group *sg = env->sd->groups;
6684 struct sg_lb_stats tmp_sgs;
6685 int load_idx, prefer_sibling = 0;
6686 bool overload = false;
6687
6688 if (child && child->flags & SD_PREFER_SIBLING)
6689 prefer_sibling = 1;
6690
6691 load_idx = get_sd_load_idx(env->sd, env->idle);
6692
6693 do {
6694 struct sg_lb_stats *sgs = &tmp_sgs;
6695 int local_group;
6696
6697 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
6698 if (local_group) {
6699 sds->local = sg;
6700 sgs = &sds->local_stat;
6701
6702 if (env->idle != CPU_NEWLY_IDLE ||
6703 time_after_eq(jiffies, sg->sgc->next_update))
6704 update_group_capacity(env->sd, env->dst_cpu);
6705 }
6706
6707 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6708 &overload);
6709
6710 if (local_group)
6711 goto next_group;
6712
6713 /*
6714 * In case the child domain prefers tasks to go to siblings
6715 * first, lower the sg capacity so that we'll try
6716 * and move all the excess tasks away. We lower the capacity
6717 * of a group only if the local group has the capacity to fit
6718 * these excess tasks. The extra check prevents the case where
6719 * you always pull from the heaviest group when it is already
6720 * under-utilized (possible when a large-weight task outweighs
6721 * the rest of the tasks on the system).
6722 */
6723 if (prefer_sibling && sds->local &&
6724 group_has_capacity(env, &sds->local_stat) &&
6725 (sgs->sum_nr_running > 1)) {
6726 sgs->group_no_capacity = 1;
6727 sgs->group_type = group_classify(sg, sgs);
6728 }
6729
6730 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
6731 sds->busiest = sg;
6732 sds->busiest_stat = *sgs;
6733 }
6734
6735 next_group:
6736 /* Now, start updating sd_lb_stats */
6737 sds->total_load += sgs->group_load;
6738 sds->total_capacity += sgs->group_capacity;
6739
6740 sg = sg->next;
6741 } while (sg != env->sd->groups);
6742
6743 if (env->sd->flags & SD_NUMA)
6744 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
6745
6746 if (!env->sd->parent) {
6747 /* update overload indicator if we are at root domain */
6748 if (env->dst_rq->rd->overload != overload)
6749 env->dst_rq->rd->overload = overload;
6750 }
6751
6752 }
6753
6754 /**
6755 * check_asym_packing - Check to see if the group is packed into the
6756 * sched domain.
6757 *
6758 * This is primarily intended to be used at the sibling level. Some
6759 * cores like POWER7 prefer to use lower numbered SMT threads. In the
6760 * case of POWER7, it can move to lower SMT modes only when higher
6761 * threads are idle. When in lower SMT modes, the threads will
6762 * perform better since they share less core resources. Hence when we
6763 * have idle threads, we want them to be the higher ones.
6764 *
6765 * This packing function is run on idle threads. It checks to see if
6766 * the busiest CPU in this domain (core in the P7 case) has a higher
6767 * CPU number than the packing function is being run on. Here we are
6768 * assuming a lower CPU number is equivalent to a lower SMT thread
6769 * number.
6770 *
6771 * Return: 1 when packing is required and a task should be moved to
6772 * this CPU. The amount of the imbalance is returned in *imbalance.
6773 *
6774 * @env: The load balancing environment.
6775 * @sds: Statistics of the sched_domain which is to be packed
6776 */
6777 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6778 {
6779 int busiest_cpu;
6780
6781 if (!(env->sd->flags & SD_ASYM_PACKING))
6782 return 0;
6783
6784 if (!sds->busiest)
6785 return 0;
6786
6787 busiest_cpu = group_first_cpu(sds->busiest);
6788 if (env->dst_cpu > busiest_cpu)
6789 return 0;
6790
6791 env->imbalance = DIV_ROUND_CLOSEST(
6792 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
6793 SCHED_CAPACITY_SCALE);
6794
6795 return 1;
6796 }
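/*
 * Illustrative worked example (not kernel code): if the busiest group
 * reports avg_load = 512 at group_capacity = 1024, the packing
 * imbalance is DIV_ROUND_CLOSEST(512 * 1024, 1024) = 512, i.e. pull
 * the group's entire load toward the lower-numbered CPU.
 */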
6797
6798 /**
6799 * fix_small_imbalance - Calculate the minor imbalance that exists
6800 * amongst the groups of a sched_domain, during
6801 * load balancing.
6802 * @env: The load balancing environment.
6803 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
6804 */
6805 static inline
6806 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6807 {
6808 unsigned long tmp, capa_now = 0, capa_move = 0;
6809 unsigned int imbn = 2;
6810 unsigned long scaled_busy_load_per_task;
6811 struct sg_lb_stats *local, *busiest;
6812
6813 local = &sds->local_stat;
6814 busiest = &sds->busiest_stat;
6815
6816 if (!local->sum_nr_running)
6817 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6818 else if (busiest->load_per_task > local->load_per_task)
6819 imbn = 1;
6820
6821 scaled_busy_load_per_task =
6822 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6823 busiest->group_capacity;
6824
6825 if (busiest->avg_load + scaled_busy_load_per_task >=
6826 local->avg_load + (scaled_busy_load_per_task * imbn)) {
6827 env->imbalance = busiest->load_per_task;
6828 return;
6829 }
6830
6831 /*
6832 * OK, we don't have enough imbalance to justify moving tasks;
6833 * however we may be able to increase total CPU capacity used by
6834 * moving them.
6835 */
6836
6837 capa_now += busiest->group_capacity *
6838 min(busiest->load_per_task, busiest->avg_load);
6839 capa_now += local->group_capacity *
6840 min(local->load_per_task, local->avg_load);
6841 capa_now /= SCHED_CAPACITY_SCALE;
6842
6843 /* Amount of load we'd subtract */
6844 if (busiest->avg_load > scaled_busy_load_per_task) {
6845 capa_move += busiest->group_capacity *
6846 min(busiest->load_per_task,
6847 busiest->avg_load - scaled_busy_load_per_task);
6848 }
6849
6850 /* Amount of load we'd add */
6851 if (busiest->avg_load * busiest->group_capacity <
6852 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
6853 tmp = (busiest->avg_load * busiest->group_capacity) /
6854 local->group_capacity;
6855 } else {
6856 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6857 local->group_capacity;
6858 }
6859 capa_move += local->group_capacity *
6860 min(local->load_per_task, local->avg_load + tmp);
6861 capa_move /= SCHED_CAPACITY_SCALE;
6862
6863 /* Move if we gain throughput */
6864 if (capa_move > capa_now)
6865 env->imbalance = busiest->load_per_task;
6866 }
6867
6868 /**
6869 * calculate_imbalance - Calculate the amount of imbalance present within the
6870 * groups of a given sched_domain during load balance.
6871 * @env: load balance environment
6872 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
6873 */
6874 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6875 {
6876 unsigned long max_pull, load_above_capacity = ~0UL;
6877 struct sg_lb_stats *local, *busiest;
6878
6879 local = &sds->local_stat;
6880 busiest = &sds->busiest_stat;
6881
6882 if (busiest->group_type == group_imbalanced) {
6883 /*
6884 * In the group_imb case we cannot rely on group-wide averages
6885 * to ensure cpu-load equilibrium, look at wider averages. XXX
6886 */
6887 busiest->load_per_task =
6888 min(busiest->load_per_task, sds->avg_load);
6889 }
6890
6891 /*
6892 * In the presence of smp nice balancing, certain scenarios can have
6893 * max load less than avg load (as we skip the groups at or below
6894 * their cpu_capacity while calculating max_load).
6895 */
6896 if (busiest->avg_load <= sds->avg_load ||
6897 local->avg_load >= sds->avg_load) {
6898 env->imbalance = 0;
6899 return fix_small_imbalance(env, sds);
6900 }
6901
6902 /*
6903 * If there aren't any idle cpus, avoid creating some.
6904 */
6905 if (busiest->group_type == group_overloaded &&
6906 local->group_type == group_overloaded) {
6907 load_above_capacity = busiest->sum_nr_running *
6908 SCHED_LOAD_SCALE;
6909 if (load_above_capacity > busiest->group_capacity)
6910 load_above_capacity -= busiest->group_capacity;
6911 else
6912 load_above_capacity = ~0UL;
6913 }
6914
6915 /*
6916 * We're trying to get all the cpus to the average_load, so we don't
6917 * want to push ourselves above the average load, nor do we wish to
6918 * reduce the max loaded cpu below the average load. At the same time,
6919 * we also don't want to reduce the group load below the group capacity
6920 * (so that we can implement power-savings policies etc). Thus we look
6921 * for the minimum possible imbalance.
6922 */
6923 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6924
6925 /* How much load to actually move to equalise the imbalance */
6926 env->imbalance = min(
6927 max_pull * busiest->group_capacity,
6928 (sds->avg_load - local->avg_load) * local->group_capacity
6929 ) / SCHED_CAPACITY_SCALE;
6930
6931 /*
6932 * If *imbalance is less than the average load per runnable task,
6933 * there is no guarantee that any tasks will be moved, so bump its
6934 * value (via fix_small_imbalance()) to force at least one task to
6935 * be moved.
6936 */
6937 if (env->imbalance < busiest->load_per_task)
6938 return fix_small_imbalance(env, sds);
6939 }
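/*
 * Illustrative worked example (not kernel code): with hypothetical
 * busiest->avg_load = 1536, local->avg_load = 512, sds->avg_load =
 * 1024 and both group capacities at SCHED_CAPACITY_SCALE, max_pull =
 * min(1536 - 1024, ~0UL) = 512 and both min() operands above equal
 * 512 * 1024, so env->imbalance = 512: just enough load to bring both
 * groups to the domain average.
 */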
6940
6941 /******* find_busiest_group() helpers end here *********************/
6942
6943 /**
6944 * find_busiest_group - Returns the busiest group within the sched_domain
6945 * if there is an imbalance. If there isn't an imbalance, and
6946 * the user has opted for power-savings, it returns a group whose
6947 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6948 * such a group exists.
6949 *
6950 * Also calculates the amount of weighted load which should be moved
6951 * to restore balance.
6952 *
6953 * @env: The load balancing environment.
6954 *
6955 * Return: - The busiest group if imbalance exists.
6956 * - If no imbalance and user has opted for power-savings balance,
6957 * return the least loaded group whose CPUs can be
6958 * put to idle by rebalancing its tasks onto our group.
6959 */
6960 static struct sched_group *find_busiest_group(struct lb_env *env)
6961 {
6962 struct sg_lb_stats *local, *busiest;
6963 struct sd_lb_stats sds;
6964
6965 init_sd_lb_stats(&sds);
6966
6967 /*
6968 * Compute the various statistics relevant for load balancing at
6969 * this level.
6970 */
6971 update_sd_lb_stats(env, &sds);
6972 local = &sds.local_stat;
6973 busiest = &sds.busiest_stat;
6974
6975 /* ASYM feature bypasses nice load balance check */
6976 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6977 check_asym_packing(env, &sds))
6978 return sds.busiest;
6979
6980 /* There is no busy sibling group to pull tasks from */
6981 if (!sds.busiest || busiest->sum_nr_running == 0)
6982 goto out_balanced;
6983
6984 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6985 / sds.total_capacity;
6986
6987 /*
6988 * If the busiest group is imbalanced the below checks don't
6989 * work because they assume all things are equal, which typically
6990 * isn't true due to cpus_allowed constraints and the like.
6991 */
6992 if (busiest->group_type == group_imbalanced)
6993 goto force_balance;
6994
6995 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
6996 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
6997 busiest->group_no_capacity)
6998 goto force_balance;
6999
7000 /*
7001 * If the local group is busier than the selected busiest group
7002 * don't try and pull any tasks.
7003 */
7004 if (local->avg_load >= busiest->avg_load)
7005 goto out_balanced;
7006
7007 /*
7008 * Don't pull any tasks if this group is already above the domain
7009 * average load.
7010 */
7011 if (local->avg_load >= sds.avg_load)
7012 goto out_balanced;
7013
7014 if (env->idle == CPU_IDLE) {
7015 /*
7016 * This cpu is idle. If the busiest group is not overloaded
7017 * and there is no imbalance between this and busiest group
7018 * wrt idle cpus, it is balanced. The imbalance becomes
7019 * significant if the diff is greater than 1 otherwise we
7020 * might end up to just move the imbalance on another group
7021 */
7022 if ((busiest->group_type != group_overloaded) &&
7023 (local->idle_cpus <= (busiest->idle_cpus + 1)))
7024 goto out_balanced;
7025 } else {
7026 /*
7027 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7028 * imbalance_pct to be conservative.
7029 */
7030 if (100 * busiest->avg_load <=
7031 env->sd->imbalance_pct * local->avg_load)
7032 goto out_balanced;
7033 }
7034
7035 force_balance:
7036 /* Looks like there is an imbalance. Compute it */
7037 calculate_imbalance(env, &sds);
7038 return sds.busiest;
7039
7040 out_balanced:
7041 env->imbalance = 0;
7042 return NULL;
7043 }
7044
7045 /*
7046 * find_busiest_queue - find the busiest runqueue among the cpus in group.
7047 */
7048 static struct rq *find_busiest_queue(struct lb_env *env,
7049 struct sched_group *group)
7050 {
7051 struct rq *busiest = NULL, *rq;
7052 unsigned long busiest_load = 0, busiest_capacity = 1;
7053 int i;
7054
7055 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7056 unsigned long capacity, wl;
7057 enum fbq_type rt;
7058
7059 rq = cpu_rq(i);
7060 rt = fbq_classify_rq(rq);
7061
7062 /*
7063 * We classify groups/runqueues into three groups:
7064 * - regular: there are !numa tasks
7065 * - remote: there are numa tasks that run on the 'wrong' node
7066 * - all: there is no distinction
7067 *
7068 * In order to avoid migrating ideally placed numa tasks,
7069 * ignore those when there's better options.
7070 *
7071 * If we ignore the actual busiest queue to migrate another
7072 * task, the next balance pass can still reduce the busiest
7073 * queue by moving tasks around inside the node.
7074 *
7075 * If we cannot move enough load due to this classification
7076 * the next pass will adjust the group classification and
7077 * allow migration of more tasks.
7078 *
7079 * Both cases only affect the total convergence complexity.
7080 */
7081 if (rt > env->fbq_type)
7082 continue;
7083
7084 capacity = capacity_of(i);
7085
7086 wl = weighted_cpuload(i);
7087
7088 /*
7089 * When comparing with imbalance, use weighted_cpuload()
7090 * which is not scaled with the cpu capacity.
7091 */
7092
7093 if (rq->nr_running == 1 && wl > env->imbalance &&
7094 !check_cpu_capacity(rq, env->sd))
7095 continue;
7096
7097 /*
7098 * For the load comparisons with the other cpu's, consider
7099 * the weighted_cpuload() scaled with the cpu capacity, so
7100 * that the load can be moved away from the cpu that is
7101 * potentially running at a lower capacity.
7102 *
7103 * Thus we're looking for max(wl_i / capacity_i), crosswise
7104 * multiplication to rid ourselves of the division works out
7105 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
7106 * our previous maximum.
7107 */
7108 if (wl * busiest_capacity > busiest_load * capacity) {
7109 busiest_load = wl;
7110 busiest_capacity = capacity;
7111 busiest = rq;
7112 }
7113 }
7114
7115 return busiest;
7116 }
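/*
 * Illustrative worked example (not kernel code) of the crosswise
 * multiplication above: comparing wl_1/capacity_1 = 600/512 against
 * wl_2/capacity_2 = 1000/1024 gives 600 * 1024 = 614400 >
 * 1000 * 512 = 512000, so the lower-capacity CPU is busier relative
 * to its capacity despite carrying less absolute load.
 */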
7117
7118 /*
7119 * Max backoff if we encounter pinned tasks. The value is fairly
7120 * arbitrary, as long as it is large enough.
7121 */
7122 #define MAX_PINNED_INTERVAL 512
7123
7124 /* Working cpumask for load_balance and load_balance_newidle. */
7125 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
7126
7127 static int need_active_balance(struct lb_env *env)
7128 {
7129 struct sched_domain *sd = env->sd;
7130
7131 if (env->idle == CPU_NEWLY_IDLE) {
7132
7133 /*
7134 * ASYM_PACKING needs to force migrate tasks from busy but
7135 * higher numbered CPUs in order to pack all tasks in the
7136 * lowest numbered CPUs.
7137 */
7138 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
7139 return 1;
7140 }
7141
7142 /*
7143 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
7144 * It's worth migrating the task if the src_cpu's capacity is reduced
7145 * because of other sched_class or IRQs if more capacity stays
7146 * available on dst_cpu.
7147 */
7148 if ((env->idle != CPU_NOT_IDLE) &&
7149 (env->src_rq->cfs.h_nr_running == 1)) {
7150 if ((check_cpu_capacity(env->src_rq, sd)) &&
7151 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7152 return 1;
7153 }
7154
7155 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7156 }
7157
7158 static int active_load_balance_cpu_stop(void *data);
7159
7160 static int should_we_balance(struct lb_env *env)
7161 {
7162 struct sched_group *sg = env->sd->groups;
7163 struct cpumask *sg_cpus, *sg_mask;
7164 int cpu, balance_cpu = -1;
7165
7166 /*
7167 * In the newly idle case, we will allow all the cpus
7168 * to do the newly idle load balance.
7169 */
7170 if (env->idle == CPU_NEWLY_IDLE)
7171 return 1;
7172
7173 sg_cpus = sched_group_cpus(sg);
7174 sg_mask = sched_group_mask(sg);
7175 /* Try to find first idle cpu */
7176 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7177 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7178 continue;
7179
7180 balance_cpu = cpu;
7181 break;
7182 }
7183
7184 if (balance_cpu == -1)
7185 balance_cpu = group_balance_cpu(sg);
7186
7187 /*
7188 * The first idle cpu or the first cpu (busiest) in this sched group
7189 * is eligible for doing load balancing at this and above domains.
7190 */
7191 return balance_cpu == env->dst_cpu;
7192 }
7193
7194 /*
7195 * Check this_cpu to ensure it is balanced within domain. Attempt to move
7196 * tasks if there is an imbalance.
7197 */
7198 static int load_balance(int this_cpu, struct rq *this_rq,
7199 struct sched_domain *sd, enum cpu_idle_type idle,
7200 int *continue_balancing)
7201 {
7202 int ld_moved, cur_ld_moved, active_balance = 0;
7203 struct sched_domain *sd_parent = sd->parent;
7204 struct sched_group *group;
7205 struct rq *busiest;
7206 unsigned long flags;
7207 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
7208
7209 struct lb_env env = {
7210 .sd = sd,
7211 .dst_cpu = this_cpu,
7212 .dst_rq = this_rq,
7213 .dst_grpmask = sched_group_cpus(sd->groups),
7214 .idle = idle,
7215 .loop_break = sched_nr_migrate_break,
7216 .cpus = cpus,
7217 .fbq_type = all,
7218 .tasks = LIST_HEAD_INIT(env.tasks),
7219 };
7220
7221 /*
7222 * For NEWLY_IDLE load_balancing, we don't need to consider
7223 * other cpus in our group
7224 */
7225 if (idle == CPU_NEWLY_IDLE)
7226 env.dst_grpmask = NULL;
7227
7228 cpumask_copy(cpus, cpu_active_mask);
7229
7230 schedstat_inc(sd, lb_count[idle]);
7231
7232 redo:
7233 if (!should_we_balance(&env)) {
7234 *continue_balancing = 0;
7235 goto out_balanced;
7236 }
7237
7238 group = find_busiest_group(&env);
7239 if (!group) {
7240 schedstat_inc(sd, lb_nobusyg[idle]);
7241 goto out_balanced;
7242 }
7243
7244 busiest = find_busiest_queue(&env, group);
7245 if (!busiest) {
7246 schedstat_inc(sd, lb_nobusyq[idle]);
7247 goto out_balanced;
7248 }
7249
7250 BUG_ON(busiest == env.dst_rq);
7251
7252 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
7253
7254 env.src_cpu = busiest->cpu;
7255 env.src_rq = busiest;
7256
7257 ld_moved = 0;
7258 if (busiest->nr_running > 1) {
7259 /*
7260 * Attempt to move tasks. If find_busiest_group has found
7261 * an imbalance but busiest->nr_running <= 1, the group is
7262 * still unbalanced. ld_moved simply stays zero, so it is
7263 * correctly treated as an imbalance.
7264 */
7265 env.flags |= LBF_ALL_PINNED;
7266 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
7267
7268 more_balance:
7269 raw_spin_lock_irqsave(&busiest->lock, flags);
7270
7271 /*
7272 * cur_ld_moved - load moved in current iteration
7273 * ld_moved - cumulative load moved across iterations
7274 */
7275 cur_ld_moved = detach_tasks(&env);
7276
7277 /*
7278 * We've detached some tasks from busiest_rq. Every
7279 * task is marked TASK_ON_RQ_MIGRATING, so we can safely
7280 * unlock busiest->lock and be sure
7281 * that nobody can manipulate the tasks in parallel.
7282 * See task_rq_lock() family for the details.
7283 */
7284
7285 raw_spin_unlock(&busiest->lock);
7286
7287 if (cur_ld_moved) {
7288 attach_tasks(&env);
7289 ld_moved += cur_ld_moved;
7290 }
7291
7292 local_irq_restore(flags);
7293
7294 if (env.flags & LBF_NEED_BREAK) {
7295 env.flags &= ~LBF_NEED_BREAK;
7296 goto more_balance;
7297 }
7298
7299 /*
7300 * Revisit (affine) tasks on src_cpu that couldn't be moved to
7301 * us and move them to an alternate dst_cpu in our sched_group
7302 * where they can run. The upper limit on how many times we
7303 * iterate on same src_cpu is dependent on number of cpus in our
7304 * sched_group.
7305 *
7306 * This changes load balance semantics a bit on who can move
7307 * load to a given_cpu. In addition to the given_cpu itself
7308 * (or a ilb_cpu acting on its behalf where given_cpu is
7309 * nohz-idle), we now have balance_cpu in a position to move
7310 * load to given_cpu. In rare situations, this may cause
7311 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7312 * _independently_ and at _same_ time to move some load to
7313 * given_cpu) causing excess load to be moved to given_cpu.
7314 * This however should not happen so much in practice and
7315 * moreover subsequent load balance cycles should correct the
7316 * excess load moved.
7317 */
7318 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
7319
7320 /* Prevent re-selecting dst_cpu via env's cpus */
7321 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7322
7323 env.dst_rq = cpu_rq(env.new_dst_cpu);
7324 env.dst_cpu = env.new_dst_cpu;
7325 env.flags &= ~LBF_DST_PINNED;
7326 env.loop = 0;
7327 env.loop_break = sched_nr_migrate_break;
7328
7329 /*
7330 * Go back to "more_balance" rather than "redo" since we
7331 * need to continue with same src_cpu.
7332 */
7333 goto more_balance;
7334 }
7335
7336 /*
7337 * We failed to reach balance because of affinity.
7338 */
7339 if (sd_parent) {
7340 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7341
7342 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
7343 *group_imbalance = 1;
7344 }
7345
7346 /* All tasks on this runqueue were pinned by CPU affinity */
7347 if (unlikely(env.flags & LBF_ALL_PINNED)) {
7348 cpumask_clear_cpu(cpu_of(busiest), cpus);
7349 if (!cpumask_empty(cpus)) {
7350 env.loop = 0;
7351 env.loop_break = sched_nr_migrate_break;
7352 goto redo;
7353 }
7354 goto out_all_pinned;
7355 }
7356 }
7357
7358 if (!ld_moved) {
7359 schedstat_inc(sd, lb_failed[idle]);
7360 /*
7361 * Increment the failure counter only on periodic balance.
7362 * We do not want newidle balance, which can be very
7363 * frequent, pollute the failure counter causing
7364 * excessive cache_hot migrations and active balances.
7365 */
7366 if (idle != CPU_NEWLY_IDLE)
7367 sd->nr_balance_failed++;
7368
7369 if (need_active_balance(&env)) {
7370 raw_spin_lock_irqsave(&busiest->lock, flags);
7371
7372 /* don't kick the active_load_balance_cpu_stop,
7373 * if the curr task on busiest cpu can't be
7374 * moved to this_cpu
7375 */
7376 if (!cpumask_test_cpu(this_cpu,
7377 tsk_cpus_allowed(busiest->curr))) {
7378 raw_spin_unlock_irqrestore(&busiest->lock,
7379 flags);
7380 env.flags |= LBF_ALL_PINNED;
7381 goto out_one_pinned;
7382 }
7383
7384 /*
7385 * ->active_balance synchronizes accesses to
7386 * ->active_balance_work. Once set, it's cleared
7387 * only after active load balance is finished.
7388 */
7389 if (!busiest->active_balance) {
7390 busiest->active_balance = 1;
7391 busiest->push_cpu = this_cpu;
7392 active_balance = 1;
7393 }
7394 raw_spin_unlock_irqrestore(&busiest->lock, flags);
7395
7396 if (active_balance) {
7397 stop_one_cpu_nowait(cpu_of(busiest),
7398 active_load_balance_cpu_stop, busiest,
7399 &busiest->active_balance_work);
7400 }
7401
7402 /*
7403 * We've kicked active balancing, reset the failure
7404 * counter.
7405 */
7406 sd->nr_balance_failed = sd->cache_nice_tries+1;
7407 }
7408 } else
7409 sd->nr_balance_failed = 0;
7410
7411 if (likely(!active_balance)) {
7412 /* We were unbalanced, so reset the balancing interval */
7413 sd->balance_interval = sd->min_interval;
7414 } else {
7415 /*
7416 * If we've begun active balancing, start to back off. This
7417 * case may not be covered by the all_pinned logic if there
7418 * is only 1 task on the busy runqueue (because we don't call
7419 * detach_tasks).
7420 */
7421 if (sd->balance_interval < sd->max_interval)
7422 sd->balance_interval *= 2;
7423 }
7424
7425 goto out;
7426
7427 out_balanced:
7428 /*
7429 * We reach balance although we may have faced some affinity
7430 * constraints. Clear the imbalance flag if it was set.
7431 */
7432 if (sd_parent) {
7433 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7434
7435 if (*group_imbalance)
7436 *group_imbalance = 0;
7437 }
7438
7439 out_all_pinned:
7440 /*
7441 * We reach balance because all tasks are pinned at this level so
7442 * we can't migrate them. Leave the imbalance flag set so the parent
7443 * level can try to migrate them.
7444 */
7445 schedstat_inc(sd, lb_balanced[idle]);
7446
7447 sd->nr_balance_failed = 0;
7448
7449 out_one_pinned:
7450 /* tune up the balancing interval */
7451 if (((env.flags & LBF_ALL_PINNED) &&
7452 sd->balance_interval < MAX_PINNED_INTERVAL) ||
7453 (sd->balance_interval < sd->max_interval))
7454 sd->balance_interval *= 2;
7455
7456 ld_moved = 0;
7457 out:
7458 return ld_moved;
7459 }
7460
7461 static inline unsigned long
7462 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7463 {
7464 unsigned long interval = sd->balance_interval;
7465
7466 if (cpu_busy)
7467 interval *= sd->busy_factor;
7468
7469 /* scale ms to jiffies */
7470 interval = msecs_to_jiffies(interval);
7471 interval = clamp(interval, 1UL, max_load_balance_interval);
7472
7473 return interval;
7474 }
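/*
 * Worked example (illustrative numbers, not necessarily this kernel's
 * defaults): with sd->balance_interval = 64 (ms) and sd->busy_factor = 32,
 * a busy CPU gets 64 * 32 = 2048 ms, which msecs_to_jiffies() turns into
 * 2048 jiffies at HZ=1000 (512 at HZ=250), and which is then clamped to
 * the range [1, max_load_balance_interval] jiffies.
 */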
7475
7476 static inline void
7477 update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7478 {
7479 unsigned long interval, next;
7480
7481 interval = get_sd_balance_interval(sd, cpu_busy);
7482 next = sd->last_balance + interval;
7483
7484 if (time_after(*next_balance, next))
7485 *next_balance = next;
7486 }
7487
7488 /*
7489 * idle_balance is called by schedule() if this_cpu is about to become
7490 * idle. Attempts to pull tasks from other CPUs.
7491 */
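/*
 * Return convention, assuming the caller is pick_next_task_fair(): a
 * positive value means CFS tasks were pulled, -1 means a task of a
 * higher priority class appeared while the rq lock was dropped (the
 * caller should retry its pick), and 0 means nothing was pulled.
 */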
7492 static int idle_balance(struct rq *this_rq)
7493 {
7494 unsigned long next_balance = jiffies + HZ;
7495 int this_cpu = this_rq->cpu;
7496 struct sched_domain *sd;
7497 int pulled_task = 0;
7498 u64 curr_cost = 0;
7499
7500 /*
7501 * We must set idle_stamp _before_ calling idle_balance(), such that we
7502 * measure the duration of idle_balance() as idle time.
7503 */
7504 this_rq->idle_stamp = rq_clock(this_rq);
7505
7506 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7507 !this_rq->rd->overload) {
7508 rcu_read_lock();
7509 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7510 if (sd)
7511 update_next_balance(sd, 0, &next_balance);
7512 rcu_read_unlock();
7513
7514 goto out;
7515 }
7516
7517 raw_spin_unlock(&this_rq->lock);
7518
7519 update_blocked_averages(this_cpu);
7520 rcu_read_lock();
7521 for_each_domain(this_cpu, sd) {
7522 int continue_balancing = 1;
7523 u64 t0, domain_cost;
7524
7525 if (!(sd->flags & SD_LOAD_BALANCE))
7526 continue;
7527
7528 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7529 update_next_balance(sd, 0, &next_balance);
7530 break;
7531 }
7532
7533 if (sd->flags & SD_BALANCE_NEWIDLE) {
7534 t0 = sched_clock_cpu(this_cpu);
7535
7536 pulled_task = load_balance(this_cpu, this_rq,
7537 sd, CPU_NEWLY_IDLE,
7538 &continue_balancing);
7539
7540 domain_cost = sched_clock_cpu(this_cpu) - t0;
7541 if (domain_cost > sd->max_newidle_lb_cost)
7542 sd->max_newidle_lb_cost = domain_cost;
7543
7544 curr_cost += domain_cost;
7545 }
7546
7547 update_next_balance(sd, 0, &next_balance);
7548
7549 /*
7550 * Stop searching for tasks to pull if there are
7551 * now runnable tasks on this rq.
7552 */
7553 if (pulled_task || this_rq->nr_running > 0)
7554 break;
7555 }
7556 rcu_read_unlock();
7557
7558 raw_spin_lock(&this_rq->lock);
7559
7560 if (curr_cost > this_rq->max_idle_balance_cost)
7561 this_rq->max_idle_balance_cost = curr_cost;
7562
7563 /*
7564 * While browsing the domains we released the rq lock, so a task could
7565 * have been enqueued in the meantime. Since we're not going idle,
7566 * pretend we pulled a task.
7567 */
7568 if (this_rq->cfs.h_nr_running && !pulled_task)
7569 pulled_task = 1;
7570
7571 out:
7572 /* Move the next balance forward */
7573 if (time_after(this_rq->next_balance, next_balance))
7574 this_rq->next_balance = next_balance;
7575
7576 /* Is there a task of a higher-priority class? */
7577 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
7578 pulled_task = -1;
7579
7580 if (pulled_task)
7581 this_rq->idle_stamp = 0;
7582
7583 return pulled_task;
7584 }
7585
7586 /*
7587 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
7588 * running tasks off the busiest CPU onto idle CPUs. It requires at
7589 * least 1 task to be running on each physical CPU where possible, and
7590 * avoids physical / logical imbalances.
7591 */
7592 static int active_load_balance_cpu_stop(void *data)
7593 {
7594 struct rq *busiest_rq = data;
7595 int busiest_cpu = cpu_of(busiest_rq);
7596 int target_cpu = busiest_rq->push_cpu;
7597 struct rq *target_rq = cpu_rq(target_cpu);
7598 struct sched_domain *sd;
7599 struct task_struct *p = NULL;
7600
7601 raw_spin_lock_irq(&busiest_rq->lock);
7602
7603 /* make sure the requested cpu hasn't gone down in the meantime */
7604 if (unlikely(busiest_cpu != smp_processor_id() ||
7605 !busiest_rq->active_balance))
7606 goto out_unlock;
7607
7608 /* Is there any task to move? */
7609 if (busiest_rq->nr_running <= 1)
7610 goto out_unlock;
7611
7612 /*
7613 * This condition is "impossible"; if it occurs,
7614 * we need to fix it. Originally reported by
7615 * Bjorn Helgaas on a 128-cpu setup.
7616 */
7617 BUG_ON(busiest_rq == target_rq);
7618
7619 /* Search for an sd spanning us and the target CPU. */
7620 rcu_read_lock();
7621 for_each_domain(target_cpu, sd) {
7622 if ((sd->flags & SD_LOAD_BALANCE) &&
7623 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7624 break;
7625 }
7626
7627 if (likely(sd)) {
7628 struct lb_env env = {
7629 .sd = sd,
7630 .dst_cpu = target_cpu,
7631 .dst_rq = target_rq,
7632 .src_cpu = busiest_rq->cpu,
7633 .src_rq = busiest_rq,
7634 .idle = CPU_IDLE,
7635 };
7636
7637 schedstat_inc(sd, alb_count);
7638
7639 p = detach_one_task(&env);
7640 if (p)
7641 schedstat_inc(sd, alb_pushed);
7642 else
7643 schedstat_inc(sd, alb_failed);
7644 }
7645 rcu_read_unlock();
7646 out_unlock:
7647 busiest_rq->active_balance = 0;
7648 raw_spin_unlock(&busiest_rq->lock);
7649
7650 if (p)
7651 attach_one_task(target_rq, p);
7652
7653 local_irq_enable();
7654
7655 return 0;
7656 }
7657
7658 static inline int on_null_domain(struct rq *rq)
7659 {
7660 return unlikely(!rcu_dereference_sched(rq->sd));
7661 }
7662
7663 #ifdef CONFIG_NO_HZ_COMMON
7664 /*
7665 * idle load balancing details
7666 * - When one of the busy CPUs notices that idle rebalancing may be
7667 * needed, it kicks the idle load balancer, which then does idle
7668 * load balancing for all the idle CPUs.
7669 */
7670 static struct {
7671 cpumask_var_t idle_cpus_mask;
7672 atomic_t nr_cpus;
7673 unsigned long next_balance; /* in jiffy units */
7674 } nohz ____cacheline_aligned;
7675
7676 static inline int find_new_ilb(void)
7677 {
7678 int ilb = cpumask_first(nohz.idle_cpus_mask);
7679
7680 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7681 return ilb;
7682
7683 return nr_cpu_ids;
7684 }
7685
7686 /*
7687 * Kick a CPU to do nohz balancing if it is time for it. We pick
7688 * the first idle CPU in nohz.idle_cpus_mask (if there is one) to
7689 * act as the idle load balancer.
7690 */
7691 static void nohz_balancer_kick(void)
7692 {
7693 int ilb_cpu;
7694
7695 nohz.next_balance++;
7696
7697 ilb_cpu = find_new_ilb();
7698
7699 if (ilb_cpu >= nr_cpu_ids)
7700 return;
7701
7702 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
7703 return;
7704 /*
7705 * Use smp_send_reschedule() instead of resched_cpu().
7706 * This way we generate a sched IPI on the target cpu, which
7707 * is idle, and the softirq performing nohz idle load balancing
7708 * will run before the CPU returns from the IPI.
7709 */
7710 smp_send_reschedule(ilb_cpu);
7711 return;
7712 }
7713
7714 static inline void nohz_balance_exit_idle(int cpu)
7715 {
7716 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
7717 /*
7718 * Completely isolated CPUs never set this bit, so we must test.
7719 */
7720 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7721 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7722 atomic_dec(&nohz.nr_cpus);
7723 }
7724 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7725 }
7726 }
7727
7728 static inline void set_cpu_sd_state_busy(void)
7729 {
7730 struct sched_domain *sd;
7731 int cpu = smp_processor_id();
7732
7733 rcu_read_lock();
7734 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7735
7736 if (!sd || !sd->nohz_idle)
7737 goto unlock;
7738 sd->nohz_idle = 0;
7739
7740 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
7741 unlock:
7742 rcu_read_unlock();
7743 }
7744
7745 void set_cpu_sd_state_idle(void)
7746 {
7747 struct sched_domain *sd;
7748 int cpu = smp_processor_id();
7749
7750 rcu_read_lock();
7751 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7752
7753 if (!sd || sd->nohz_idle)
7754 goto unlock;
7755 sd->nohz_idle = 1;
7756
7757 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
7758 unlock:
7759 rcu_read_unlock();
7760 }
7761
7762 /*
7763 * This routine records that the cpu is going idle with its tick stopped.
7764 * This info is used later when performing idle load balancing.
7765 */
7766 void nohz_balance_enter_idle(int cpu)
7767 {
7768 /*
7769 * If this cpu is going down, then nothing needs to be done.
7770 */
7771 if (!cpu_active(cpu))
7772 return;
7773
7774 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7775 return;
7776
7777 /*
7778 * If we're a completely isolated CPU, we don't play.
7779 */
7780 if (on_null_domain(cpu_rq(cpu)))
7781 return;
7782
7783 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7784 atomic_inc(&nohz.nr_cpus);
7785 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7786 }
7787
7788 static int sched_ilb_notifier(struct notifier_block *nfb,
7789 unsigned long action, void *hcpu)
7790 {
7791 switch (action & ~CPU_TASKS_FROZEN) {
7792 case CPU_DYING:
7793 nohz_balance_exit_idle(smp_processor_id());
7794 return NOTIFY_OK;
7795 default:
7796 return NOTIFY_DONE;
7797 }
7798 }
7799 #endif
7800
7801 static DEFINE_SPINLOCK(balancing);
7802
7803 /*
7804 * Scale the max load_balance interval with the number of CPUs in the system.
7805 * This trades load-balance latency on larger machines for less cross talk.
7806 */
7807 void update_max_interval(void)
7808 {
7809 max_load_balance_interval = HZ*num_online_cpus()/10;
7810 }
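/*
 * For example, with HZ=1000 and 16 online CPUs this yields
 * 1000 * 16 / 10 = 1600 jiffies, i.e. a 1.6 second cap on the
 * per-domain balance interval.
 */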
7811
7812 /*
7813 * It checks each scheduling domain to see if it is due to be balanced,
7814 * and initiates a balancing operation if so.
7815 *
7816 * Balancing parameters are set up in init_sched_domains.
7817 */
7818 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
7819 {
7820 int continue_balancing = 1;
7821 int cpu = rq->cpu;
7822 unsigned long interval;
7823 struct sched_domain *sd;
7824 /* Earliest time when we have to do rebalance again */
7825 unsigned long next_balance = jiffies + 60*HZ;
7826 int update_next_balance = 0;
7827 int need_serialize, need_decay = 0;
7828 u64 max_cost = 0;
7829
7830 update_blocked_averages(cpu);
7831
7832 rcu_read_lock();
7833 for_each_domain(cpu, sd) {
7834 /*
7835 * Decay the newidle max times here because this is a regular
7836 * visit to all the domains. Decay ~1% per second.
7837 */
7838 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7839 sd->max_newidle_lb_cost =
7840 (sd->max_newidle_lb_cost * 253) / 256;
7841 sd->next_decay_max_lb_cost = jiffies + HZ;
7842 need_decay = 1;
7843 }
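/*
 * Each decay step multiplies by 253/256, i.e. removes 3/256
 * (~1.17%) of the cost, and next_decay_max_lb_cost rate-limits
 * the step to once per second -- hence the "~1% per second"
 * above.
 */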
7844 max_cost += sd->max_newidle_lb_cost;
7845
7846 if (!(sd->flags & SD_LOAD_BALANCE))
7847 continue;
7848
7849 /*
7850 * Stop the load balance at this level. There is another
7851 * CPU in our sched group which is doing load balancing more
7852 * actively.
7853 */
7854 if (!continue_balancing) {
7855 if (need_decay)
7856 continue;
7857 break;
7858 }
7859
7860 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7861
7862 need_serialize = sd->flags & SD_SERIALIZE;
7863 if (need_serialize) {
7864 if (!spin_trylock(&balancing))
7865 goto out;
7866 }
7867
7868 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7869 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7870 /*
7871 * The LBF_DST_PINNED logic could have changed
7872 * env->dst_cpu, so we can't know our idle
7873 * state even if we migrated tasks. Update it.
7874 */
7875 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7876 }
7877 sd->last_balance = jiffies;
7878 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7879 }
7880 if (need_serialize)
7881 spin_unlock(&balancing);
7882 out:
7883 if (time_after(next_balance, sd->last_balance + interval)) {
7884 next_balance = sd->last_balance + interval;
7885 update_next_balance = 1;
7886 }
7887 }
7888 if (need_decay) {
7889 /*
7890 * Ensure the rq-wide value also decays but keep it at a
7891 * reasonable floor to avoid funnies with rq->avg_idle.
7892 */
7893 rq->max_idle_balance_cost =
7894 max((u64)sysctl_sched_migration_cost, max_cost);
7895 }
7896 rcu_read_unlock();
7897
7898 /*
7899 * next_balance will be updated only when there is a need.
7900 * When the cpu is attached to a null domain, for example, it will not be
7901 * updated.
7902 */
7903 if (likely(update_next_balance)) {
7904 rq->next_balance = next_balance;
7905
7906 #ifdef CONFIG_NO_HZ_COMMON
7907 /*
7908 * If this CPU has been elected to perform the nohz idle
7909 * balance, the other idle CPUs have already rebalanced with
7910 * nohz_idle_balance() and nohz.next_balance has been
7911 * updated accordingly. This CPU is now running the idle load
7912 * balance for itself, so we need to update
7913 * nohz.next_balance accordingly.
7914 */
7915 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
7916 nohz.next_balance = rq->next_balance;
7917 #endif
7918 }
7919 }
7920
7921 #ifdef CONFIG_NO_HZ_COMMON
7922 /*
7923 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
7924 * rebalancing for all the cpus for whom scheduler ticks are stopped.
7925 */
7926 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7927 {
7928 int this_cpu = this_rq->cpu;
7929 struct rq *rq;
7930 int balance_cpu;
7931 /* Earliest time when we have to do rebalance again */
7932 unsigned long next_balance = jiffies + 60*HZ;
7933 int update_next_balance = 0;
7934
7935 if (idle != CPU_IDLE ||
7936 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7937 goto end;
7938
7939 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7940 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7941 continue;
7942
7943 /*
7944 * If this cpu gets work to do, stop the load balancing
7945 * work being done for other cpus. The next load
7946 * balancing owner will pick it up.
7947 */
7948 if (need_resched())
7949 break;
7950
7951 rq = cpu_rq(balance_cpu);
7952
7953 /*
7954 * If the time for the next balance is due,
7955 * do the balance.
7956 */
7957 if (time_after_eq(jiffies, rq->next_balance)) {
7958 raw_spin_lock_irq(&rq->lock);
7959 update_rq_clock(rq);
7960 update_cpu_load_idle(rq);
7961 raw_spin_unlock_irq(&rq->lock);
7962 rebalance_domains(rq, CPU_IDLE);
7963 }
7964
7965 if (time_after(next_balance, rq->next_balance)) {
7966 next_balance = rq->next_balance;
7967 update_next_balance = 1;
7968 }
7969 }
7970
7971 /*
7972 * next_balance will be updated only when there is a need.
7973 * When the CPU is attached to a null domain, for example, it will not be
7974 * updated.
7975 */
7976 if (likely(update_next_balance))
7977 nohz.next_balance = next_balance;
7978 end:
7979 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7980 }
7981
7982 /*
7983 * Current heuristic for kicking the idle load balancer in the presence
7984 * of an idle cpu in the system.
7985 * - This rq has more than one task.
7986 * - This rq has at least one CFS task and the capacity of the CPU is
7987 * significantly reduced because of RT tasks or IRQs.
7988 * - At the parent of the LLC scheduler domain level, this cpu's scheduler
7989 * group has multiple busy cpus.
7990 * - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
7991 * domain span are idle.
7992 */
7993 static inline bool nohz_kick_needed(struct rq *rq)
7994 {
7995 unsigned long now = jiffies;
7996 struct sched_domain *sd;
7997 struct sched_group_capacity *sgc;
7998 int nr_busy, cpu = rq->cpu;
7999 bool kick = false;
8000
8001 if (unlikely(rq->idle_balance))
8002 return false;
8003
8004 /*
8005 * We may recently have been in ticked or tickless idle mode. At the first
8006 * busy tick after returning from idle, we will update the busy stats.
8007 */
8008 set_cpu_sd_state_busy();
8009 nohz_balance_exit_idle(cpu);
8010
8011 /*
8012 * No CPUs are in tickless mode, hence there is no need for NOHZ idle load
8013 * balancing.
8014 */
8015 if (likely(!atomic_read(&nohz.nr_cpus)))
8016 return false;
8017
8018 if (time_before(now, nohz.next_balance))
8019 return false;
8020
8021 if (rq->nr_running >= 2)
8022 return true;
8023
8024 rcu_read_lock();
8025 sd = rcu_dereference(per_cpu(sd_busy, cpu));
8026 if (sd) {
8027 sgc = sd->groups->sgc;
8028 nr_busy = atomic_read(&sgc->nr_busy_cpus);
8029
8030 if (nr_busy > 1) {
8031 kick = true;
8032 goto unlock;
8033 }
8034
8035 }
8036
8037 sd = rcu_dereference(rq->sd);
8038 if (sd) {
8039 if ((rq->cfs.h_nr_running >= 1) &&
8040 check_cpu_capacity(rq, sd)) {
8041 kick = true;
8042 goto unlock;
8043 }
8044 }
8045
8046 sd = rcu_dereference(per_cpu(sd_asym, cpu));
8047 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
8048 sched_domain_span(sd)) < cpu)) {
8049 kick = true;
8050 goto unlock;
8051 }
8052
8053 unlock:
8054 rcu_read_unlock();
8055 return kick;
8056 }
8057 #else
8058 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
8059 #endif
8060
8061 /*
8062 * run_rebalance_domains is triggered when needed from the scheduler tick.
8063 * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
8064 */
8065 static void run_rebalance_domains(struct softirq_action *h)
8066 {
8067 struct rq *this_rq = this_rq();
8068 enum cpu_idle_type idle = this_rq->idle_balance ?
8069 CPU_IDLE : CPU_NOT_IDLE;
8070
8071 /*
8072 * If this cpu has a pending nohz_balance_kick, then do the
8073 * balancing on behalf of the other idle cpus whose ticks are
8074 * stopped. Do nohz_idle_balance *before* rebalance_domains to
8075 * give the idle cpus a chance to load balance. Else we may
8076 * load balance only within the local sched_domain hierarchy
8077 * and abort nohz_idle_balance altogether if we pull some load.
8078 */
8079 nohz_idle_balance(this_rq, idle);
8080 rebalance_domains(this_rq, idle);
8081 }
8082
8083 /*
8084 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
8085 */
8086 void trigger_load_balance(struct rq *rq)
8087 {
8088 /* Don't need to rebalance while attached to NULL domain */
8089 if (unlikely(on_null_domain(rq)))
8090 return;
8091
8092 if (time_after_eq(jiffies, rq->next_balance))
8093 raise_softirq(SCHED_SOFTIRQ);
8094 #ifdef CONFIG_NO_HZ_COMMON
8095 if (nohz_kick_needed(rq))
8096 nohz_balancer_kick();
8097 #endif
8098 }
8099
8100 static void rq_online_fair(struct rq *rq)
8101 {
8102 update_sysctl();
8103
8104 update_runtime_enabled(rq);
8105 }
8106
8107 static void rq_offline_fair(struct rq *rq)
8108 {
8109 update_sysctl();
8110
8111 /* Ensure any throttled groups are reachable by pick_next_task */
8112 unthrottle_offline_cfs_rqs(rq);
8113 }
8114
8115 #endif /* CONFIG_SMP */
8116
8117 /*
8118 * scheduler tick hitting a task of our scheduling class:
8119 */
8120 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
8121 {
8122 struct cfs_rq *cfs_rq;
8123 struct sched_entity *se = &curr->se;
8124
8125 for_each_sched_entity(se) {
8126 cfs_rq = cfs_rq_of(se);
8127 entity_tick(cfs_rq, se, queued);
8128 }
8129
8130 if (static_branch_unlikely(&sched_numa_balancing))
8131 task_tick_numa(rq, curr);
8132 }
8133
8134 /*
8135 * called on fork with the child task as argument from the parent's context
8136 * - child not yet on the tasklist
8137 * - preemption disabled
8138 */
8139 static void task_fork_fair(struct task_struct *p)
8140 {
8141 struct cfs_rq *cfs_rq;
8142 struct sched_entity *se = &p->se, *curr;
8143 int this_cpu = smp_processor_id();
8144 struct rq *rq = this_rq();
8145 unsigned long flags;
8146
8147 raw_spin_lock_irqsave(&rq->lock, flags);
8148
8149 update_rq_clock(rq);
8150
8151 cfs_rq = task_cfs_rq(current);
8152 curr = cfs_rq->curr;
8153
8154 /*
8155 * Not only the cpu but also the task_group of the parent might have
8156 * been changed after parent->se.parent and parent->se.cfs_rq were
8157 * copied to child->se.parent and child->se.cfs_rq. So call
8158 * __set_task_cpu() to make the child's pointers valid again.
8159 */
8160 rcu_read_lock();
8161 __set_task_cpu(p, this_cpu);
8162 rcu_read_unlock();
8163
8164 update_curr(cfs_rq);
8165
8166 if (curr)
8167 se->vruntime = curr->vruntime;
8168 place_entity(cfs_rq, se, 1);
8169
8170 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
8171 /*
8172 * Upon rescheduling, sched_class::put_prev_task() will place
8173 * 'current' within the tree based on its new key value.
8174 */
8175 swap(curr->vruntime, se->vruntime);
8176 resched_curr(rq);
8177 }
8178
8179 se->vruntime -= cfs_rq->min_vruntime;
8180
8181 raw_spin_unlock_irqrestore(&rq->lock, flags);
8182 }
8183
8184 /*
8185 * Priority of the task has changed. Check to see if we preempt
8186 * the current task.
8187 */
8188 static void
8189 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
8190 {
8191 if (!task_on_rq_queued(p))
8192 return;
8193
8194 /*
8195 * Reschedule if we are currently running on this runqueue and
8196 * our priority decreased, or if we are not currently running on
8197 * this runqueue and our priority is higher than the current task's.
8198 */
8199 if (rq->curr == p) {
8200 if (p->prio > oldprio)
8201 resched_curr(rq);
8202 } else
8203 check_preempt_curr(rq, p, 0);
8204 }
8205
8206 static inline bool vruntime_normalized(struct task_struct *p)
8207 {
8208 struct sched_entity *se = &p->se;
8209
8210 /*
8211 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
8212 * the dequeue_entity(.flags=0) will already have normalized the
8213 * vruntime.
8214 */
8215 if (p->on_rq)
8216 return true;
8217
8218 /*
8219 * When !on_rq, vruntime of the task has usually NOT been normalized.
8220 * But there are some cases where it has already been normalized:
8221 *
8222 * - A forked child which is waiting to be woken up by
8223 * wake_up_new_task().
8224 * - A task which has been woken up by try_to_wake_up() and is
8225 * waiting to actually be woken up by sched_ttwu_pending().
8226 */
8227 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
8228 return true;
8229
8230 return false;
8231 }
8232
8233 static void detach_task_cfs_rq(struct task_struct *p)
8234 {
8235 struct sched_entity *se = &p->se;
8236 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8237
8238 if (!vruntime_normalized(p)) {
8239 /*
8240 * Fix up our vruntime so that the current sleep doesn't
8241 * cause an 'unlimited' sleep bonus.
8242 */
8243 place_entity(cfs_rq, se, 0);
8244 se->vruntime -= cfs_rq->min_vruntime;
8245 }
8246
8247 /* Catch up with the cfs_rq and remove our load when we leave */
8248 detach_entity_load_avg(cfs_rq, se);
8249 }
8250
8251 static void attach_task_cfs_rq(struct task_struct *p)
8252 {
8253 struct sched_entity *se = &p->se;
8254 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8255
8256 #ifdef CONFIG_FAIR_GROUP_SCHED
8257 /*
8258 * Since the real depth could have changed (only the FAIR
8259 * class maintains a depth value), reset the depth properly.
8260 */
8261 se->depth = se->parent ? se->parent->depth + 1 : 0;
8262 #endif
8263
8264 /* Synchronize task with its cfs_rq */
8265 attach_entity_load_avg(cfs_rq, se);
8266
8267 if (!vruntime_normalized(p))
8268 se->vruntime += cfs_rq->min_vruntime;
8269 }
8270
8271 static void switched_from_fair(struct rq *rq, struct task_struct *p)
8272 {
8273 detach_task_cfs_rq(p);
8274 }
8275
8276 static void switched_to_fair(struct rq *rq, struct task_struct *p)
8277 {
8278 attach_task_cfs_rq(p);
8279
8280 if (task_on_rq_queued(p)) {
8281 /*
8282 * We were most likely switched from sched_rt, so
8283 * force a reschedule if running, otherwise just see
8284 * if we can still preempt the current task.
8285 */
8286 if (rq->curr == p)
8287 resched_curr(rq);
8288 else
8289 check_preempt_curr(rq, p, 0);
8290 }
8291 }
8292
8293 /*
8294 * Account for a task changing its policy or group.
8295 *
8296 * This routine is mostly called to set the cfs_rq->curr field when a
8297 * task migrates between groups/classes.
8298 */
8298 static void set_curr_task_fair(struct rq *rq)
8299 {
8300 struct sched_entity *se = &rq->curr->se;
8301
8302 for_each_sched_entity(se) {
8303 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8304
8305 set_next_entity(cfs_rq, se);
8306 /* ensure bandwidth has been allocated on our new cfs_rq */
8307 account_cfs_rq_runtime(cfs_rq, 0);
8308 }
8309 }
8310
8311 void init_cfs_rq(struct cfs_rq *cfs_rq)
8312 {
8313 cfs_rq->tasks_timeline = RB_ROOT;
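/*
 * Start min_vruntime about 1ms (1<<20 ns) below the point where the
 * u64 wraps, so that the wrap-around handling in the vruntime
 * comparisons is exercised early in the life of every cfs_rq rather
 * than only after weeks of uptime.
 */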
8314 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8315 #ifndef CONFIG_64BIT
8316 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8317 #endif
8318 #ifdef CONFIG_SMP
8319 atomic_long_set(&cfs_rq->removed_load_avg, 0);
8320 atomic_long_set(&cfs_rq->removed_util_avg, 0);
8321 #endif
8322 }
8323
8324 #ifdef CONFIG_FAIR_GROUP_SCHED
8325 static void task_move_group_fair(struct task_struct *p)
8326 {
8327 detach_task_cfs_rq(p);
8328 set_task_rq(p, task_cpu(p));
8329
8330 #ifdef CONFIG_SMP
8331 /* Tell the load-tracking code that se's cfs_rq has changed: the task migrated */
8332 p->se.avg.last_update_time = 0;
8333 #endif
8334 attach_task_cfs_rq(p);
8335 }
8336
8337 void free_fair_sched_group(struct task_group *tg)
8338 {
8339 int i;
8340
8341 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8342
8343 for_each_possible_cpu(i) {
8344 if (tg->cfs_rq)
8345 kfree(tg->cfs_rq[i]);
8346 if (tg->se)
8347 kfree(tg->se[i]);
8348 }
8349
8350 kfree(tg->cfs_rq);
8351 kfree(tg->se);
8352 }
8353
8354 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8355 {
8356 struct cfs_rq *cfs_rq;
8357 struct sched_entity *se;
8358 int i;
8359
8360 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8361 if (!tg->cfs_rq)
8362 goto err;
8363 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8364 if (!tg->se)
8365 goto err;
8366
8367 tg->shares = NICE_0_LOAD;
8368
8369 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8370
8371 for_each_possible_cpu(i) {
8372 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8373 GFP_KERNEL, cpu_to_node(i));
8374 if (!cfs_rq)
8375 goto err;
8376
8377 se = kzalloc_node(sizeof(struct sched_entity),
8378 GFP_KERNEL, cpu_to_node(i));
8379 if (!se)
8380 goto err_free_rq;
8381
8382 init_cfs_rq(cfs_rq);
8383 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8384 init_entity_runnable_average(se);
8385 }
8386
8387 return 1;
8388
8389 err_free_rq:
8390 kfree(cfs_rq);
8391 err:
8392 return 0;
8393 }
8394
8395 void unregister_fair_sched_group(struct task_group *tg)
8396 {
8397 unsigned long flags;
8398 struct rq *rq;
8399 int cpu;
8400
8401 for_each_possible_cpu(cpu) {
8402 if (tg->se[cpu])
8403 remove_entity_load_avg(tg->se[cpu]);
8404
8405 /*
8406 * Only empty task groups can be destroyed, so we can speculatively
8407 * check on_list without danger of it being re-added.
8408 */
8409 if (!tg->cfs_rq[cpu]->on_list)
8410 continue;
8411
8412 rq = cpu_rq(cpu);
8413
8414 raw_spin_lock_irqsave(&rq->lock, flags);
8415 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8416 raw_spin_unlock_irqrestore(&rq->lock, flags);
8417 }
8418 }
8419
8420 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8421 struct sched_entity *se, int cpu,
8422 struct sched_entity *parent)
8423 {
8424 struct rq *rq = cpu_rq(cpu);
8425
8426 cfs_rq->tg = tg;
8427 cfs_rq->rq = rq;
8428 init_cfs_rq_runtime(cfs_rq);
8429
8430 tg->cfs_rq[cpu] = cfs_rq;
8431 tg->se[cpu] = se;
8432
8433 /* se could be NULL for root_task_group */
8434 if (!se)
8435 return;
8436
8437 if (!parent) {
8438 se->cfs_rq = &rq->cfs;
8439 se->depth = 0;
8440 } else {
8441 se->cfs_rq = parent->my_q;
8442 se->depth = parent->depth + 1;
8443 }
8444
8445 se->my_q = cfs_rq;
8446 /* guarantee group entities always have weight */
8447 update_load_set(&se->load, NICE_0_LOAD);
8448 se->parent = parent;
8449 }
8450
8451 static DEFINE_MUTEX(shares_mutex);
8452
8453 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8454 {
8455 int i;
8456 unsigned long flags;
8457
8458 /*
8459 * We can't change the weight of the root cgroup.
8460 */
8461 if (!tg->se[0])
8462 return -EINVAL;
8463
8464 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8465
8466 mutex_lock(&shares_mutex);
8467 if (tg->shares == shares)
8468 goto done;
8469
8470 tg->shares = shares;
8471 for_each_possible_cpu(i) {
8472 struct rq *rq = cpu_rq(i);
8473 struct sched_entity *se;
8474
8475 se = tg->se[i];
8476 /* Propagate contribution to hierarchy */
8477 raw_spin_lock_irqsave(&rq->lock, flags);
8478
8479 /* Possible calls to update_curr() need rq clock */
8480 update_rq_clock(rq);
8481 for_each_sched_entity(se)
8482 update_cfs_shares(group_cfs_rq(se));
8483 raw_spin_unlock_irqrestore(&rq->lock, flags);
8484 }
8485
8486 done:
8487 mutex_unlock(&shares_mutex);
8488 return 0;
8489 }
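/*
 * Usage sketch: in kernels of this vintage the usual path into this
 * function is a write to a cpu cgroup's "cpu.shares" file, e.g.
 *
 *   echo 2048 > /sys/fs/cgroup/cpu/mygroup/cpu.shares
 *
 * which gives "mygroup" twice the weight of a default (1024-share)
 * group. (The handler wiring lives in core.c; the mount point is
 * system-dependent.)
 */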
8490 #else /* CONFIG_FAIR_GROUP_SCHED */
8491
8492 void free_fair_sched_group(struct task_group *tg) { }
8493
8494 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8495 {
8496 return 1;
8497 }
8498
8499 void unregister_fair_sched_group(struct task_group *tg) { }
8500
8501 #endif /* CONFIG_FAIR_GROUP_SCHED */
8502
8503
8504 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
8505 {
8506 struct sched_entity *se = &task->se;
8507 unsigned int rr_interval = 0;
8508
8509 /*
8510 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
8511 * idle runqueue:
8512 */
8513 if (rq->cfs.load.weight)
8514 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
8515
8516 return rr_interval;
8517 }
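/*
 * Worked example, assuming HZ=1000: if a lone nice-0 task's computed
 * slice is 6,000,000 ns, NS_TO_JIFFIES() turns it into 6 jiffies, and
 * that is what sched_rr_get_interval() reports for the SCHED_OTHER
 * task; on an otherwise idle runqueue the reported interval is 0.
 */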
8518
8519 /*
8520 * All the scheduling class methods:
8521 */
8522 const struct sched_class fair_sched_class = {
8523 .next = &idle_sched_class,
8524 .enqueue_task = enqueue_task_fair,
8525 .dequeue_task = dequeue_task_fair,
8526 .yield_task = yield_task_fair,
8527 .yield_to_task = yield_to_task_fair,
8528
8529 .check_preempt_curr = check_preempt_wakeup,
8530
8531 .pick_next_task = pick_next_task_fair,
8532 .put_prev_task = put_prev_task_fair,
8533
8534 #ifdef CONFIG_SMP
8535 .select_task_rq = select_task_rq_fair,
8536 .migrate_task_rq = migrate_task_rq_fair,
8537
8538 .rq_online = rq_online_fair,
8539 .rq_offline = rq_offline_fair,
8540
8541 .task_waking = task_waking_fair,
8542 .task_dead = task_dead_fair,
8543 .set_cpus_allowed = set_cpus_allowed_common,
8544 #endif
8545
8546 .set_curr_task = set_curr_task_fair,
8547 .task_tick = task_tick_fair,
8548 .task_fork = task_fork_fair,
8549
8550 .prio_changed = prio_changed_fair,
8551 .switched_from = switched_from_fair,
8552 .switched_to = switched_to_fair,
8553
8554 .get_rr_interval = get_rr_interval_fair,
8555
8556 .update_curr = update_curr_fair,
8557
8558 #ifdef CONFIG_FAIR_GROUP_SCHED
8559 .task_move_group = task_move_group_fair,
8560 #endif
8561 };
8562
8563 #ifdef CONFIG_SCHED_DEBUG
8564 void print_cfs_stats(struct seq_file *m, int cpu)
8565 {
8566 struct cfs_rq *cfs_rq;
8567
8568 rcu_read_lock();
8569 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
8570 print_cfs_rq(m, cpu, cfs_rq);
8571 rcu_read_unlock();
8572 }
8573
8574 #ifdef CONFIG_NUMA_BALANCING
8575 void show_numa_stats(struct task_struct *p, struct seq_file *m)
8576 {
8577 int node;
8578 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
8579
8580 for_each_online_node(node) {
8581 if (p->numa_faults) {
8582 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
8583 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
8584 }
8585 if (p->numa_group) {
8586 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)];
8587 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
8588 }
8589 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
8590 }
8591 }
8592 #endif /* CONFIG_NUMA_BALANCING */
8593 #endif /* CONFIG_SCHED_DEBUG */
8594
8595 __init void init_sched_fair_class(void)
8596 {
8597 #ifdef CONFIG_SMP
8598 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8599
8600 #ifdef CONFIG_NO_HZ_COMMON
8601 nohz.next_balance = jiffies;
8602 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8603 cpu_notifier(sched_ilb_notifier, 0);
8604 #endif
8605 #endif /* SMP */
8606
8607 }