sched/cpuacct: Introduce cpuacct.usage_all to show all CPU stats together
kernel/sched/core.c
/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15	Work begun on replacing all interactivity tuning with a
 *		fair scheduling design by Con Kolivas.
 *  2007-05-05	Load balancing (smp-nice) and other improvements
 *		by Peter Williams
 *  2007-05-06	Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01	Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29	RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *		Thomas Gleixner, Mike Kravetz
 */

#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/frame.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

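/*
 * Illustrative note (not in the original source): the block above is an
 * x-macro.  Each line of "features.h" is a SCHED_FEAT(name, enabled) entry,
 * so including that file between the #define and the trailing "0;" expands
 * into a single OR-expression.  Assuming, purely for illustration, that
 * features.h contained:
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *	SCHED_FEAT(TTWU_QUEUE, true)
 *
 * the initializer would expand to roughly:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *		(1UL << __SCHED_FEAT_TTWU_QUEUE) * true |
 *		0;
 *
 * i.e. a bitmask with one bit per feature; see features.h for the real list.
 */
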
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rf->cookie = lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old cpu in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new cpu in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rf->cookie = lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

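/*
 * Illustrative usage sketch (not part of the original file): callers that
 * need a task pinned to a stable runqueue typically follow the pattern used
 * elsewhere in this file, e.g. in __set_cpus_allowed_ptr():
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// takes p->pi_lock + rq->lock
 *	... p cannot wake up or change runqueue concurrently here ...
 *	task_rq_unlock(rq, p, &rf);
 *
 * __task_rq_lock() is the variant for callers that already hold p->pi_lock.
 */
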
8f4d37ec
PZ
238#ifdef CONFIG_SCHED_HRTICK
239/*
240 * Use HR-timers to deliver accurate preemption points.
8f4d37ec 241 */
8f4d37ec 242
8f4d37ec
PZ
243static void hrtick_clear(struct rq *rq)
244{
245 if (hrtimer_active(&rq->hrtick_timer))
246 hrtimer_cancel(&rq->hrtick_timer);
247}
248
8f4d37ec
PZ
249/*
250 * High-resolution timer tick.
251 * Runs from hardirq context with interrupts disabled.
252 */
253static enum hrtimer_restart hrtick(struct hrtimer *timer)
254{
255 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
256
257 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
258
05fa785c 259 raw_spin_lock(&rq->lock);
3e51f33f 260 update_rq_clock(rq);
8f4d37ec 261 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
05fa785c 262 raw_spin_unlock(&rq->lock);
8f4d37ec
PZ
263
264 return HRTIMER_NORESTART;
265}
266
95e904c7 267#ifdef CONFIG_SMP
971ee28c 268
4961b6e1 269static void __hrtick_restart(struct rq *rq)
971ee28c
PZ
270{
271 struct hrtimer *timer = &rq->hrtick_timer;
971ee28c 272
4961b6e1 273 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
971ee28c
PZ
274}
275
31656519
PZ
276/*
277 * called from hardirq (IPI) context
278 */
279static void __hrtick_start(void *arg)
b328ca18 280{
31656519 281 struct rq *rq = arg;
b328ca18 282
05fa785c 283 raw_spin_lock(&rq->lock);
971ee28c 284 __hrtick_restart(rq);
31656519 285 rq->hrtick_csd_pending = 0;
05fa785c 286 raw_spin_unlock(&rq->lock);
b328ca18
PZ
287}
288
31656519
PZ
289/*
290 * Called to set the hrtick timer state.
291 *
292 * called with rq->lock held and irqs disabled
293 */
029632fb 294void hrtick_start(struct rq *rq, u64 delay)
b328ca18 295{
31656519 296 struct hrtimer *timer = &rq->hrtick_timer;
177ef2a6 297 ktime_t time;
298 s64 delta;
299
300 /*
301 * Don't schedule slices shorter than 10000ns, that just
302 * doesn't make sense and can cause timer DoS.
303 */
304 delta = max_t(s64, delay, 10000LL);
305 time = ktime_add_ns(timer->base->get_time(), delta);
b328ca18 306
cc584b21 307 hrtimer_set_expires(timer, time);
31656519
PZ
308
309 if (rq == this_rq()) {
971ee28c 310 __hrtick_restart(rq);
31656519 311 } else if (!rq->hrtick_csd_pending) {
c46fff2a 312 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
31656519
PZ
313 rq->hrtick_csd_pending = 1;
314 }
b328ca18
PZ
315}
316
31656519
PZ
317#else
318/*
319 * Called to set the hrtick timer state.
320 *
321 * called with rq->lock held and irqs disabled
322 */
029632fb 323void hrtick_start(struct rq *rq, u64 delay)
31656519 324{
86893335
WL
325 /*
326 * Don't schedule slices shorter than 10000ns, that just
327 * doesn't make sense. Rely on vruntime for fairness.
328 */
329 delay = max_t(u64, delay, 10000LL);
4961b6e1
TG
330 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
331 HRTIMER_MODE_REL_PINNED);
31656519 332}
31656519 333#endif /* CONFIG_SMP */
8f4d37ec 334
31656519 335static void init_rq_hrtick(struct rq *rq)
8f4d37ec 336{
31656519
PZ
337#ifdef CONFIG_SMP
338 rq->hrtick_csd_pending = 0;
8f4d37ec 339
31656519
PZ
340 rq->hrtick_csd.flags = 0;
341 rq->hrtick_csd.func = __hrtick_start;
342 rq->hrtick_csd.info = rq;
343#endif
8f4d37ec 344
31656519
PZ
345 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
346 rq->hrtick_timer.function = hrtick;
8f4d37ec 347}
006c75f1 348#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
349static inline void hrtick_clear(struct rq *rq)
350{
351}
352
8f4d37ec
PZ
353static inline void init_rq_hrtick(struct rq *rq)
354{
355}
006c75f1 356#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 357
/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

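/*
 * Worked example (illustrative, not in the original source): fetch_or()
 * returns the *old* value while OR-ing in the mask.  With ti->flags == 0x01
 * and mask == 0x02 it atomically leaves ti->flags == 0x03 and evaluates to
 * 0x01.  That is why set_nr_and_not_polling() can both set _TIF_NEED_RESCHED
 * and learn, from the returned old flags, whether the remote task was
 * polling (in which case the IPI can be skipped):
 *
 *	if (set_nr_and_not_polling(curr))
 *		smp_send_reschedule(cpu);	// as done in resched_curr()
 *	else
 *		trace_sched_wake_idle_without_ipi(cpu);
 */
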
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

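/*
 * Illustrative usage sketch (not part of the original file): wake_q lets a
 * caller queue wakeups while holding a spinlock and issue them afterwards,
 * so that wake_up_process() is never called with the lock held.  Assuming
 * the WAKE_Q() initializer helper from this era's <linux/sched.h>, a typical
 * caller looks roughly like:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);	// may be called for several tasks
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);		// the actual wakeups happen here
 *
 * wake_q_add() takes a task reference and wake_up_q() drops it.
 */
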
/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

b021fe3e 515#ifdef CONFIG_SMP
3451d024 516#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
517/*
518 * In the semi idle case, use the nearest busy cpu for migrating timers
519 * from an idle cpu. This is good for power-savings.
520 *
521 * We don't do similar optimization for completely idle system, as
522 * selecting an idle cpu will add more delays to the timers than intended
523 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
524 */
bc7a34b8 525int get_nohz_timer_target(void)
83cd4fe2 526{
bc7a34b8 527 int i, cpu = smp_processor_id();
83cd4fe2
VP
528 struct sched_domain *sd;
529
9642d18e 530 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
6201b4d6
VK
531 return cpu;
532
057f3fad 533 rcu_read_lock();
83cd4fe2 534 for_each_domain(cpu, sd) {
057f3fad 535 for_each_cpu(i, sched_domain_span(sd)) {
44496922
WL
536 if (cpu == i)
537 continue;
538
539 if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
057f3fad
PZ
540 cpu = i;
541 goto unlock;
542 }
543 }
83cd4fe2 544 }
9642d18e
VH
545
546 if (!is_housekeeping_cpu(cpu))
547 cpu = housekeeping_any_cpu();
057f3fad
PZ
548unlock:
549 rcu_read_unlock();
83cd4fe2
VP
550 return cpu;
551}
06d8308c
TG
552/*
553 * When add_timer_on() enqueues a timer into the timer wheel of an
554 * idle CPU then this timer might expire before the next timer event
555 * which is scheduled to wake up that CPU. In case of a completely
556 * idle system the next event might even be infinite time into the
557 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
558 * leaves the inner idle loop so the newly added timer is taken into
559 * account when the CPU goes back to idle and evaluates the timer
560 * wheel for the next timer event.
561 */
1c20091e 562static void wake_up_idle_cpu(int cpu)
06d8308c
TG
563{
564 struct rq *rq = cpu_rq(cpu);
565
566 if (cpu == smp_processor_id())
567 return;
568
67b9ca70 569 if (set_nr_and_not_polling(rq->idle))
06d8308c 570 smp_send_reschedule(cpu);
dfc68f29
AL
571 else
572 trace_sched_wake_idle_without_ipi(cpu);
45bf76df
IM
573}
574
c5bfece2 575static bool wake_up_full_nohz_cpu(int cpu)
1c20091e 576{
53c5fa16
FW
577 /*
578 * We just need the target to call irq_exit() and re-evaluate
579 * the next tick. The nohz full kick at least implies that.
580 * If needed we can still optimize that later with an
581 * empty IRQ.
582 */
c5bfece2 583 if (tick_nohz_full_cpu(cpu)) {
1c20091e
FW
584 if (cpu != smp_processor_id() ||
585 tick_nohz_tick_stopped())
53c5fa16 586 tick_nohz_full_kick_cpu(cpu);
1c20091e
FW
587 return true;
588 }
589
590 return false;
591}
592
593void wake_up_nohz_cpu(int cpu)
594{
c5bfece2 595 if (!wake_up_full_nohz_cpu(cpu))
1c20091e
FW
596 wake_up_idle_cpu(cpu);
597}
598
ca38062e 599static inline bool got_nohz_idle_kick(void)
45bf76df 600{
1c792db7 601 int cpu = smp_processor_id();
873b4c65
VG
602
603 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
604 return false;
605
606 if (idle_cpu(cpu) && !need_resched())
607 return true;
608
609 /*
610 * We can't run Idle Load Balance on this CPU for this time so we
611 * cancel it and clear NOHZ_BALANCE_KICK
612 */
613 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
614 return false;
45bf76df
IM
615}
616
3451d024 617#else /* CONFIG_NO_HZ_COMMON */
45bf76df 618
ca38062e 619static inline bool got_nohz_idle_kick(void)
2069dd75 620{
ca38062e 621 return false;
2069dd75
PZ
622}
623
3451d024 624#endif /* CONFIG_NO_HZ_COMMON */
d842de87 625
ce831b38 626#ifdef CONFIG_NO_HZ_FULL
76d92ac3 627bool sched_can_stop_tick(struct rq *rq)
ce831b38 628{
76d92ac3
FW
629 int fifo_nr_running;
630
631 /* Deadline tasks, even if single, need the tick */
632 if (rq->dl.dl_nr_running)
633 return false;
634
1e78cdbd 635 /*
2548d546
PZ
636 * If there are more than one RR tasks, we need the tick to effect the
637 * actual RR behaviour.
1e78cdbd 638 */
76d92ac3
FW
639 if (rq->rt.rr_nr_running) {
640 if (rq->rt.rr_nr_running == 1)
641 return true;
642 else
643 return false;
1e78cdbd
RR
644 }
645
2548d546
PZ
646 /*
647 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
648 * forced preemption between FIFO tasks.
649 */
650 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
651 if (fifo_nr_running)
652 return true;
653
654 /*
655 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
656 * if there's more than one we need the tick for involuntary
657 * preemption.
658 */
659 if (rq->nr_running > 1)
541b8264 660 return false;
ce831b38 661
541b8264 662 return true;
ce831b38
FW
663}
664#endif /* CONFIG_NO_HZ_FULL */
d842de87 665
029632fb 666void sched_avg_update(struct rq *rq)
18d95a28 667{
e9e9250b
PZ
668 s64 period = sched_avg_period();
669
78becc27 670 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
0d98bb26
WD
671 /*
672 * Inline assembly required to prevent the compiler
673 * optimising this loop into a divmod call.
674 * See __iter_div_u64_rem() for another example of this.
675 */
676 asm("" : "+rm" (rq->age_stamp));
e9e9250b
PZ
677 rq->age_stamp += period;
678 rq->rt_avg /= 2;
679 }
18d95a28
PZ
680}
681
6d6bc0ad 682#endif /* CONFIG_SMP */
18d95a28 683
a790de99
PT
684#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
685 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 686/*
8277434e
PT
687 * Iterate task_group tree rooted at *from, calling @down when first entering a
688 * node and @up when leaving it for the final time.
689 *
690 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 691 */
029632fb 692int walk_tg_tree_from(struct task_group *from,
8277434e 693 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
694{
695 struct task_group *parent, *child;
eb755805 696 int ret;
c09595f6 697
8277434e
PT
698 parent = from;
699
c09595f6 700down:
eb755805
PZ
701 ret = (*down)(parent, data);
702 if (ret)
8277434e 703 goto out;
c09595f6
PZ
704 list_for_each_entry_rcu(child, &parent->children, siblings) {
705 parent = child;
706 goto down;
707
708up:
709 continue;
710 }
eb755805 711 ret = (*up)(parent, data);
8277434e
PT
712 if (ret || parent == from)
713 goto out;
c09595f6
PZ
714
715 child = parent;
716 parent = parent->parent;
717 if (parent)
718 goto up;
8277434e 719out:
eb755805 720 return ret;
c09595f6
PZ
721}
722
029632fb 723int tg_nop(struct task_group *tg, void *data)
eb755805 724{
e2b245f8 725 return 0;
eb755805 726}
18d95a28
PZ
727#endif
728
static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(sched_prio_to_weight[prio]);
	load->inv_weight = sched_prio_to_wmult[prio];
}

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & ENQUEUE_RESTORE))
		sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

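/*
 * Illustrative note (not in the original source): for normal tasks the
 * weight above comes from the sched_prio_to_weight[] table indexed by
 * (static_prio - MAX_RT_PRIO), i.e. by nice level.  Nice 0 maps to a weight
 * of 1024 (scaled up by scale_load()), and each nice step changes the
 * weight by roughly 25%, e.g. approximately:
 *
 *	nice -1  ->  weight ~1277
 *	nice  0  ->  weight  1024
 *	nice +1  ->  weight  ~820
 *
 * sched_prio_to_wmult[] caches 2^32 / weight so the fair class can replace
 * divisions by multiplications; see the table definitions elsewhere in this
 * file.
 */
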
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

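/*
 * Worked example (illustrative, not in the original source): suppose
 * update_rq_clock() advanced rq->clock by delta = 10ms, of which 2ms was
 * spent in hard/soft IRQ context and 1ms was stolen by the hypervisor.
 * With both accounting options enabled, update_rq_clock_task() adds only
 * 10 - 2 - 1 = 7ms to rq->clock_task, so task runtime (and thus CFS
 * vruntime) excludes time the task could not actually use, while the 3ms of
 * "non-task" time is fed to sched_rt_avg_update() (when the NONTASK_CAPACITY
 * feature is enabled) to scale down the CPU capacity seen by the load
 * balancer.
 */
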
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

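/*
 * Illustrative note (not in the original source): the single 'prio' value
 * computed above orders all policies on one scale, lower meaning more
 * important.  Roughly:
 *
 *	deadline tasks:		MAX_DL_PRIO-1 == -1
 *	realtime tasks:		0 .. MAX_RT_PRIO-1 (99), from rt_priority
 *	normal/batch/idle:	100 .. 139, i.e. static_prio (nice -20..+19)
 *
 * effective_prio() then lets PI-boosted tasks keep an RT-range prio even
 * though their policy is still SCHED_NORMAL.
 */
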
#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	p->on_rq = TASK_ON_RQ_MIGRATING;
	dequeue_task(rq, p, 0);
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

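/*
 * Illustrative usage sketch (not part of the original file): the stopper
 * callback above is driven from __set_cpus_allowed_ptr() further down in
 * this file, which packs the task and destination CPU into a migration_arg
 * and hands it to the stop machine infrastructure:
 *
 *	struct migration_arg arg = { p, dest_cpu };
 *
 *	task_rq_unlock(rq, p, &rf);
 *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 *
 * stop_one_cpu() runs migration_cpu_stop() on the task's current CPU at
 * stopper priority, which forces @p off that CPU so it can be re-queued on
 * an allowed one.
 */
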
c5b28038
PZ
1074/*
1075 * sched_class::set_cpus_allowed must do the below, but is not required to
1076 * actually call this function.
1077 */
1078void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
5cc389bc 1079{
5cc389bc
PZ
1080 cpumask_copy(&p->cpus_allowed, new_mask);
1081 p->nr_cpus_allowed = cpumask_weight(new_mask);
1082}
1083
c5b28038
PZ
1084void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1085{
6c37067e
PZ
1086 struct rq *rq = task_rq(p);
1087 bool queued, running;
1088
c5b28038 1089 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
1090
1091 queued = task_on_rq_queued(p);
1092 running = task_current(rq, p);
1093
1094 if (queued) {
1095 /*
1096 * Because __kthread_bind() calls this on blocked tasks without
1097 * holding rq->lock.
1098 */
1099 lockdep_assert_held(&rq->lock);
1de64443 1100 dequeue_task(rq, p, DEQUEUE_SAVE);
6c37067e
PZ
1101 }
1102 if (running)
1103 put_prev_task(rq, p);
1104
c5b28038 1105 p->sched_class->set_cpus_allowed(p, new_mask);
6c37067e
PZ
1106
1107 if (running)
1108 p->sched_class->set_curr_task(rq);
1109 if (queued)
1de64443 1110 enqueue_task(rq, p, ENQUEUE_RESTORE);
c5b28038
PZ
1111}
1112
5cc389bc
PZ
1113/*
1114 * Change a given task's CPU affinity. Migrate the thread to a
1115 * proper CPU and schedule it away if the CPU it's executing on
1116 * is removed from the allowed bitmask.
1117 *
1118 * NOTE: the caller must have a valid reference to the task, the
1119 * task must not exit() & deallocate itself prematurely. The
1120 * call is not atomic; no spinlocks may be held.
1121 */
25834c73
PZ
1122static int __set_cpus_allowed_ptr(struct task_struct *p,
1123 const struct cpumask *new_mask, bool check)
5cc389bc 1124{
e9d867a6 1125 const struct cpumask *cpu_valid_mask = cpu_active_mask;
5cc389bc 1126 unsigned int dest_cpu;
eb580751
PZ
1127 struct rq_flags rf;
1128 struct rq *rq;
5cc389bc
PZ
1129 int ret = 0;
1130
eb580751 1131 rq = task_rq_lock(p, &rf);
5cc389bc 1132
e9d867a6
PZI
1133 if (p->flags & PF_KTHREAD) {
1134 /*
1135 * Kernel threads are allowed on online && !active CPUs
1136 */
1137 cpu_valid_mask = cpu_online_mask;
1138 }
1139
25834c73
PZ
1140 /*
1141 * Must re-check here, to close a race against __kthread_bind(),
1142 * sched_setaffinity() is not guaranteed to observe the flag.
1143 */
1144 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1145 ret = -EINVAL;
1146 goto out;
1147 }
1148
5cc389bc
PZ
1149 if (cpumask_equal(&p->cpus_allowed, new_mask))
1150 goto out;
1151
e9d867a6 1152 if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
5cc389bc
PZ
1153 ret = -EINVAL;
1154 goto out;
1155 }
1156
1157 do_set_cpus_allowed(p, new_mask);
1158
e9d867a6
PZI
1159 if (p->flags & PF_KTHREAD) {
1160 /*
1161 * For kernel threads that do indeed end up on online &&
1162 * !active we want to ensure they are strict per-cpu threads.
1163 */
1164 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
1165 !cpumask_intersects(new_mask, cpu_active_mask) &&
1166 p->nr_cpus_allowed != 1);
1167 }
1168
5cc389bc
PZ
1169 /* Can the task run on the task's current CPU? If so, we're done */
1170 if (cpumask_test_cpu(task_cpu(p), new_mask))
1171 goto out;
1172
e9d867a6 1173 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
5cc389bc
PZ
1174 if (task_running(rq, p) || p->state == TASK_WAKING) {
1175 struct migration_arg arg = { p, dest_cpu };
1176 /* Need help from migration thread: drop lock and wait. */
eb580751 1177 task_rq_unlock(rq, p, &rf);
5cc389bc
PZ
1178 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1179 tlb_migrate_finish(p->mm);
1180 return 0;
cbce1a68
PZ
1181 } else if (task_on_rq_queued(p)) {
1182 /*
1183 * OK, since we're going to drop the lock immediately
1184 * afterwards anyway.
1185 */
e7904a28 1186 lockdep_unpin_lock(&rq->lock, rf.cookie);
5e16bbc2 1187 rq = move_queued_task(rq, p, dest_cpu);
e7904a28 1188 lockdep_repin_lock(&rq->lock, rf.cookie);
cbce1a68 1189 }
5cc389bc 1190out:
eb580751 1191 task_rq_unlock(rq, p, &rf);
5cc389bc
PZ
1192
1193 return ret;
1194}
25834c73
PZ
1195
1196int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1197{
1198 return __set_cpus_allowed_ptr(p, new_mask, false);
1199}
5cc389bc
PZ
1200EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1201
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

ac66f547
PZ
1249static void __migrate_swap_task(struct task_struct *p, int cpu)
1250{
da0c1e65 1251 if (task_on_rq_queued(p)) {
ac66f547
PZ
1252 struct rq *src_rq, *dst_rq;
1253
1254 src_rq = task_rq(p);
1255 dst_rq = cpu_rq(cpu);
1256
3ea94de1 1257 p->on_rq = TASK_ON_RQ_MIGRATING;
ac66f547
PZ
1258 deactivate_task(src_rq, p, 0);
1259 set_task_cpu(p, cpu);
1260 activate_task(dst_rq, p, 0);
3ea94de1 1261 p->on_rq = TASK_ON_RQ_QUEUED;
ac66f547
PZ
1262 check_preempt_curr(dst_rq, p, 0);
1263 } else {
1264 /*
1265 * Task isn't running anymore; make it appear like we migrated
1266 * it before it went to sleep. This means on wakeup we make the
1267 * previous cpu our targer instead of where it really is.
1268 */
1269 p->wake_cpu = cpu;
1270 }
1271}
1272
1273struct migration_swap_arg {
1274 struct task_struct *src_task, *dst_task;
1275 int src_cpu, dst_cpu;
1276};
1277
1278static int migrate_swap_stop(void *data)
1279{
1280 struct migration_swap_arg *arg = data;
1281 struct rq *src_rq, *dst_rq;
1282 int ret = -EAGAIN;
1283
62694cd5
PZ
1284 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1285 return -EAGAIN;
1286
ac66f547
PZ
1287 src_rq = cpu_rq(arg->src_cpu);
1288 dst_rq = cpu_rq(arg->dst_cpu);
1289
74602315
PZ
1290 double_raw_lock(&arg->src_task->pi_lock,
1291 &arg->dst_task->pi_lock);
ac66f547 1292 double_rq_lock(src_rq, dst_rq);
62694cd5 1293
ac66f547
PZ
1294 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1295 goto unlock;
1296
1297 if (task_cpu(arg->src_task) != arg->src_cpu)
1298 goto unlock;
1299
1300 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1301 goto unlock;
1302
1303 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1304 goto unlock;
1305
1306 __migrate_swap_task(arg->src_task, arg->dst_cpu);
1307 __migrate_swap_task(arg->dst_task, arg->src_cpu);
1308
1309 ret = 0;
1310
1311unlock:
1312 double_rq_unlock(src_rq, dst_rq);
74602315
PZ
1313 raw_spin_unlock(&arg->dst_task->pi_lock);
1314 raw_spin_unlock(&arg->src_task->pi_lock);
ac66f547
PZ
1315
1316 return ret;
1317}
1318
1319/*
1320 * Cross migrate two tasks
1321 */
1322int migrate_swap(struct task_struct *cur, struct task_struct *p)
1323{
1324 struct migration_swap_arg arg;
1325 int ret = -EINVAL;
1326
ac66f547
PZ
1327 arg = (struct migration_swap_arg){
1328 .src_task = cur,
1329 .src_cpu = task_cpu(cur),
1330 .dst_task = p,
1331 .dst_cpu = task_cpu(p),
1332 };
1333
1334 if (arg.src_cpu == arg.dst_cpu)
1335 goto out;
1336
6acce3ef
PZ
1337 /*
1338 * These three tests are all lockless; this is OK since all of them
1339 * will be re-checked with proper locks held further down the line.
1340 */
ac66f547
PZ
1341 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1342 goto out;
1343
1344 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1345 goto out;
1346
1347 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1348 goto out;
1349
286549dc 1350 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
1351 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1352
1353out:
ac66f547
PZ
1354 return ret;
1355}
1356
1da177e4
LT
1357/*
1358 * wait_task_inactive - wait for a thread to unschedule.
1359 *
85ba2d86
RM
1360 * If @match_state is nonzero, it's the @p->state value just checked and
1361 * not expected to change. If it changes, i.e. @p might have woken up,
1362 * then return zero. When we succeed in waiting for @p to be off its CPU,
1363 * we return a positive number (its total switch count). If a second call
1364 * a short while later returns the same number, the caller can be sure that
1365 * @p has remained unscheduled the whole time.
1366 *
1da177e4
LT
1367 * The caller must ensure that the task *will* unschedule sometime soon,
1368 * else this function might spin for a *long* time. This function can't
1369 * be called with interrupts off, or it may introduce deadlock with
1370 * smp_call_function() if an IPI is sent by the same process we are
1371 * waiting to become inactive.
1372 */
85ba2d86 1373unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4 1374{
da0c1e65 1375 int running, queued;
eb580751 1376 struct rq_flags rf;
85ba2d86 1377 unsigned long ncsw;
70b97a7f 1378 struct rq *rq;
1da177e4 1379
3a5c359a
AK
1380 for (;;) {
1381 /*
1382 * We do the initial early heuristics without holding
1383 * any task-queue locks at all. We'll only try to get
1384 * the runqueue lock when things look like they will
1385 * work out!
1386 */
1387 rq = task_rq(p);
fa490cfd 1388
3a5c359a
AK
1389 /*
1390 * If the task is actively running on another CPU
1391 * still, just relax and busy-wait without holding
1392 * any locks.
1393 *
1394 * NOTE! Since we don't hold any locks, it's not
1395 * even sure that "rq" stays as the right runqueue!
1396 * But we don't care, since "task_running()" will
1397 * return false if the runqueue has changed and p
1398 * is actually now running somewhere else!
1399 */
85ba2d86
RM
1400 while (task_running(rq, p)) {
1401 if (match_state && unlikely(p->state != match_state))
1402 return 0;
3a5c359a 1403 cpu_relax();
85ba2d86 1404 }
fa490cfd 1405
3a5c359a
AK
1406 /*
1407 * Ok, time to look more closely! We need the rq
1408 * lock now, to be *sure*. If we're wrong, we'll
1409 * just go back and repeat.
1410 */
eb580751 1411 rq = task_rq_lock(p, &rf);
27a9da65 1412 trace_sched_wait_task(p);
3a5c359a 1413 running = task_running(rq, p);
da0c1e65 1414 queued = task_on_rq_queued(p);
85ba2d86 1415 ncsw = 0;
f31e11d8 1416 if (!match_state || p->state == match_state)
93dcf55f 1417 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
eb580751 1418 task_rq_unlock(rq, p, &rf);
fa490cfd 1419
85ba2d86
RM
1420 /*
1421 * If it changed from the expected state, bail out now.
1422 */
1423 if (unlikely(!ncsw))
1424 break;
1425
3a5c359a
AK
1426 /*
1427 * Was it really running after all now that we
1428 * checked with the proper locks actually held?
1429 *
1430 * Oops. Go back and try again..
1431 */
1432 if (unlikely(running)) {
1433 cpu_relax();
1434 continue;
1435 }
fa490cfd 1436
3a5c359a
AK
1437 /*
1438 * It's not enough that it's not actively running,
1439 * it must be off the runqueue _entirely_, and not
1440 * preempted!
1441 *
80dd99b3 1442 * So if it was still runnable (but just not actively
3a5c359a
AK
1443 * running right now), it's preempted, and we should
1444 * yield - it could be a while.
1445 */
da0c1e65 1446 if (unlikely(queued)) {
8eb90c30
TG
1447 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1448
1449 set_current_state(TASK_UNINTERRUPTIBLE);
1450 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
1451 continue;
1452 }
fa490cfd 1453
3a5c359a
AK
1454 /*
1455 * Ahh, all good. It wasn't running, and it wasn't
1456 * runnable, which means that it will never become
1457 * running in the future either. We're all done!
1458 */
1459 break;
1460 }
85ba2d86
RM
1461
1462 return ncsw;
1da177e4
LT
1463}
1464
1465/***
1466 * kick_process - kick a running thread to enter/exit the kernel
1467 * @p: the to-be-kicked thread
1468 *
1469 * Cause a process which is running on another CPU to enter
1470 * kernel-mode, without any delay. (to get signals handled.)
1471 *
25985edc 1472 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
1473 * because all it wants to ensure is that the remote task enters
1474 * the kernel. If the IPI races and the task has been migrated
1475 * to another CPU then no harm is done and the purpose has been
1476 * achieved as well.
1477 */
36c8b586 1478void kick_process(struct task_struct *p)
1da177e4
LT
1479{
1480 int cpu;
1481
1482 preempt_disable();
1483 cpu = task_cpu(p);
1484 if ((cpu != smp_processor_id()) && task_curr(p))
1485 smp_send_reschedule(cpu);
1486 preempt_enable();
1487}
b43e3521 1488EXPORT_SYMBOL_GPL(kick_process);
1da177e4 1489
30da688e 1490/*
013fdb80 1491 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
e9d867a6
PZI
1492 *
1493 * A few notes on cpu_active vs cpu_online:
1494 *
1495 * - cpu_active must be a subset of cpu_online
1496 *
1497 * - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
1498 * see __set_cpus_allowed_ptr(). At this point the newly online
1499 * cpu isn't yet part of the sched domains, and balancing will not
1500 * see it.
1501 *
1502 * - on cpu-down we clear cpu_active() to mask the sched domains and
1503 * avoid the load balancer to place new tasks on the to be removed
1504 * cpu. Existing tasks will remain running there and will be taken
1505 * off.
1506 *
1507 * This means that fallback selection must not select !active CPUs.
1508 * And can assume that any active CPU must be online. Conversely
1509 * select_task_rq() below may allow selection of !active CPUs in order
1510 * to satisfy the above rules.
30da688e 1511 */
5da9a0fb
PZ
1512static int select_fallback_rq(int cpu, struct task_struct *p)
1513{
aa00d89c
TC
1514 int nid = cpu_to_node(cpu);
1515 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
1516 enum { cpuset, possible, fail } state = cpuset;
1517 int dest_cpu;
5da9a0fb 1518
aa00d89c
TC
1519 /*
1520 * If the node that the cpu is on has been offlined, cpu_to_node()
1521 * will return -1. There is no cpu on the node, and we should
1522 * select the cpu on the other node.
1523 */
1524 if (nid != -1) {
1525 nodemask = cpumask_of_node(nid);
1526
1527 /* Look for allowed, online CPU in same node. */
1528 for_each_cpu(dest_cpu, nodemask) {
aa00d89c
TC
1529 if (!cpu_active(dest_cpu))
1530 continue;
1531 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1532 return dest_cpu;
1533 }
2baab4e9 1534 }
5da9a0fb 1535
2baab4e9
PZ
1536 for (;;) {
1537 /* Any allowed, online CPU? */
e3831edd 1538 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
feb245e3
TH
1539 if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
1540 continue;
1541 if (!cpu_online(dest_cpu))
2baab4e9
PZ
1542 continue;
1543 goto out;
1544 }
5da9a0fb 1545
e73e85f0 1546 /* No more Mr. Nice Guy. */
2baab4e9
PZ
1547 switch (state) {
1548 case cpuset:
e73e85f0
ON
1549 if (IS_ENABLED(CONFIG_CPUSETS)) {
1550 cpuset_cpus_allowed_fallback(p);
1551 state = possible;
1552 break;
1553 }
1554 /* fall-through */
2baab4e9
PZ
1555 case possible:
1556 do_set_cpus_allowed(p, cpu_possible_mask);
1557 state = fail;
1558 break;
1559
1560 case fail:
1561 BUG();
1562 break;
1563 }
1564 }
1565
1566out:
1567 if (state != cpuset) {
1568 /*
1569 * Don't tell them about moving exiting tasks or
1570 * kernel threads (both mm NULL), since they never
1571 * leave kernel.
1572 */
1573 if (p->mm && printk_ratelimit()) {
aac74dc4 1574 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
1575 task_pid_nr(p), p->comm, cpu);
1576 }
5da9a0fb
PZ
1577 }
1578
1579 return dest_cpu;
1580}
1581
/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (tsk_nr_cpus_allowed(p) > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
	else
		cpu = cpumask_any(tsk_cpus_allowed(p));

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

970b13ba 1627
d7c01d27 1628static void
b84cb5df 1629ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 1630{
d7c01d27 1631#ifdef CONFIG_SCHEDSTATS
b84cb5df
PZ
1632 struct rq *rq = this_rq();
1633
d7c01d27
PZ
1634#ifdef CONFIG_SMP
1635 int this_cpu = smp_processor_id();
1636
1637 if (cpu == this_cpu) {
1638 schedstat_inc(rq, ttwu_local);
1639 schedstat_inc(p, se.statistics.nr_wakeups_local);
1640 } else {
1641 struct sched_domain *sd;
1642
1643 schedstat_inc(p, se.statistics.nr_wakeups_remote);
057f3fad 1644 rcu_read_lock();
d7c01d27
PZ
1645 for_each_domain(this_cpu, sd) {
1646 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1647 schedstat_inc(sd, ttwu_wake_remote);
1648 break;
1649 }
1650 }
057f3fad 1651 rcu_read_unlock();
d7c01d27 1652 }
f339b9dc
PZ
1653
1654 if (wake_flags & WF_MIGRATED)
1655 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1656
d7c01d27
PZ
1657#endif /* CONFIG_SMP */
1658
1659 schedstat_inc(rq, ttwu_count);
9ed3811a 1660 schedstat_inc(p, se.statistics.nr_wakeups);
d7c01d27
PZ
1661
1662 if (wake_flags & WF_SYNC)
9ed3811a 1663 schedstat_inc(p, se.statistics.nr_wakeups_sync);
d7c01d27 1664
d7c01d27
PZ
1665#endif /* CONFIG_SCHEDSTATS */
1666}
1667
1de64443 1668static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
d7c01d27 1669{
9ed3811a 1670 activate_task(rq, p, en_flags);
da0c1e65 1671 p->on_rq = TASK_ON_RQ_QUEUED;
c2f7115e
PZ
1672
1673 /* if a worker is waking up, notify workqueue */
1674 if (p->flags & PF_WQ_WORKER)
1675 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
1676}
1677
23f41eeb
PZ
1678/*
1679 * Mark the task runnable and perform wakeup-preemption.
1680 */
e7904a28
PZ
1681static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
1682 struct pin_cookie cookie)
9ed3811a 1683{
9ed3811a 1684 check_preempt_curr(rq, p, wake_flags);
9ed3811a 1685 p->state = TASK_RUNNING;
fbd705a0
PZ
1686 trace_sched_wakeup(p);
1687
9ed3811a 1688#ifdef CONFIG_SMP
4c9a4bc8
PZ
1689 if (p->sched_class->task_woken) {
1690 /*
cbce1a68
PZ
1691 * Our task @p is fully woken up and running; so its safe to
1692 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 1693 */
e7904a28 1694 lockdep_unpin_lock(&rq->lock, cookie);
9ed3811a 1695 p->sched_class->task_woken(rq, p);
e7904a28 1696 lockdep_repin_lock(&rq->lock, cookie);
4c9a4bc8 1697 }
9ed3811a 1698
e69c6341 1699 if (rq->idle_stamp) {
78becc27 1700 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 1701 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 1702
abfafa54
JL
1703 update_avg(&rq->avg_idle, delta);
1704
1705 if (rq->avg_idle > max)
9ed3811a 1706 rq->avg_idle = max;
abfafa54 1707
9ed3811a
TH
1708 rq->idle_stamp = 0;
1709 }
1710#endif
1711}
1712
c05fbafb 1713static void
e7904a28
PZ
1714ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
1715 struct pin_cookie cookie)
c05fbafb 1716{
b5179ac7
PZ
1717 int en_flags = ENQUEUE_WAKEUP;
1718
cbce1a68
PZ
1719 lockdep_assert_held(&rq->lock);
1720
c05fbafb
PZ
1721#ifdef CONFIG_SMP
1722 if (p->sched_contributes_to_load)
1723 rq->nr_uninterruptible--;
b5179ac7 1724
b5179ac7 1725 if (wake_flags & WF_MIGRATED)
59efa0ba 1726 en_flags |= ENQUEUE_MIGRATED;
c05fbafb
PZ
1727#endif
1728
b5179ac7 1729 ttwu_activate(rq, p, en_flags);
e7904a28 1730 ttwu_do_wakeup(rq, p, wake_flags, cookie);
c05fbafb
PZ
1731}
1732
1733/*
1734 * Called in case the task @p isn't fully descheduled from its runqueue,
1735 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
1736 * since all we need to do is flip p->state to TASK_RUNNING, since
1737 * the task is still ->on_rq.
1738 */
1739static int ttwu_remote(struct task_struct *p, int wake_flags)
1740{
eb580751 1741 struct rq_flags rf;
c05fbafb
PZ
1742 struct rq *rq;
1743 int ret = 0;
1744
eb580751 1745 rq = __task_rq_lock(p, &rf);
da0c1e65 1746 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
1747 /* check_preempt_curr() may use rq clock */
1748 update_rq_clock(rq);
e7904a28 1749 ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
c05fbafb
PZ
1750 ret = 1;
1751 }
eb580751 1752 __task_rq_unlock(rq, &rf);
c05fbafb
PZ
1753
1754 return ret;
1755}
1756
317f3941 1757#ifdef CONFIG_SMP
e3baac47 1758void sched_ttwu_pending(void)
317f3941
PZ
1759{
1760 struct rq *rq = this_rq();
fa14ff4a 1761 struct llist_node *llist = llist_del_all(&rq->wake_list);
e7904a28 1762 struct pin_cookie cookie;
fa14ff4a 1763 struct task_struct *p;
e3baac47 1764 unsigned long flags;
317f3941 1765
e3baac47
PZ
1766 if (!llist)
1767 return;
1768
1769 raw_spin_lock_irqsave(&rq->lock, flags);
e7904a28 1770 cookie = lockdep_pin_lock(&rq->lock);
317f3941 1771
fa14ff4a 1772 while (llist) {
b7e7ade3
PZ
1773 int wake_flags = 0;
1774
fa14ff4a
PZ
1775 p = llist_entry(llist, struct task_struct, wake_entry);
1776 llist = llist_next(llist);
b7e7ade3
PZ
1777
1778 if (p->sched_remote_wakeup)
1779 wake_flags = WF_MIGRATED;
1780
1781 ttwu_do_activate(rq, p, wake_flags, cookie);
317f3941
PZ
1782 }
1783
e7904a28 1784 lockdep_unpin_lock(&rq->lock, cookie);
e3baac47 1785 raw_spin_unlock_irqrestore(&rq->lock, flags);
317f3941
PZ
1786}
1787
1788void scheduler_ipi(void)
1789{
f27dde8d
PZ
1790 /*
1791 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1792 * TIF_NEED_RESCHED remotely (for the first time) will also send
1793 * this IPI.
1794 */
8cb75e0c 1795 preempt_fold_need_resched();
f27dde8d 1796
fd2ac4f4 1797 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
c5d753a5
PZ
1798 return;
1799
1800 /*
1801 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1802 * traditionally all their work was done from the interrupt return
1803 * path. Now that we actually do some work, we need to make sure
1804 * we do call them.
1805 *
1806 * Some archs already do call them, luckily irq_enter/exit nest
1807 * properly.
1808 *
1809 * Arguably we should visit all archs and update all handlers,
1810 * however a fair share of IPIs are still resched only so this would
1811 * somewhat pessimize the simple resched case.
1812 */
1813 irq_enter();
fa14ff4a 1814 sched_ttwu_pending();
ca38062e
SS
1815
1816 /*
1817 * Check if someone kicked us for doing the nohz idle load balance.
1818 */
873b4c65 1819 if (unlikely(got_nohz_idle_kick())) {
6eb57e0d 1820 this_rq()->idle_balance = 1;
ca38062e 1821 raise_softirq_irqoff(SCHED_SOFTIRQ);
6eb57e0d 1822 }
c5d753a5 1823 irq_exit();
317f3941
PZ
1824}
1825
b7e7ade3 1826static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
317f3941 1827{
e3baac47
PZ
1828 struct rq *rq = cpu_rq(cpu);
1829
b7e7ade3
PZ
1830 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
1831
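	/*
	 * The wakeup is queued on the remote CPU's wake_list; llist_add()
	 * returns true only when the list was previously empty, so only the
	 * first enqueuer kicks the remote CPU (via an IPI, or by merely
	 * setting NEED_RESCHED if its idle task is polling). Later wakeups
	 * piggyback on the kick that is already on its way.
	 */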
e3baac47
PZ
1832 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1833 if (!set_nr_if_polling(rq->idle))
1834 smp_send_reschedule(cpu);
1835 else
1836 trace_sched_wake_idle_without_ipi(cpu);
1837 }
317f3941 1838}
d6aa8f85 1839
f6be8af1
CL
1840void wake_up_if_idle(int cpu)
1841{
1842 struct rq *rq = cpu_rq(cpu);
1843 unsigned long flags;
1844
fd7de1e8
AL
1845 rcu_read_lock();
1846
1847 if (!is_idle_task(rcu_dereference(rq->curr)))
1848 goto out;
f6be8af1
CL
1849
1850 if (set_nr_if_polling(rq->idle)) {
1851 trace_sched_wake_idle_without_ipi(cpu);
1852 } else {
1853 raw_spin_lock_irqsave(&rq->lock, flags);
1854 if (is_idle_task(rq->curr))
1855 smp_send_reschedule(cpu);
1856 /* Else cpu is not in idle, do nothing here */
1857 raw_spin_unlock_irqrestore(&rq->lock, flags);
1858 }
fd7de1e8
AL
1859
1860out:
1861 rcu_read_unlock();
f6be8af1
CL
1862}
1863
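/*
 * cpus_share_cache - test whether two CPUs share a last-level cache.
 *
 * sd_llc_id caches, per CPU, an identifier for the LLC sched domain that
 * CPU belongs to, so the test is a cheap per-cpu compare. ttwu_queue()
 * below uses this to decide whether a wakeup may take the remote rq->lock
 * directly (cache shared) or should be queued via ttwu_queue_remote().
 */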
39be3501 1864bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
1865{
1866 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1867}
d6aa8f85 1868#endif /* CONFIG_SMP */
317f3941 1869
b5179ac7 1870static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
c05fbafb
PZ
1871{
1872 struct rq *rq = cpu_rq(cpu);
e7904a28 1873 struct pin_cookie cookie;
c05fbafb 1874
17d9f311 1875#if defined(CONFIG_SMP)
39be3501 1876 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
f01114cb 1877 sched_clock_cpu(cpu); /* sync clocks x-cpu */
b7e7ade3 1878 ttwu_queue_remote(p, cpu, wake_flags);
317f3941
PZ
1879 return;
1880 }
1881#endif
1882
c05fbafb 1883 raw_spin_lock(&rq->lock);
e7904a28 1884 cookie = lockdep_pin_lock(&rq->lock);
b5179ac7 1885 ttwu_do_activate(rq, p, wake_flags, cookie);
e7904a28 1886 lockdep_unpin_lock(&rq->lock, cookie);
c05fbafb 1887 raw_spin_unlock(&rq->lock);
9ed3811a
TH
1888}
1889
8643cda5
PZ
1890/*
1891 * Notes on Program-Order guarantees on SMP systems.
1892 *
1893 * MIGRATION
1894 *
1895 * The basic program-order guarantee on SMP systems is that when a task [t]
1896 * migrates, all its activity on its old cpu [c0] happens-before any subsequent
1897 * execution on its new cpu [c1].
1898 *
1899 * For migration (of runnable tasks) this is provided by the following means:
1900 *
1901 * A) UNLOCK of the rq(c0)->lock scheduling out task t
1902 * B) migration for t is required to synchronize *both* rq(c0)->lock and
1903 * rq(c1)->lock (if not at the same time, then in that order).
1904 * C) LOCK of the rq(c1)->lock scheduling in task
1905 *
1906 * Transitivity guarantees that B happens after A and C after B.
1907 * Note: we only require RCpc transitivity.
1908 * Note: the cpu doing B need not be c0 or c1
1909 *
1910 * Example:
1911 *
1912 * CPU0 CPU1 CPU2
1913 *
1914 * LOCK rq(0)->lock
1915 * sched-out X
1916 * sched-in Y
1917 * UNLOCK rq(0)->lock
1918 *
1919 * LOCK rq(0)->lock // orders against CPU0
1920 * dequeue X
1921 * UNLOCK rq(0)->lock
1922 *
1923 * LOCK rq(1)->lock
1924 * enqueue X
1925 * UNLOCK rq(1)->lock
1926 *
1927 * LOCK rq(1)->lock // orders against CPU2
1928 * sched-out Z
1929 * sched-in X
1930 * UNLOCK rq(1)->lock
1931 *
1932 *
1933 * BLOCKING -- aka. SLEEP + WAKEUP
1934 *
1935 * For blocking we (obviously) need to provide the same guarantee as for
1936 * migration. However the means are completely different as there is no lock
1937 * chain to provide order. Instead we do:
1938 *
1939 * 1) smp_store_release(X->on_cpu, 0)
1940 * 2) smp_cond_acquire(!X->on_cpu)
1941 *
1942 * Example:
1943 *
1944 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
1945 *
1946 * LOCK rq(0)->lock LOCK X->pi_lock
1947 * dequeue X
1948 * sched-out X
1949 * smp_store_release(X->on_cpu, 0);
1950 *
1951 * smp_cond_acquire(!X->on_cpu);
1952 * X->state = WAKING
1953 * set_task_cpu(X,2)
1954 *
1955 * LOCK rq(2)->lock
1956 * enqueue X
1957 * X->state = RUNNING
1958 * UNLOCK rq(2)->lock
1959 *
1960 * LOCK rq(2)->lock // orders against CPU1
1961 * sched-out Z
1962 * sched-in X
1963 * UNLOCK rq(2)->lock
1964 *
1965 * UNLOCK X->pi_lock
1966 * UNLOCK rq(0)->lock
1967 *
1968 *
1969 * However, for wakeups there is a second guarantee we must provide, namely we
1970 * must observe the state that led to our wakeup. That is, not only must our
1971 * task observe its own prior state, it must also observe the stores prior to
1972 * its wakeup.
1973 *
1974 * This means that any means of doing remote wakeups must order the CPU doing
1975 * the wakeup against the CPU the task is going to end up running on. This,
1976 * however, is already required for the regular Program-Order guarantee above,
1977 * since the waking CPU is the one issuing the ACQUIRE (smp_cond_acquire).
1978 *
1979 */
1980
9ed3811a 1981/**
1da177e4 1982 * try_to_wake_up - wake up a thread
9ed3811a 1983 * @p: the thread to be awakened
1da177e4 1984 * @state: the mask of task states that can be woken
9ed3811a 1985 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
1986 *
1987 * Put it on the run-queue if it's not already there. The "current"
1988 * thread is always on the run-queue (except when the actual
1989 * re-schedule is in progress), and as such you're allowed to do
1990 * the simpler "current->state = TASK_RUNNING" to mark yourself
1991 * runnable without the overhead of this.
1992 *
e69f6186 1993 * Return: %true if @p was woken up, %false if it was already running
9ed3811a 1994 * or @state didn't match @p's state.
1da177e4 1995 */
e4a52bcb
PZ
1996static int
1997try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 1998{
1da177e4 1999 unsigned long flags;
c05fbafb 2000 int cpu, success = 0;
2398f2c6 2001
e0acd0a6
ON
2002 /*
2003 * If we are going to wake up a thread waiting for CONDITION we
2004 * need to ensure that CONDITION=1 done by the caller can not be
2005 * reordered with p->state check below. This pairs with mb() in
2006 * set_current_state() the waiting thread does.
2007 */
2008 smp_mb__before_spinlock();
013fdb80 2009 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 2010 if (!(p->state & state))
1da177e4
LT
2011 goto out;
2012
fbd705a0
PZ
2013 trace_sched_waking(p);
2014
c05fbafb 2015 success = 1; /* we're going to change ->state */
1da177e4 2016 cpu = task_cpu(p);
1da177e4 2017
c05fbafb
PZ
2018 if (p->on_rq && ttwu_remote(p, wake_flags))
2019 goto stat;
1da177e4 2020
1da177e4 2021#ifdef CONFIG_SMP
ecf7d01c
PZ
2022 /*
2023 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
2024 * possible to, falsely, observe p->on_cpu == 0.
2025 *
2026 * One must be running (->on_cpu == 1) in order to remove oneself
2027 * from the runqueue.
2028 *
2029 * [S] ->on_cpu = 1; [L] ->on_rq
2030 * UNLOCK rq->lock
2031 * RMB
2032 * LOCK rq->lock
2033 * [S] ->on_rq = 0; [L] ->on_cpu
2034 *
2035 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
2036 * from the consecutive calls to schedule(); the first switching to our
2037 * task, the second putting it to sleep.
2038 */
2039 smp_rmb();
2040
e9c84311 2041 /*
c05fbafb
PZ
2042 * If the owning (remote) cpu is still in the middle of schedule() with
2043 * this task as prev, wait until it's done referencing the task.
b75a2253
PZ
2044 *
2045 * Pairs with the smp_store_release() in finish_lock_switch().
2046 *
2047 * This ensures that tasks getting woken will be fully ordered against
2048 * their previous state and preserve Program Order.
0970d299 2049 */
b3e0b1b6 2050 smp_cond_acquire(!p->on_cpu);
1da177e4 2051
a8e4f2ea 2052 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 2053 p->state = TASK_WAKING;
e7693a36 2054
ac66f547 2055 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
2056 if (task_cpu(p) != cpu) {
2057 wake_flags |= WF_MIGRATED;
e4a52bcb 2058 set_task_cpu(p, cpu);
f339b9dc 2059 }
1da177e4 2060#endif /* CONFIG_SMP */
1da177e4 2061
b5179ac7 2062 ttwu_queue(p, cpu, wake_flags);
c05fbafb 2063stat:
cb251765
MG
2064 if (schedstat_enabled())
2065 ttwu_stat(p, cpu, wake_flags);
1da177e4 2066out:
013fdb80 2067 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
2068
2069 return success;
2070}
2071
21aa9af0
TH
2072/**
2073 * try_to_wake_up_local - try to wake up a local task with rq lock held
2074 * @p: the thread to be awakened
2075 *
2acca55e 2076 * Put @p on the run-queue if it's not already there. The caller must
21aa9af0 2077 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2acca55e 2078 * the current task.
21aa9af0 2079 */
e7904a28 2080static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
21aa9af0
TH
2081{
2082 struct rq *rq = task_rq(p);
21aa9af0 2083
383efcd0
TH
2084 if (WARN_ON_ONCE(rq != this_rq()) ||
2085 WARN_ON_ONCE(p == current))
2086 return;
2087
21aa9af0
TH
2088 lockdep_assert_held(&rq->lock);
2089
2acca55e 2090 if (!raw_spin_trylock(&p->pi_lock)) {
cbce1a68
PZ
2091 /*
2092 * This is OK because current is on_cpu, which avoids it being
2093 * picked for load-balance; preemption/IRQs are still
2094 * disabled, avoiding further scheduler activity on it, and we've
2095 * not yet picked a replacement task.
2096 */
e7904a28 2097 lockdep_unpin_lock(&rq->lock, cookie);
2acca55e
PZ
2098 raw_spin_unlock(&rq->lock);
2099 raw_spin_lock(&p->pi_lock);
2100 raw_spin_lock(&rq->lock);
e7904a28 2101 lockdep_repin_lock(&rq->lock, cookie);
2acca55e
PZ
2102 }
2103
21aa9af0 2104 if (!(p->state & TASK_NORMAL))
2acca55e 2105 goto out;
21aa9af0 2106
fbd705a0
PZ
2107 trace_sched_waking(p);
2108
da0c1e65 2109 if (!task_on_rq_queued(p))
d7c01d27
PZ
2110 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2111
e7904a28 2112 ttwu_do_wakeup(rq, p, 0, cookie);
cb251765
MG
2113 if (schedstat_enabled())
2114 ttwu_stat(p, smp_processor_id(), 0);
2acca55e
PZ
2115out:
2116 raw_spin_unlock(&p->pi_lock);
21aa9af0
TH
2117}
2118
50fa610a
DH
2119/**
2120 * wake_up_process - Wake up a specific process
2121 * @p: The process to be woken up.
2122 *
2123 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
2124 * processes.
2125 *
2126 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a
DH
2127 *
2128 * It may be assumed that this function implies a write memory barrier before
2129 * changing the task state if and only if any tasks are woken up.
2130 */
7ad5b3a5 2131int wake_up_process(struct task_struct *p)
1da177e4 2132{
9067ac85 2133 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 2134}
1da177e4
LT
2135EXPORT_SYMBOL(wake_up_process);
2136
7ad5b3a5 2137int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2138{
2139 return try_to_wake_up(p, state, 0);
2140}
2141
a5e7be3b
JL
2142/*
2143 * This function clears the sched_dl_entity static params.
2144 */
2145void __dl_clear_params(struct task_struct *p)
2146{
2147 struct sched_dl_entity *dl_se = &p->dl;
2148
2149 dl_se->dl_runtime = 0;
2150 dl_se->dl_deadline = 0;
2151 dl_se->dl_period = 0;
2152 dl_se->flags = 0;
2153 dl_se->dl_bw = 0;
40767b0d
PZ
2154
2155 dl_se->dl_throttled = 0;
40767b0d 2156 dl_se->dl_yielded = 0;
a5e7be3b
JL
2157}
2158
1da177e4
LT
2159/*
2160 * Perform scheduler related setup for a newly forked process p.
2161 * p is forked by current.
dd41f596
IM
2162 *
2163 * __sched_fork() is basic setup used by init_idle() too:
2164 */
5e1576ed 2165static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2166{
fd2f4419
PZ
2167 p->on_rq = 0;
2168
2169 p->se.on_rq = 0;
dd41f596
IM
2170 p->se.exec_start = 0;
2171 p->se.sum_exec_runtime = 0;
f6cf891c 2172 p->se.prev_sum_exec_runtime = 0;
6c594c21 2173 p->se.nr_migrations = 0;
da7a735e 2174 p->se.vruntime = 0;
fd2f4419 2175 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d 2176
ad936d86
BP
2177#ifdef CONFIG_FAIR_GROUP_SCHED
2178 p->se.cfs_rq = NULL;
2179#endif
2180
6cfb0d5d 2181#ifdef CONFIG_SCHEDSTATS
cb251765 2182 /* Even if schedstat is disabled, there should not be garbage */
41acab88 2183 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2184#endif
476d139c 2185
aab03e05 2186 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 2187 init_dl_task_timer(&p->dl);
a5e7be3b 2188 __dl_clear_params(p);
aab03e05 2189
fa717060 2190 INIT_LIST_HEAD(&p->rt.run_list);
ff77e468
PZ
2191 p->rt.timeout = 0;
2192 p->rt.time_slice = sched_rr_timeslice;
2193 p->rt.on_rq = 0;
2194 p->rt.on_list = 0;
476d139c 2195
e107be36
AK
2196#ifdef CONFIG_PREEMPT_NOTIFIERS
2197 INIT_HLIST_HEAD(&p->preempt_notifiers);
2198#endif
cbee9f88
PZ
2199
2200#ifdef CONFIG_NUMA_BALANCING
2201 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
7e8d16b6 2202 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
cbee9f88
PZ
2203 p->mm->numa_scan_seq = 0;
2204 }
2205
5e1576ed
RR
2206 if (clone_flags & CLONE_VM)
2207 p->numa_preferred_nid = current->numa_preferred_nid;
2208 else
2209 p->numa_preferred_nid = -1;
2210
cbee9f88
PZ
2211 p->node_stamp = 0ULL;
2212 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
4b96a29b 2213 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
cbee9f88 2214 p->numa_work.next = &p->numa_work;
44dba3d5 2215 p->numa_faults = NULL;
7e2703e6
RR
2216 p->last_task_numa_placement = 0;
2217 p->last_sum_exec_runtime = 0;
8c8a743c 2218
8c8a743c 2219 p->numa_group = NULL;
cbee9f88 2220#endif /* CONFIG_NUMA_BALANCING */
dd41f596
IM
2221}
2222
2a595721
SD
2223DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2224
1a687c2e 2225#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 2226
1a687c2e
MG
2227void set_numabalancing_state(bool enabled)
2228{
2229 if (enabled)
2a595721 2230 static_branch_enable(&sched_numa_balancing);
1a687c2e 2231 else
2a595721 2232 static_branch_disable(&sched_numa_balancing);
1a687c2e 2233}
54a43d54
AK
2234
2235#ifdef CONFIG_PROC_SYSCTL
2236int sysctl_numa_balancing(struct ctl_table *table, int write,
2237 void __user *buffer, size_t *lenp, loff_t *ppos)
2238{
2239 struct ctl_table t;
2240 int err;
2a595721 2241 int state = static_branch_likely(&sched_numa_balancing);
54a43d54
AK
2242
2243 if (write && !capable(CAP_SYS_ADMIN))
2244 return -EPERM;
2245
2246 t = *table;
2247 t.data = &state;
2248 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2249 if (err < 0)
2250 return err;
2251 if (write)
2252 set_numabalancing_state(state);
2253 return err;
2254}
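/* e.g. "sysctl kernel.numa_balancing=0" flips sched_numa_balancing at runtime */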
2255#endif
2256#endif
dd41f596 2257
4698f88c
JP
2258#ifdef CONFIG_SCHEDSTATS
2259
cb251765 2260DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4698f88c 2261static bool __initdata __sched_schedstats = false;
cb251765 2262
cb251765
MG
2263static void set_schedstats(bool enabled)
2264{
2265 if (enabled)
2266 static_branch_enable(&sched_schedstats);
2267 else
2268 static_branch_disable(&sched_schedstats);
2269}
2270
2271void force_schedstat_enabled(void)
2272{
2273 if (!schedstat_enabled()) {
2274 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
2275 static_branch_enable(&sched_schedstats);
2276 }
2277}
2278
2279static int __init setup_schedstats(char *str)
2280{
2281 int ret = 0;
2282 if (!str)
2283 goto out;
2284
4698f88c
JP
2285 /*
2286 * This code is called before jump labels have been set up, so we can't
2287 * change the static branch directly just yet. Instead set a temporary
2288 * variable so init_schedstats() can do it later.
2289 */
cb251765 2290 if (!strcmp(str, "enable")) {
4698f88c 2291 __sched_schedstats = true;
cb251765
MG
2292 ret = 1;
2293 } else if (!strcmp(str, "disable")) {
4698f88c 2294 __sched_schedstats = false;
cb251765
MG
2295 ret = 1;
2296 }
2297out:
2298 if (!ret)
2299 pr_warn("Unable to parse schedstats=\n");
2300
2301 return ret;
2302}
2303__setup("schedstats=", setup_schedstats);
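/* e.g. booting with "schedstats=enable" turns schedstats on before jump labels are ready */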
2304
4698f88c
JP
2305static void __init init_schedstats(void)
2306{
2307 set_schedstats(__sched_schedstats);
2308}
2309
cb251765
MG
2310#ifdef CONFIG_PROC_SYSCTL
2311int sysctl_schedstats(struct ctl_table *table, int write,
2312 void __user *buffer, size_t *lenp, loff_t *ppos)
2313{
2314 struct ctl_table t;
2315 int err;
2316 int state = static_branch_likely(&sched_schedstats);
2317
2318 if (write && !capable(CAP_SYS_ADMIN))
2319 return -EPERM;
2320
2321 t = *table;
2322 t.data = &state;
2323 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2324 if (err < 0)
2325 return err;
2326 if (write)
2327 set_schedstats(state);
2328 return err;
2329}
4698f88c
JP
2330#endif /* CONFIG_PROC_SYSCTL */
2331#else /* !CONFIG_SCHEDSTATS */
2332static inline void init_schedstats(void) {}
2333#endif /* CONFIG_SCHEDSTATS */
dd41f596
IM
2334
2335/*
2336 * fork()/clone()-time setup:
2337 */
aab03e05 2338int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2339{
0122ec5b 2340 unsigned long flags;
dd41f596
IM
2341 int cpu = get_cpu();
2342
5e1576ed 2343 __sched_fork(clone_flags, p);
06b83b5f 2344 /*
7dc603c9 2345 * We mark the process as NEW here. This guarantees that
06b83b5f
PZ
2346 * nobody will actually run it, and a signal or other external
2347 * event cannot wake it up and insert it on the runqueue either.
2348 */
7dc603c9 2349 p->state = TASK_NEW;
dd41f596 2350
c350a04e
MG
2351 /*
2352 * Make sure we do not leak PI boosting priority to the child.
2353 */
2354 p->prio = current->normal_prio;
2355
b9dc29e7
MG
2356 /*
2357 * Revert to default priority/policy on fork if requested.
2358 */
2359 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 2360 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 2361 p->policy = SCHED_NORMAL;
6c697bdf 2362 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
2363 p->rt_priority = 0;
2364 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2365 p->static_prio = NICE_TO_PRIO(0);
2366
2367 p->prio = p->normal_prio = __normal_prio(p);
2368 set_load_weight(p);
6c697bdf 2369
b9dc29e7
MG
2370 /*
2371 * We don't need the reset flag anymore after the fork. It has
2372 * fulfilled its duty:
2373 */
2374 p->sched_reset_on_fork = 0;
2375 }
ca94c442 2376
aab03e05
DF
2377 if (dl_prio(p->prio)) {
2378 put_cpu();
2379 return -EAGAIN;
2380 } else if (rt_prio(p->prio)) {
2381 p->sched_class = &rt_sched_class;
2382 } else {
2ddbf952 2383 p->sched_class = &fair_sched_class;
aab03e05 2384 }
b29739f9 2385
7dc603c9
PZ
2386 init_entity_runnable_average(&p->se);
2387
86951599
PZ
2388 /*
2389 * The child is not yet in the pid-hash so no cgroup attach races,
2390 * and the cgroup is pinned to this child because cgroup_fork()
2391 * is run before sched_fork().
2392 *
2393 * Silence PROVE_RCU.
2394 */
0122ec5b 2395 raw_spin_lock_irqsave(&p->pi_lock, flags);
e210bffd
PZ
2396 /*
2397 * We're setting the cpu for the first time, we don't migrate,
2398 * so use __set_task_cpu().
2399 */
2400 __set_task_cpu(p, cpu);
2401 if (p->sched_class->task_fork)
2402 p->sched_class->task_fork(p);
0122ec5b 2403 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 2404
f6db8347 2405#ifdef CONFIG_SCHED_INFO
dd41f596 2406 if (likely(sched_info_on()))
52f17b6c 2407 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2408#endif
3ca7a440
PZ
2409#if defined(CONFIG_SMP)
2410 p->on_cpu = 0;
4866cde0 2411#endif
01028747 2412 init_task_preempt_count(p);
806c09a7 2413#ifdef CONFIG_SMP
917b627d 2414 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 2415 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 2416#endif
917b627d 2417
476d139c 2418 put_cpu();
aab03e05 2419 return 0;
1da177e4
LT
2420}
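/*
 * Roughly, the scheduler side of fork() proceeds as:
 *
 *   copy_process()
 *     sched_fork()        - state = TASK_NEW, class/priority settled, cpu set
 *   ...
 *   wake_up_new_task()    - state = TASK_RUNNING, fork-balanced and enqueued
 *
 * TASK_NEW is not a wakeable state, so nothing can slip the child onto a
 * runqueue in between.
 */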
2421
332ac17e
DF
2422unsigned long to_ratio(u64 period, u64 runtime)
2423{
2424 if (runtime == RUNTIME_INF)
2425 return 1ULL << 20;
2426
2427 /*
2428 * Doing this here saves a lot of checks in all
2429 * the calling paths, and returning zero seems
2430 * safe for them anyway.
2431 */
2432 if (period == 0)
2433 return 0;
2434
2435 return div64_u64(runtime << 20, period);
2436}
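/*
 * Example: a -deadline task with runtime = 10ms out of period = 100ms gets
 * to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *	= (10000000 << 20) / 100000000 ~= 104857,
 * i.e. roughly 10% of the 1 << 20 scale used for bandwidth accounting.
 */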
2437
2438#ifdef CONFIG_SMP
2439inline struct dl_bw *dl_bw_of(int i)
2440{
f78f5b90
PM
2441 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2442 "sched RCU must be held");
332ac17e
DF
2443 return &cpu_rq(i)->rd->dl_bw;
2444}
2445
de212f18 2446static inline int dl_bw_cpus(int i)
332ac17e 2447{
de212f18
PZ
2448 struct root_domain *rd = cpu_rq(i)->rd;
2449 int cpus = 0;
2450
f78f5b90
PM
2451 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2452 "sched RCU must be held");
de212f18
PZ
2453 for_each_cpu_and(i, rd->span, cpu_active_mask)
2454 cpus++;
2455
2456 return cpus;
332ac17e
DF
2457}
2458#else
2459inline struct dl_bw *dl_bw_of(int i)
2460{
2461 return &cpu_rq(i)->dl.dl_bw;
2462}
2463
de212f18 2464static inline int dl_bw_cpus(int i)
332ac17e
DF
2465{
2466 return 1;
2467}
2468#endif
2469
332ac17e
DF
2470/*
2471 * We must be sure that accepting a new task (or allowing changing the
2472 * parameters of an existing one) is consistent with the bandwidth
2473 * constraints. If yes, this function also accordingly updates the currently
2474 * allocated bandwidth to reflect the new situation.
2475 *
2476 * This function is called while holding p's rq->lock.
40767b0d
PZ
2477 *
2478 * XXX we should delay bw change until the task's 0-lag point, see
2479 * __setparam_dl().
332ac17e
DF
2480 */
2481static int dl_overflow(struct task_struct *p, int policy,
2482 const struct sched_attr *attr)
2483{
2484
2485 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
4df1638c 2486 u64 period = attr->sched_period ?: attr->sched_deadline;
332ac17e
DF
2487 u64 runtime = attr->sched_runtime;
2488 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
de212f18 2489 int cpus, err = -1;
332ac17e 2490
fec148c0
XP
2491 /* !deadline task may carry old deadline bandwidth */
2492 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
332ac17e
DF
2493 return 0;
2494
2495 /*
2496 * Whether a task enters, leaves, or stays -deadline but changes
2497 * its parameters, we may need to update accordingly the total
2498 * allocated bandwidth of the container.
2499 */
2500 raw_spin_lock(&dl_b->lock);
de212f18 2501 cpus = dl_bw_cpus(task_cpu(p));
332ac17e
DF
2502 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2503 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2504 __dl_add(dl_b, new_bw);
2505 err = 0;
2506 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2507 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2508 __dl_clear(dl_b, p->dl.dl_bw);
2509 __dl_add(dl_b, new_bw);
2510 err = 0;
2511 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2512 __dl_clear(dl_b, p->dl.dl_bw);
2513 err = 0;
2514 }
2515 raw_spin_unlock(&dl_b->lock);
2516
2517 return err;
2518}
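/*
 * For example, on a 4-CPU root domain each -deadline task contributes
 * to_ratio(period, runtime) to dl_b->total_bw, and the admission test above
 * roughly requires that total (with the task's old contribution swapped for
 * the new one) to stay within 4 * dl_b->bw, where dl_b->bw defaults to the
 * sched_rt_runtime_us / sched_rt_period_us ratio (95%).
 */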
2519
2520extern void init_dl_bw(struct dl_bw *dl_b);
2521
1da177e4
LT
2522/*
2523 * wake_up_new_task - wake up a newly created task for the first time.
2524 *
2525 * This function will do some initial scheduler statistics housekeeping
2526 * that must be done for every newly created context, then puts the task
2527 * on the runqueue and wakes it.
2528 */
3e51e3ed 2529void wake_up_new_task(struct task_struct *p)
1da177e4 2530{
eb580751 2531 struct rq_flags rf;
dd41f596 2532 struct rq *rq;
fabf318e 2533
eb580751 2534 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
7dc603c9 2535 p->state = TASK_RUNNING;
fabf318e
PZ
2536#ifdef CONFIG_SMP
2537 /*
2538 * Fork balancing, do it here and not earlier because:
2539 * - cpus_allowed can change in the fork path
2540 * - any previously selected cpu might disappear through hotplug
e210bffd
PZ
2541 *
2542 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
2543 * as we're not fully set-up yet.
fabf318e 2544 */
e210bffd 2545 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
0017d735 2546#endif
b7fa30c9 2547 rq = __task_rq_lock(p, &rf);
2b8c41da 2548 post_init_entity_util_avg(&p->se);
0017d735 2549
cd29fe6f 2550 activate_task(rq, p, 0);
da0c1e65 2551 p->on_rq = TASK_ON_RQ_QUEUED;
fbd705a0 2552 trace_sched_wakeup_new(p);
a7558e01 2553 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2554#ifdef CONFIG_SMP
0aaafaab
PZ
2555 if (p->sched_class->task_woken) {
2556 /*
2557 * Nothing relies on rq->lock after this, so it's fine to
2558 * drop it.
2559 */
e7904a28 2560 lockdep_unpin_lock(&rq->lock, rf.cookie);
efbbd05a 2561 p->sched_class->task_woken(rq, p);
e7904a28 2562 lockdep_repin_lock(&rq->lock, rf.cookie);
0aaafaab 2563 }
9a897c5a 2564#endif
eb580751 2565 task_rq_unlock(rq, p, &rf);
1da177e4
LT
2566}
2567
e107be36
AK
2568#ifdef CONFIG_PREEMPT_NOTIFIERS
2569
1cde2930
PZ
2570static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2571
2ecd9d29
PZ
2572void preempt_notifier_inc(void)
2573{
2574 static_key_slow_inc(&preempt_notifier_key);
2575}
2576EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2577
2578void preempt_notifier_dec(void)
2579{
2580 static_key_slow_dec(&preempt_notifier_key);
2581}
2582EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2583
e107be36 2584/**
80dd99b3 2585 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2586 * @notifier: notifier struct to register
e107be36
AK
2587 */
2588void preempt_notifier_register(struct preempt_notifier *notifier)
2589{
2ecd9d29
PZ
2590 if (!static_key_false(&preempt_notifier_key))
2591 WARN(1, "registering preempt_notifier while notifiers disabled\n");
2592
e107be36
AK
2593 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2594}
2595EXPORT_SYMBOL_GPL(preempt_notifier_register);
2596
2597/**
2598 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2599 * @notifier: notifier struct to unregister
e107be36 2600 *
d84525a8 2601 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
2602 */
2603void preempt_notifier_unregister(struct preempt_notifier *notifier)
2604{
2605 hlist_del(&notifier->link);
2606}
2607EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2608
1cde2930 2609static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2610{
2611 struct preempt_notifier *notifier;
e107be36 2612
b67bfe0d 2613 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2614 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2615}
2616
1cde2930
PZ
2617static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2618{
2619 if (static_key_false(&preempt_notifier_key))
2620 __fire_sched_in_preempt_notifiers(curr);
2621}
2622
e107be36 2623static void
1cde2930
PZ
2624__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2625 struct task_struct *next)
e107be36
AK
2626{
2627 struct preempt_notifier *notifier;
e107be36 2628
b67bfe0d 2629 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2630 notifier->ops->sched_out(notifier, next);
2631}
2632
1cde2930
PZ
2633static __always_inline void
2634fire_sched_out_preempt_notifiers(struct task_struct *curr,
2635 struct task_struct *next)
2636{
2637 if (static_key_false(&preempt_notifier_key))
2638 __fire_sched_out_preempt_notifiers(curr, next);
2639}
2640
6d6bc0ad 2641#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 2642
1cde2930 2643static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2644{
2645}
2646
1cde2930 2647static inline void
e107be36
AK
2648fire_sched_out_preempt_notifiers(struct task_struct *curr,
2649 struct task_struct *next)
2650{
2651}
2652
6d6bc0ad 2653#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2654
4866cde0
NP
2655/**
2656 * prepare_task_switch - prepare to switch tasks
2657 * @rq: the runqueue preparing to switch
421cee29 2658 * @prev: the current task that is being switched out
4866cde0
NP
2659 * @next: the task we are going to switch to.
2660 *
2661 * This is called with the rq lock held and interrupts off. It must
2662 * be paired with a subsequent finish_task_switch after the context
2663 * switch.
2664 *
2665 * prepare_task_switch sets up locking and calls architecture specific
2666 * hooks.
2667 */
e107be36
AK
2668static inline void
2669prepare_task_switch(struct rq *rq, struct task_struct *prev,
2670 struct task_struct *next)
4866cde0 2671{
43148951 2672 sched_info_switch(rq, prev, next);
fe4b04fa 2673 perf_event_task_sched_out(prev, next);
e107be36 2674 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2675 prepare_lock_switch(rq, next);
2676 prepare_arch_switch(next);
2677}
2678
1da177e4
LT
2679/**
2680 * finish_task_switch - clean up after a task-switch
2681 * @prev: the thread we just switched away from.
2682 *
4866cde0
NP
2683 * finish_task_switch must be called after the context switch, paired
2684 * with a prepare_task_switch call before the context switch.
2685 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2686 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2687 *
2688 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2689 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2690 * with the lock held can cause deadlocks; see schedule() for
2691 * details.)
dfa50b60
ON
2692 *
2693 * The context switch has flipped the stack from under us and restored the
2694 * local variables which were saved when this task called schedule() in the
2695 * past. prev == current is still correct but we need to recalculate this_rq
2696 * because prev may have moved to another CPU.
1da177e4 2697 */
dfa50b60 2698static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
2699 __releases(rq->lock)
2700{
dfa50b60 2701 struct rq *rq = this_rq();
1da177e4 2702 struct mm_struct *mm = rq->prev_mm;
55a101f8 2703 long prev_state;
1da177e4 2704
609ca066
PZ
2705 /*
2706 * The previous task will have left us with a preempt_count of 2
2707 * because it left us after:
2708 *
2709 * schedule()
2710 * preempt_disable(); // 1
2711 * __schedule()
2712 * raw_spin_lock_irq(&rq->lock) // 2
2713 *
2714 * Also, see FORK_PREEMPT_COUNT.
2715 */
e2bf1c4b
PZ
2716 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2717 "corrupted preempt_count: %s/%d/0x%x\n",
2718 current->comm, current->pid, preempt_count()))
2719 preempt_count_set(FORK_PREEMPT_COUNT);
609ca066 2720
1da177e4
LT
2721 rq->prev_mm = NULL;
2722
2723 /*
2724 * A task struct has one reference for the use as "current".
c394cc9f 2725 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2726 * schedule one last time. The schedule call will never return, and
2727 * the scheduled task must drop that reference.
95913d97
PZ
2728 *
2729 * We must observe prev->state before clearing prev->on_cpu (in
2730 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2731 * running on another CPU and we could race with its RUNNING -> DEAD
2732 * transition, resulting in a double drop.
1da177e4 2733 */
55a101f8 2734 prev_state = prev->state;
bf9fae9f 2735 vtime_task_switch(prev);
a8d757ef 2736 perf_event_task_sched_in(prev, current);
4866cde0 2737 finish_lock_switch(rq, prev);
01f23e16 2738 finish_arch_post_lock_switch();
e8fa1362 2739
e107be36 2740 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2741 if (mm)
2742 mmdrop(mm);
c394cc9f 2743 if (unlikely(prev_state == TASK_DEAD)) {
e6c390f2
DF
2744 if (prev->sched_class->task_dead)
2745 prev->sched_class->task_dead(prev);
2746
c6fd91f0 2747 /*
2748 * Remove function-return probe instances associated with this
2749 * task and put them back on the free list.
9761eea8 2750 */
c6fd91f0 2751 kprobe_flush_task(prev);
1da177e4 2752 put_task_struct(prev);
c6fd91f0 2753 }
99e5ada9 2754
de734f89 2755 tick_nohz_task_switch();
dfa50b60 2756 return rq;
1da177e4
LT
2757}
2758
3f029d3c
GH
2759#ifdef CONFIG_SMP
2760
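/*
 * Balance callbacks are queued on rq->balance_callback by the rt and
 * deadline classes while rq->lock is held (e.g. to push overloaded -rt or
 * -deadline tasks to other CPUs); they run here, once the queueing context
 * has dropped rq->lock, see queue_balance_callback().
 */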
3f029d3c 2761/* rq->lock is NOT held, but preemption is disabled */
e3fca9e7 2762static void __balance_callback(struct rq *rq)
3f029d3c 2763{
e3fca9e7
PZ
2764 struct callback_head *head, *next;
2765 void (*func)(struct rq *rq);
2766 unsigned long flags;
3f029d3c 2767
e3fca9e7
PZ
2768 raw_spin_lock_irqsave(&rq->lock, flags);
2769 head = rq->balance_callback;
2770 rq->balance_callback = NULL;
2771 while (head) {
2772 func = (void (*)(struct rq *))head->func;
2773 next = head->next;
2774 head->next = NULL;
2775 head = next;
3f029d3c 2776
e3fca9e7 2777 func(rq);
3f029d3c 2778 }
e3fca9e7
PZ
2779 raw_spin_unlock_irqrestore(&rq->lock, flags);
2780}
2781
2782static inline void balance_callback(struct rq *rq)
2783{
2784 if (unlikely(rq->balance_callback))
2785 __balance_callback(rq);
3f029d3c
GH
2786}
2787
2788#else
da19ab51 2789
e3fca9e7 2790static inline void balance_callback(struct rq *rq)
3f029d3c 2791{
1da177e4
LT
2792}
2793
3f029d3c
GH
2794#endif
2795
1da177e4
LT
2796/**
2797 * schedule_tail - first thing a freshly forked thread must call.
2798 * @prev: the thread we just switched away from.
2799 */
722a9f92 2800asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
2801 __releases(rq->lock)
2802{
1a43a14a 2803 struct rq *rq;
da19ab51 2804
609ca066
PZ
2805 /*
2806 * New tasks start with FORK_PREEMPT_COUNT, see there and
2807 * finish_task_switch() for details.
2808 *
2809 * finish_task_switch() will drop rq->lock() and lower preempt_count
2810 * and the preempt_enable() will end up enabling preemption (on
2811 * PREEMPT_COUNT kernels).
2812 */
2813
dfa50b60 2814 rq = finish_task_switch(prev);
e3fca9e7 2815 balance_callback(rq);
1a43a14a 2816 preempt_enable();
70b97a7f 2817
1da177e4 2818 if (current->set_child_tid)
b488893a 2819 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2820}
2821
2822/*
dfa50b60 2823 * context_switch - switch to the new MM and the new thread's register state.
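 *
 * For a kernel thread (next->mm == NULL) the address-space switch is
 * skipped: the thread borrows prev's mm as its lazy-TLB active_mm and takes
 * a reference on it; the reference is dropped via mmdrop() in
 * finish_task_switch() when the borrowing thread is itself switched out.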
1da177e4 2824 */
04936948 2825static __always_inline struct rq *
70b97a7f 2826context_switch(struct rq *rq, struct task_struct *prev,
e7904a28 2827 struct task_struct *next, struct pin_cookie cookie)
1da177e4 2828{
dd41f596 2829 struct mm_struct *mm, *oldmm;
1da177e4 2830
e107be36 2831 prepare_task_switch(rq, prev, next);
fe4b04fa 2832
dd41f596
IM
2833 mm = next->mm;
2834 oldmm = prev->active_mm;
9226d125
ZA
2835 /*
2836 * For paravirt, this is coupled with an exit in switch_to to
2837 * combine the page table reload and the switch backend into
2838 * one hypercall.
2839 */
224101ed 2840 arch_start_context_switch(prev);
9226d125 2841
31915ab4 2842 if (!mm) {
1da177e4
LT
2843 next->active_mm = oldmm;
2844 atomic_inc(&oldmm->mm_count);
2845 enter_lazy_tlb(oldmm, next);
2846 } else
f98db601 2847 switch_mm_irqs_off(oldmm, mm, next);
1da177e4 2848
31915ab4 2849 if (!prev->mm) {
1da177e4 2850 prev->active_mm = NULL;
1da177e4
LT
2851 rq->prev_mm = oldmm;
2852 }
3a5f5e48
IM
2853 /*
2854 * The runqueue lock will be released by the next
2855 * task (which is an invalid locking op but in the case
2856 * of the scheduler it's an obvious special-case), so we
2857 * do an early lockdep release here:
2858 */
e7904a28 2859 lockdep_unpin_lock(&rq->lock, cookie);
8a25d5de 2860 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1da177e4
LT
2861
2862 /* Here we just switch the register state and the stack. */
2863 switch_to(prev, next, prev);
dd41f596 2864 barrier();
dfa50b60
ON
2865
2866 return finish_task_switch(prev);
1da177e4
LT
2867}
2868
2869/*
1c3e8264 2870 * nr_running and nr_context_switches:
1da177e4
LT
2871 *
2872 * externally visible scheduler statistics: current number of runnable
1c3e8264 2873 * threads, total number of context switches performed since bootup.
1da177e4
LT
2874 */
2875unsigned long nr_running(void)
2876{
2877 unsigned long i, sum = 0;
2878
2879 for_each_online_cpu(i)
2880 sum += cpu_rq(i)->nr_running;
2881
2882 return sum;
f711f609 2883}
1da177e4 2884
2ee507c4
TC
2885/*
2886 * Check if only the current task is running on the cpu.
00cc1633
DD
2887 *
2888 * Caution: this function does not check that the caller has disabled
2889 * preemption, thus the result might have a time-of-check-to-time-of-use
2890 * race. The caller is responsible for using it correctly, for example:
2891 *
2892 * - from a non-preemptable section (of course)
2893 *
2894 * - from a thread that is bound to a single CPU
2895 *
2896 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
2897 */
2898bool single_task_running(void)
2899{
00cc1633 2900 return raw_rq()->nr_running == 1;
2ee507c4
TC
2901}
2902EXPORT_SYMBOL(single_task_running);
2903
1da177e4 2904unsigned long long nr_context_switches(void)
46cb4b7c 2905{
cc94abfc
SR
2906 int i;
2907 unsigned long long sum = 0;
46cb4b7c 2908
0a945022 2909 for_each_possible_cpu(i)
1da177e4 2910 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2911
1da177e4
LT
2912 return sum;
2913}
483b4ee6 2914
1da177e4
LT
2915unsigned long nr_iowait(void)
2916{
2917 unsigned long i, sum = 0;
483b4ee6 2918
0a945022 2919 for_each_possible_cpu(i)
1da177e4 2920 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2921
1da177e4
LT
2922 return sum;
2923}
483b4ee6 2924
8c215bd3 2925unsigned long nr_iowait_cpu(int cpu)
69d25870 2926{
8c215bd3 2927 struct rq *this = cpu_rq(cpu);
69d25870
AV
2928 return atomic_read(&this->nr_iowait);
2929}
46cb4b7c 2930
372ba8cb
MG
2931void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2932{
3289bdb4
PZ
2933 struct rq *rq = this_rq();
2934 *nr_waiters = atomic_read(&rq->nr_iowait);
2935 *load = rq->load.weight;
372ba8cb
MG
2936}
2937
dd41f596 2938#ifdef CONFIG_SMP
8a0be9ef 2939
46cb4b7c 2940/*
38022906
PZ
2941 * sched_exec - execve() is a valuable balancing opportunity, because at
2942 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2943 */
38022906 2944void sched_exec(void)
46cb4b7c 2945{
38022906 2946 struct task_struct *p = current;
1da177e4 2947 unsigned long flags;
0017d735 2948 int dest_cpu;
46cb4b7c 2949
8f42ced9 2950 raw_spin_lock_irqsave(&p->pi_lock, flags);
ac66f547 2951 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
0017d735
PZ
2952 if (dest_cpu == smp_processor_id())
2953 goto unlock;
38022906 2954
8f42ced9 2955 if (likely(cpu_active(dest_cpu))) {
969c7921 2956 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2957
8f42ced9
PZ
2958 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2959 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2960 return;
2961 }
0017d735 2962unlock:
8f42ced9 2963 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2964}
dd41f596 2965
1da177e4
LT
2966#endif
2967
1da177e4 2968DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2969DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2970
2971EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2972EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 2973
c5f8d995
HS
2974/*
2975 * Return accounted runtime for the task.
2976 * In case the task is currently running, return the runtime plus current's
2977 * pending runtime that has not been accounted yet.
2978 */
2979unsigned long long task_sched_runtime(struct task_struct *p)
2980{
eb580751 2981 struct rq_flags rf;
c5f8d995 2982 struct rq *rq;
6e998916 2983 u64 ns;
c5f8d995 2984
911b2898
PZ
2985#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2986 /*
2987 * 64-bit doesn't need locks to atomically read a 64-bit value.
2988 * So we have an optimization chance when the task's delta_exec is 0.
2989 * Reading ->on_cpu is racy, but this is ok.
2990 *
2991 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2992 * If we race with it entering cpu, unaccounted time is 0. This is
2993 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
2994 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2995 * been accounted, so we're correct here as well.
911b2898 2996 */
da0c1e65 2997 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
2998 return p->se.sum_exec_runtime;
2999#endif
3000
eb580751 3001 rq = task_rq_lock(p, &rf);
6e998916
SG
3002 /*
3003 * Must be ->curr _and_ ->on_rq. If dequeued, we would
3004 * project cycles that may never be accounted to this
3005 * thread, breaking clock_gettime().
3006 */
3007 if (task_current(rq, p) && task_on_rq_queued(p)) {
3008 update_rq_clock(rq);
3009 p->sched_class->update_curr(rq);
3010 }
3011 ns = p->se.sum_exec_runtime;
eb580751 3012 task_rq_unlock(rq, p, &rf);
c5f8d995
HS
3013
3014 return ns;
3015}
48f24c4d 3016
7835b98b
CL
3017/*
3018 * This function gets called by the timer code, with HZ frequency.
3019 * We call it with interrupts disabled.
7835b98b
CL
3020 */
3021void scheduler_tick(void)
3022{
7835b98b
CL
3023 int cpu = smp_processor_id();
3024 struct rq *rq = cpu_rq(cpu);
dd41f596 3025 struct task_struct *curr = rq->curr;
3e51f33f
PZ
3026
3027 sched_clock_tick();
dd41f596 3028
05fa785c 3029 raw_spin_lock(&rq->lock);
3e51f33f 3030 update_rq_clock(rq);
fa85ae24 3031 curr->sched_class->task_tick(rq, curr, 0);
cee1afce 3032 cpu_load_update_active(rq);
3289bdb4 3033 calc_global_load_tick(rq);
05fa785c 3034 raw_spin_unlock(&rq->lock);
7835b98b 3035
e9d2b064 3036 perf_event_task_tick();
e220d2dc 3037
e418e1c2 3038#ifdef CONFIG_SMP
6eb57e0d 3039 rq->idle_balance = idle_cpu(cpu);
7caff66f 3040 trigger_load_balance(rq);
e418e1c2 3041#endif
265f22a9 3042 rq_last_tick_reset(rq);
1da177e4
LT
3043}
3044
265f22a9
FW
3045#ifdef CONFIG_NO_HZ_FULL
3046/**
3047 * scheduler_tick_max_deferment
3048 *
3049 * Keep at least one tick per second when a single
3050 * active task is running because the scheduler doesn't
3051 * yet completely support a full dynticks environment.
3052 *
3053 * This makes sure that uptime, CFS vruntime, load
3054 * balancing, etc... continue to move forward, even
3055 * with a very low granularity.
e69f6186
YB
3056 *
3057 * Return: Maximum deferment in nanoseconds.
265f22a9
FW
3058 */
3059u64 scheduler_tick_max_deferment(void)
3060{
3061 struct rq *rq = this_rq();
316c1608 3062 unsigned long next, now = READ_ONCE(jiffies);
265f22a9
FW
3063
3064 next = rq->last_sched_tick + HZ;
3065
3066 if (time_before_eq(next, now))
3067 return 0;
3068
8fe8ff09 3069 return jiffies_to_nsecs(next - now);
1da177e4 3070}
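/*
 * For example, with HZ=1000 and the last tick taken 300 jiffies ago, the
 * function above returns jiffies_to_nsecs(700), i.e. the tick may be
 * deferred for at most another 700ms.
 */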
265f22a9 3071#endif
1da177e4 3072
7e49fcce
SR
3073#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3074 defined(CONFIG_PREEMPT_TRACER))
47252cfb
SR
3075/*
3076 * If the value passed in is equal to the current preempt count
3077 * then we just disabled preemption. Start timing the latency.
3078 */
3079static inline void preempt_latency_start(int val)
3080{
3081 if (preempt_count() == val) {
3082 unsigned long ip = get_lock_parent_ip();
3083#ifdef CONFIG_DEBUG_PREEMPT
3084 current->preempt_disable_ip = ip;
3085#endif
3086 trace_preempt_off(CALLER_ADDR0, ip);
3087 }
3088}
7e49fcce 3089
edafe3a5 3090void preempt_count_add(int val)
1da177e4 3091{
6cd8a4bb 3092#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3093 /*
3094 * Underflow?
3095 */
9a11b49a
IM
3096 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3097 return;
6cd8a4bb 3098#endif
bdb43806 3099 __preempt_count_add(val);
6cd8a4bb 3100#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3101 /*
3102 * Spinlock count overflowing soon?
3103 */
33859f7f
MOS
3104 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3105 PREEMPT_MASK - 10);
6cd8a4bb 3106#endif
47252cfb 3107 preempt_latency_start(val);
1da177e4 3108}
bdb43806 3109EXPORT_SYMBOL(preempt_count_add);
edafe3a5 3110NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 3111
47252cfb
SR
3112/*
3113 * If the value passed in equals the current preempt count
3114 * then we just enabled preemption. Stop timing the latency.
3115 */
3116static inline void preempt_latency_stop(int val)
3117{
3118 if (preempt_count() == val)
3119 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3120}
3121
edafe3a5 3122void preempt_count_sub(int val)
1da177e4 3123{
6cd8a4bb 3124#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3125 /*
3126 * Underflow?
3127 */
01e3eb82 3128 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 3129 return;
1da177e4
LT
3130 /*
3131 * Is the spinlock portion underflowing?
3132 */
9a11b49a
IM
3133 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3134 !(preempt_count() & PREEMPT_MASK)))
3135 return;
6cd8a4bb 3136#endif
9a11b49a 3137
47252cfb 3138 preempt_latency_stop(val);
bdb43806 3139 __preempt_count_sub(val);
1da177e4 3140}
bdb43806 3141EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 3142NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4 3143
47252cfb
SR
3144#else
3145static inline void preempt_latency_start(int val) { }
3146static inline void preempt_latency_stop(int val) { }
1da177e4
LT
3147#endif
3148
3149/*
dd41f596 3150 * Print scheduling while atomic bug:
1da177e4 3151 */
dd41f596 3152static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 3153{
664dfa65
DJ
3154 if (oops_in_progress)
3155 return;
3156
3df0fc5b
PZ
3157 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3158 prev->comm, prev->pid, preempt_count());
838225b4 3159
dd41f596 3160 debug_show_held_locks(prev);
e21f5b15 3161 print_modules();
dd41f596
IM
3162 if (irqs_disabled())
3163 print_irqtrace_events(prev);
8f47b187
TG
3164#ifdef CONFIG_DEBUG_PREEMPT
3165 if (in_atomic_preempt_off()) {
3166 pr_err("Preemption disabled at:");
3167 print_ip_sym(current->preempt_disable_ip);
3168 pr_cont("\n");
3169 }
3170#endif
6135fc1e 3171 dump_stack();
373d4d09 3172 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 3173}
1da177e4 3174
dd41f596
IM
3175/*
3176 * Various schedule()-time debugging checks and statistics:
3177 */
3178static inline void schedule_debug(struct task_struct *prev)
3179{
0d9e2632 3180#ifdef CONFIG_SCHED_STACK_END_CHECK
29d64551
JH
3181 if (task_stack_end_corrupted(prev))
3182 panic("corrupted stack end detected inside scheduler\n");
0d9e2632 3183#endif
b99def8b 3184
1dc0fffc 3185 if (unlikely(in_atomic_preempt_off())) {
dd41f596 3186 __schedule_bug(prev);
1dc0fffc
PZ
3187 preempt_count_set(PREEMPT_DISABLED);
3188 }
b3fbab05 3189 rcu_sleep_check();
dd41f596 3190
1da177e4
LT
3191 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3192
2d72376b 3193 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
3194}
3195
3196/*
3197 * Pick up the highest-prio task:
3198 */
3199static inline struct task_struct *
e7904a28 3200pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
dd41f596 3201{
37e117c0 3202 const struct sched_class *class = &fair_sched_class;
dd41f596 3203 struct task_struct *p;
1da177e4
LT
3204
3205 /*
dd41f596
IM
3206 * Optimization: we know that if all tasks are in
3207 * the fair class we can call that function directly:
1da177e4 3208 */
37e117c0 3209 if (likely(prev->sched_class == class &&
38033c37 3210 rq->nr_running == rq->cfs.h_nr_running)) {
e7904a28 3211 p = fair_sched_class.pick_next_task(rq, prev, cookie);
6ccdc84b
PZ
3212 if (unlikely(p == RETRY_TASK))
3213 goto again;
3214
3215 /* assumes fair_sched_class->next == idle_sched_class */
3216 if (unlikely(!p))
e7904a28 3217 p = idle_sched_class.pick_next_task(rq, prev, cookie);
6ccdc84b
PZ
3218
3219 return p;
1da177e4
LT
3220 }
3221
37e117c0 3222again:
34f971f6 3223 for_each_class(class) {
e7904a28 3224 p = class->pick_next_task(rq, prev, cookie);
37e117c0
PZ
3225 if (p) {
3226 if (unlikely(p == RETRY_TASK))
3227 goto again;
dd41f596 3228 return p;
37e117c0 3229 }
dd41f596 3230 }
34f971f6
PZ
3231
3232 BUG(); /* the idle class will always have a runnable task */
dd41f596 3233}
1da177e4 3234
dd41f596 3235/*
c259e01a 3236 * __schedule() is the main scheduler function.
edde96ea
PE
3237 *
3238 * The main means of driving the scheduler and thus entering this function are:
3239 *
3240 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3241 *
3242 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3243 * paths. For example, see arch/x86/entry_64.S.
3244 *
3245 * To drive preemption between tasks, the scheduler sets the flag in timer
3246 * interrupt handler scheduler_tick().
3247 *
3248 * 3. Wakeups don't really cause entry into schedule(). They add a
3249 * task to the run-queue and that's it.
3250 *
3251 * Now, if the new task added to the run-queue preempts the current
3252 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3253 * called on the nearest possible occasion:
3254 *
3255 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3256 *
3257 * - in syscall or exception context, at the next outermost
3258 * preempt_enable(). (this might be as soon as the wake_up()'s
3259 * spin_unlock()!)
3260 *
3261 * - in IRQ context, return from interrupt-handler to
3262 * preemptible context
3263 *
3264 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3265 * then at the next:
3266 *
3267 * - cond_resched() call
3268 * - explicit schedule() call
3269 * - return from syscall or exception to user-space
3270 * - return from interrupt-handler to user-space
bfd9b2b5 3271 *
b30f0e3f 3272 * WARNING: must be called with preemption disabled!
dd41f596 3273 */
499d7955 3274static void __sched notrace __schedule(bool preempt)
dd41f596
IM
3275{
3276 struct task_struct *prev, *next;
67ca7bde 3277 unsigned long *switch_count;
e7904a28 3278 struct pin_cookie cookie;
dd41f596 3279 struct rq *rq;
31656519 3280 int cpu;
dd41f596 3281
dd41f596
IM
3282 cpu = smp_processor_id();
3283 rq = cpu_rq(cpu);
dd41f596 3284 prev = rq->curr;
dd41f596 3285
b99def8b
PZ
3286 /*
3287 * do_exit() calls schedule() with preemption disabled as an exception;
3288 * however we must fix that up, otherwise the next task will see an
3289 * inconsistent (higher) preempt count.
3290 *
3291 * It also keeps the below schedule_debug() test from complaining
3292 * about this.
3293 */
3294 if (unlikely(prev->state == TASK_DEAD))
3295 preempt_enable_no_resched_notrace();
3296
dd41f596 3297 schedule_debug(prev);
1da177e4 3298
31656519 3299 if (sched_feat(HRTICK))
f333fdc9 3300 hrtick_clear(rq);
8f4d37ec 3301
46a5d164
PM
3302 local_irq_disable();
3303 rcu_note_context_switch();
3304
e0acd0a6
ON
3305 /*
3306 * Make sure that signal_pending_state()->signal_pending() below
3307 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3308 * done by the caller to avoid the race with signal_wake_up().
3309 */
3310 smp_mb__before_spinlock();
46a5d164 3311 raw_spin_lock(&rq->lock);
e7904a28 3312 cookie = lockdep_pin_lock(&rq->lock);
1da177e4 3313
9edfbfed
PZ
3314 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3315
246d86b5 3316 switch_count = &prev->nivcsw;
fc13aeba 3317 if (!preempt && prev->state) {
21aa9af0 3318 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3319 prev->state = TASK_RUNNING;
21aa9af0 3320 } else {
2acca55e
PZ
3321 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3322 prev->on_rq = 0;
3323
21aa9af0 3324 /*
2acca55e
PZ
3325 * If a worker went to sleep, notify and ask workqueue
3326 * whether it wants to wake up a task to maintain
3327 * concurrency.
21aa9af0
TH
3328 */
3329 if (prev->flags & PF_WQ_WORKER) {
3330 struct task_struct *to_wakeup;
3331
9b7f6597 3332 to_wakeup = wq_worker_sleeping(prev);
21aa9af0 3333 if (to_wakeup)
e7904a28 3334 try_to_wake_up_local(to_wakeup, cookie);
21aa9af0 3335 }
21aa9af0 3336 }
dd41f596 3337 switch_count = &prev->nvcsw;
1da177e4
LT
3338 }
3339
9edfbfed 3340 if (task_on_rq_queued(prev))
606dba2e
PZ
3341 update_rq_clock(rq);
3342
e7904a28 3343 next = pick_next_task(rq, prev, cookie);
f26f9aff 3344 clear_tsk_need_resched(prev);
f27dde8d 3345 clear_preempt_need_resched();
9edfbfed 3346 rq->clock_skip_update = 0;
1da177e4 3347
1da177e4 3348 if (likely(prev != next)) {
1da177e4
LT
3349 rq->nr_switches++;
3350 rq->curr = next;
3351 ++*switch_count;
3352
c73464b1 3353 trace_sched_switch(preempt, prev, next);
e7904a28 3354 rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
cbce1a68 3355 } else {
e7904a28 3356 lockdep_unpin_lock(&rq->lock, cookie);
05fa785c 3357 raw_spin_unlock_irq(&rq->lock);
cbce1a68 3358 }
1da177e4 3359
e3fca9e7 3360 balance_callback(rq);
1da177e4 3361}
8e05e96a 3362STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */
c259e01a 3363
9c40cef2
TG
3364static inline void sched_submit_work(struct task_struct *tsk)
3365{
3c7d5184 3366 if (!tsk->state || tsk_is_pi_blocked(tsk))
9c40cef2
TG
3367 return;
3368 /*
3369 * If we are going to sleep and we have plugged IO queued,
3370 * make sure to submit it to avoid deadlocks.
3371 */
3372 if (blk_needs_flush_plug(tsk))
3373 blk_schedule_flush_plug(tsk);
3374}
3375
722a9f92 3376asmlinkage __visible void __sched schedule(void)
c259e01a 3377{
9c40cef2
TG
3378 struct task_struct *tsk = current;
3379
3380 sched_submit_work(tsk);
bfd9b2b5 3381 do {
b30f0e3f 3382 preempt_disable();
fc13aeba 3383 __schedule(false);
b30f0e3f 3384 sched_preempt_enable_no_resched();
bfd9b2b5 3385 } while (need_resched());
c259e01a 3386}
1da177e4
LT
3387EXPORT_SYMBOL(schedule);
3388
91d1aa43 3389#ifdef CONFIG_CONTEXT_TRACKING
722a9f92 3390asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
3391{
3392 /*
3393 * If we come here after a random call to set_need_resched(),
3394 * or we have been woken up remotely but the IPI has not yet arrived,
3395 * we haven't yet exited the RCU idle mode. Do it here manually until
3396 * we find a better solution.
7cc78f8f
AL
3397 *
3398 * NB: There are buggy callers of this function. Ideally we
c467ea76 3399 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 3400 * too frequently to make sense yet.
20ab65e3 3401 */
7cc78f8f 3402 enum ctx_state prev_state = exception_enter();
20ab65e3 3403 schedule();
7cc78f8f 3404 exception_exit(prev_state);
20ab65e3
FW
3405}
3406#endif
3407
c5491ea7
TG
3408/**
3409 * schedule_preempt_disabled - called with preemption disabled
3410 *
3411 * Returns with preemption disabled. Note: preempt_count must be 1
3412 */
3413void __sched schedule_preempt_disabled(void)
3414{
ba74c144 3415 sched_preempt_enable_no_resched();
c5491ea7
TG
3416 schedule();
3417 preempt_disable();
3418}
3419
06b1f808 3420static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
3421{
3422 do {
47252cfb
SR
3423 /*
3424 * Because the function tracer can trace preempt_count_sub()
3425 * and it also uses preempt_enable/disable_notrace(), if
3426 * NEED_RESCHED is set, the preempt_enable_notrace() called
3427 * by the function tracer will call this function again and
3428 * cause infinite recursion.
3429 *
3430 * Preemption must be disabled here before the function
3431 * tracer can trace. Break up preempt_disable() into two
3432 * calls. One to disable preemption without fear of being
3433 * traced. The other to still record the preemption latency,
3434 * which can also be traced by the function tracer.
3435 */
499d7955 3436 preempt_disable_notrace();
47252cfb 3437 preempt_latency_start(1);
fc13aeba 3438 __schedule(true);
47252cfb 3439 preempt_latency_stop(1);
499d7955 3440 preempt_enable_no_resched_notrace();
a18b5d01
FW
3441
3442 /*
3443 * Check again in case we missed a preemption opportunity
3444 * between schedule and now.
3445 */
a18b5d01
FW
3446 } while (need_resched());
3447}
3448
1da177e4
LT
3449#ifdef CONFIG_PREEMPT
3450/*
2ed6e34f 3451 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3452 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3453 * occur there and call schedule directly.
3454 */
722a9f92 3455asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 3456{
1da177e4
LT
3457 /*
3458 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3459 * we do not want to preempt the current task. Just return..
1da177e4 3460 */
fbb00b56 3461 if (likely(!preemptible()))
1da177e4
LT
3462 return;
3463
a18b5d01 3464 preempt_schedule_common();
1da177e4 3465}
376e2424 3466NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 3467EXPORT_SYMBOL(preempt_schedule);
009f60e2 3468
009f60e2 3469/**
4eaca0a8 3470 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
3471 *
3472 * The tracing infrastructure uses preempt_enable_notrace to prevent
3473 * recursion and tracing preempt enabling caused by the tracing
3474 * infrastructure itself. But as tracing can happen in areas coming
3475 * from userspace or just about to enter userspace, a preempt enable
3476 * can occur before user_exit() is called. This will cause the scheduler
3477 * to be called when the system is still in usermode.
3478 *
3479 * To prevent this, the preempt_enable_notrace will use this function
3480 * instead of preempt_schedule() to exit user context if needed before
3481 * calling the scheduler.
3482 */
4eaca0a8 3483asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
3484{
3485 enum ctx_state prev_ctx;
3486
3487 if (likely(!preemptible()))
3488 return;
3489
3490 do {
47252cfb
SR
3491 /*
3492 * Because the function tracer can trace preempt_count_sub()
3493 * and it also uses preempt_enable/disable_notrace(), if
3494 * NEED_RESCHED is set, the preempt_enable_notrace() called
3495 * by the function tracer will call this function again and
3496 * cause infinite recursion.
3497 *
3498 * Preemption must be disabled here before the function
3499 * tracer can trace. Break up preempt_disable() into two
3500 * calls. One to disable preemption without fear of being
3501 * traced. The other to still record the preemption latency,
3502 * which can also be traced by the function tracer.
3503 */
3d8f74dd 3504 preempt_disable_notrace();
47252cfb 3505 preempt_latency_start(1);
009f60e2
ON
3506 /*
3507 * Needs preempt disabled in case user_exit() is traced
3508 * and the tracer calls preempt_enable_notrace() causing
3509 * an infinite recursion.
3510 */
3511 prev_ctx = exception_enter();
fc13aeba 3512 __schedule(true);
009f60e2
ON
3513 exception_exit(prev_ctx);
3514
47252cfb 3515 preempt_latency_stop(1);
3d8f74dd 3516 preempt_enable_no_resched_notrace();
009f60e2
ON
3517 } while (need_resched());
3518}
4eaca0a8 3519EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 3520
32e475d7 3521#endif /* CONFIG_PREEMPT */
1da177e4
LT
3522
3523/*
2ed6e34f 3524 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3525 * off of irq context.
 3526 * Note that this is called and returns with irqs disabled. This
 3527 * protects us against recursive calls from irq context.
3528 */
722a9f92 3529asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 3530{
b22366cd 3531 enum ctx_state prev_state;
6478d880 3532
2ed6e34f 3533 /* Catch callers which need to be fixed */
f27dde8d 3534 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 3535
b22366cd
FW
3536 prev_state = exception_enter();
3537
3a5c359a 3538 do {
3d8f74dd 3539 preempt_disable();
3a5c359a 3540 local_irq_enable();
fc13aeba 3541 __schedule(true);
3a5c359a 3542 local_irq_disable();
3d8f74dd 3543 sched_preempt_enable_no_resched();
5ed0cec0 3544 } while (need_resched());
b22366cd
FW
3545
3546 exception_exit(prev_state);
1da177e4
LT
3547}
3548
63859d4f 3549int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3550 void *key)
1da177e4 3551{
63859d4f 3552 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3553}
1da177e4
LT
3554EXPORT_SYMBOL(default_wake_function);
3555
b29739f9
IM
3556#ifdef CONFIG_RT_MUTEXES
3557
3558/*
3559 * rt_mutex_setprio - set the current priority of a task
3560 * @p: task
3561 * @prio: prio value (kernel-internal form)
3562 *
3563 * This function changes the 'effective' priority of a task. It does
3564 * not touch ->normal_prio like __setscheduler().
3565 *
c365c292
TG
3566 * Used by the rt_mutex code to implement priority inheritance
3567 * logic. Call site only calls if the priority of the task changed.
b29739f9 3568 */
36c8b586 3569void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3570{
ff77e468 3571 int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
83ab0aa0 3572 const struct sched_class *prev_class;
eb580751
PZ
3573 struct rq_flags rf;
3574 struct rq *rq;
b29739f9 3575
aab03e05 3576 BUG_ON(prio > MAX_PRIO);
b29739f9 3577
eb580751 3578 rq = __task_rq_lock(p, &rf);
b29739f9 3579
1c4dd99b
TG
3580 /*
 3581 * Idle task boosting is a no-no in general. There is one
 3582 * exception, when PREEMPT_RT and NOHZ are active:
3583 *
3584 * The idle task calls get_next_timer_interrupt() and holds
3585 * the timer wheel base->lock on the CPU and another CPU wants
3586 * to access the timer (probably to cancel it). We can safely
3587 * ignore the boosting request, as the idle CPU runs this code
3588 * with interrupts disabled and will complete the lock
3589 * protected section without being interrupted. So there is no
3590 * real need to boost.
3591 */
3592 if (unlikely(p == rq->idle)) {
3593 WARN_ON(p != rq->curr);
3594 WARN_ON(p->pi_blocked_on);
3595 goto out_unlock;
3596 }
3597
a8027073 3598 trace_sched_pi_setprio(p, prio);
d5f9f942 3599 oldprio = p->prio;
ff77e468
PZ
3600
3601 if (oldprio == prio)
3602 queue_flag &= ~DEQUEUE_MOVE;
3603
83ab0aa0 3604 prev_class = p->sched_class;
da0c1e65 3605 queued = task_on_rq_queued(p);
051a1d1a 3606 running = task_current(rq, p);
da0c1e65 3607 if (queued)
ff77e468 3608 dequeue_task(rq, p, queue_flag);
0e1f3483 3609 if (running)
f3cd1c4e 3610 put_prev_task(rq, p);
dd41f596 3611
2d3d891d
DF
3612 /*
 3613 * Boosting conditions are:
3614 * 1. -rt task is running and holds mutex A
3615 * --> -dl task blocks on mutex A
3616 *
3617 * 2. -dl task is running and holds mutex A
3618 * --> -dl task blocks on mutex A and could preempt the
3619 * running task
3620 */
3621 if (dl_prio(prio)) {
466af29b
ON
3622 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3623 if (!dl_prio(p->normal_prio) ||
3624 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
2d3d891d 3625 p->dl.dl_boosted = 1;
ff77e468 3626 queue_flag |= ENQUEUE_REPLENISH;
2d3d891d
DF
3627 } else
3628 p->dl.dl_boosted = 0;
aab03e05 3629 p->sched_class = &dl_sched_class;
2d3d891d
DF
3630 } else if (rt_prio(prio)) {
3631 if (dl_prio(oldprio))
3632 p->dl.dl_boosted = 0;
3633 if (oldprio < prio)
ff77e468 3634 queue_flag |= ENQUEUE_HEAD;
dd41f596 3635 p->sched_class = &rt_sched_class;
2d3d891d
DF
3636 } else {
3637 if (dl_prio(oldprio))
3638 p->dl.dl_boosted = 0;
746db944
BS
3639 if (rt_prio(oldprio))
3640 p->rt.timeout = 0;
dd41f596 3641 p->sched_class = &fair_sched_class;
2d3d891d 3642 }
dd41f596 3643
b29739f9
IM
3644 p->prio = prio;
3645
0e1f3483
HS
3646 if (running)
3647 p->sched_class->set_curr_task(rq);
da0c1e65 3648 if (queued)
ff77e468 3649 enqueue_task(rq, p, queue_flag);
cb469845 3650
da7a735e 3651 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 3652out_unlock:
4c9a4bc8 3653 preempt_disable(); /* avoid rq from going away on us */
eb580751 3654 __task_rq_unlock(rq, &rf);
4c9a4bc8
PZ
3655
3656 balance_callback(rq);
3657 preempt_enable();
b29739f9 3658}
b29739f9 3659#endif
d50dde5a 3660
36c8b586 3661void set_user_nice(struct task_struct *p, long nice)
1da177e4 3662{
da0c1e65 3663 int old_prio, delta, queued;
eb580751 3664 struct rq_flags rf;
70b97a7f 3665 struct rq *rq;
1da177e4 3666
75e45d51 3667 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
3668 return;
3669 /*
3670 * We have to be careful, if called from sys_setpriority(),
3671 * the task might be in the middle of scheduling on another CPU.
3672 */
eb580751 3673 rq = task_rq_lock(p, &rf);
1da177e4
LT
3674 /*
3675 * The RT priorities are set via sched_setscheduler(), but we still
3676 * allow the 'normal' nice value to be set - but as expected
 3677 * it won't have any effect on scheduling as long as the task is
aab03e05 3678 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 3679 */
aab03e05 3680 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
3681 p->static_prio = NICE_TO_PRIO(nice);
3682 goto out_unlock;
3683 }
da0c1e65
KT
3684 queued = task_on_rq_queued(p);
3685 if (queued)
1de64443 3686 dequeue_task(rq, p, DEQUEUE_SAVE);
1da177e4 3687
1da177e4 3688 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3689 set_load_weight(p);
b29739f9
IM
3690 old_prio = p->prio;
3691 p->prio = effective_prio(p);
3692 delta = p->prio - old_prio;
1da177e4 3693
da0c1e65 3694 if (queued) {
1de64443 3695 enqueue_task(rq, p, ENQUEUE_RESTORE);
1da177e4 3696 /*
d5f9f942
AM
3697 * If the task increased its priority or is running and
3698 * lowered its priority, then reschedule its CPU:
1da177e4 3699 */
d5f9f942 3700 if (delta < 0 || (delta > 0 && task_running(rq, p)))
8875125e 3701 resched_curr(rq);
1da177e4
LT
3702 }
3703out_unlock:
eb580751 3704 task_rq_unlock(rq, p, &rf);
1da177e4 3705}
1da177e4
LT
3706EXPORT_SYMBOL(set_user_nice);
3707
e43379f1
MM
3708/*
3709 * can_nice - check if a task can reduce its nice value
3710 * @p: task
3711 * @nice: nice value
3712 */
36c8b586 3713int can_nice(const struct task_struct *p, const int nice)
e43379f1 3714{
024f4747 3715 /* convert nice value [19,-20] to rlimit style value [1,40] */
7aa2c016 3716 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 3717
78d7d407 3718 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3719 capable(CAP_SYS_NICE));
3720}
3721
1da177e4
LT
3722#ifdef __ARCH_WANT_SYS_NICE
3723
3724/*
3725 * sys_nice - change the priority of the current process.
3726 * @increment: priority increment
3727 *
3728 * sys_setpriority is a more generic, but much slower function that
3729 * does similar things.
3730 */
5add95d4 3731SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3732{
48f24c4d 3733 long nice, retval;
1da177e4
LT
3734
3735 /*
3736 * Setpriority might change our priority at the same moment.
3737 * We don't have to worry. Conceptually one call occurs first
3738 * and we have a single winner.
3739 */
a9467fa3 3740 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 3741 nice = task_nice(current) + increment;
1da177e4 3742
a9467fa3 3743 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
3744 if (increment < 0 && !can_nice(current, nice))
3745 return -EPERM;
3746
1da177e4
LT
3747 retval = security_task_setnice(current, nice);
3748 if (retval)
3749 return retval;
3750
3751 set_user_nice(current, nice);
3752 return 0;
3753}
3754
3755#endif
3756
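/*
 * Editor's illustrative sketch, not part of this file: a minimal userspace
 * program exercising the nice(2) entry point above via the glibc wrapper.
 * On success glibc returns the new nice value; -1 with errno set on error.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	errno = 0;
	int prio = nice(5);	/* request a +5 nice increment (lower priority) */

	if (prio == -1 && errno != 0) {
		perror("nice");
		return 1;
	}
	printf("new nice value: %d\n", prio);
	return 0;
}
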
3757/**
3758 * task_prio - return the priority value of a given task.
3759 * @p: the task in question.
3760 *
e69f6186 3761 * Return: The priority value as seen by users in /proc.
1da177e4
LT
 3762 * RT tasks map to negative values (-100..-1). Normal tasks map to
 3763 * 0..39, with the default nice level at 20 (value is prio - MAX_RT_PRIO).
3764 */
36c8b586 3765int task_prio(const struct task_struct *p)
1da177e4
LT
3766{
3767 return p->prio - MAX_RT_PRIO;
3768}
3769
1da177e4
LT
3770/**
3771 * idle_cpu - is a given cpu idle currently?
3772 * @cpu: the processor in question.
e69f6186
YB
3773 *
3774 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
3775 */
3776int idle_cpu(int cpu)
3777{
908a3283
TG
3778 struct rq *rq = cpu_rq(cpu);
3779
3780 if (rq->curr != rq->idle)
3781 return 0;
3782
3783 if (rq->nr_running)
3784 return 0;
3785
3786#ifdef CONFIG_SMP
3787 if (!llist_empty(&rq->wake_list))
3788 return 0;
3789#endif
3790
3791 return 1;
1da177e4
LT
3792}
3793
1da177e4
LT
3794/**
3795 * idle_task - return the idle task for a given cpu.
3796 * @cpu: the processor in question.
e69f6186
YB
3797 *
3798 * Return: The idle task for the cpu @cpu.
1da177e4 3799 */
36c8b586 3800struct task_struct *idle_task(int cpu)
1da177e4
LT
3801{
3802 return cpu_rq(cpu)->idle;
3803}
3804
3805/**
3806 * find_process_by_pid - find a process with a matching PID value.
3807 * @pid: the pid in question.
e69f6186
YB
3808 *
3809 * The task of @pid, if found. %NULL otherwise.
1da177e4 3810 */
a9957449 3811static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3812{
228ebcbe 3813 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3814}
3815
aab03e05
DF
3816/*
3817 * This function initializes the sched_dl_entity of a newly becoming
3818 * SCHED_DEADLINE task.
3819 *
3820 * Only the static values are considered here, the actual runtime and the
3821 * absolute deadline will be properly calculated when the task is enqueued
3822 * for the first time with its new policy.
3823 */
3824static void
3825__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3826{
3827 struct sched_dl_entity *dl_se = &p->dl;
3828
aab03e05
DF
3829 dl_se->dl_runtime = attr->sched_runtime;
3830 dl_se->dl_deadline = attr->sched_deadline;
755378a4 3831 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
aab03e05 3832 dl_se->flags = attr->sched_flags;
332ac17e 3833 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
40767b0d
PZ
3834
3835 /*
3836 * Changing the parameters of a task is 'tricky' and we're not doing
3837 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3838 *
3839 * What we SHOULD do is delay the bandwidth release until the 0-lag
3840 * point. This would include retaining the task_struct until that time
3841 * and change dl_overflow() to not immediately decrement the current
3842 * amount.
3843 *
3844 * Instead we retain the current runtime/deadline and let the new
3845 * parameters take effect after the current reservation period lapses.
3846 * This is safe (albeit pessimistic) because the 0-lag point is always
3847 * before the current scheduling deadline.
3848 *
3849 * We can still have temporary overloads because we do not delay the
3850 * change in bandwidth until that time; so admission control is
3851 * not on the safe side. It does however guarantee tasks will never
3852 * consume more than promised.
3853 */
aab03e05
DF
3854}
3855
c13db6b1
SR
3856/*
3857 * sched_setparam() passes in -1 for its policy, to let the functions
3858 * it calls know not to change it.
3859 */
3860#define SETPARAM_POLICY -1
3861
c365c292
TG
3862static void __setscheduler_params(struct task_struct *p,
3863 const struct sched_attr *attr)
1da177e4 3864{
d50dde5a
DF
3865 int policy = attr->sched_policy;
3866
c13db6b1 3867 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
3868 policy = p->policy;
3869
1da177e4 3870 p->policy = policy;
d50dde5a 3871
aab03e05
DF
3872 if (dl_policy(policy))
3873 __setparam_dl(p, attr);
39fd8fd2 3874 else if (fair_policy(policy))
d50dde5a
DF
3875 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3876
39fd8fd2
PZ
3877 /*
3878 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3879 * !rt_policy. Always setting this ensures that things like
3880 * getparam()/getattr() don't report silly values for !rt tasks.
3881 */
3882 p->rt_priority = attr->sched_priority;
383afd09 3883 p->normal_prio = normal_prio(p);
c365c292
TG
3884 set_load_weight(p);
3885}
39fd8fd2 3886
c365c292
TG
3887/* Actually do priority change: must hold pi & rq lock. */
3888static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 3889 const struct sched_attr *attr, bool keep_boost)
c365c292
TG
3890{
3891 __setscheduler_params(p, attr);
d50dde5a 3892
383afd09 3893 /*
0782e63b
TG
3894 * Keep a potential priority boosting if called from
3895 * sched_setscheduler().
383afd09 3896 */
0782e63b
TG
3897 if (keep_boost)
3898 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3899 else
3900 p->prio = normal_prio(p);
383afd09 3901
aab03e05
DF
3902 if (dl_prio(p->prio))
3903 p->sched_class = &dl_sched_class;
3904 else if (rt_prio(p->prio))
ffd44db5
PZ
3905 p->sched_class = &rt_sched_class;
3906 else
3907 p->sched_class = &fair_sched_class;
1da177e4 3908}
aab03e05
DF
3909
3910static void
3911__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3912{
3913 struct sched_dl_entity *dl_se = &p->dl;
3914
3915 attr->sched_priority = p->rt_priority;
3916 attr->sched_runtime = dl_se->dl_runtime;
3917 attr->sched_deadline = dl_se->dl_deadline;
755378a4 3918 attr->sched_period = dl_se->dl_period;
aab03e05
DF
3919 attr->sched_flags = dl_se->flags;
3920}
3921
3922/*
3923 * This function validates the new parameters of a -deadline task.
 3924 * We require the deadline to be non-zero and greater than or equal
755378a4 3925 * to the runtime, and the period to be either zero or greater than
332ac17e 3926 * or equal to the deadline. Furthermore, we have to be sure that
b0827819
JL
3927 * user parameters are above the internal resolution of 1us (we
3928 * check sched_runtime only since it is always the smaller one) and
3929 * below 2^63 ns (we have to check both sched_deadline and
3930 * sched_period, as the latter can be zero).
aab03e05
DF
3931 */
3932static bool
3933__checkparam_dl(const struct sched_attr *attr)
3934{
b0827819
JL
3935 /* deadline != 0 */
3936 if (attr->sched_deadline == 0)
3937 return false;
3938
3939 /*
3940 * Since we truncate DL_SCALE bits, make sure we're at least
3941 * that big.
3942 */
3943 if (attr->sched_runtime < (1ULL << DL_SCALE))
3944 return false;
3945
3946 /*
3947 * Since we use the MSB for wrap-around and sign issues, make
3948 * sure it's not set (mind that period can be equal to zero).
3949 */
3950 if (attr->sched_deadline & (1ULL << 63) ||
3951 attr->sched_period & (1ULL << 63))
3952 return false;
3953
3954 /* runtime <= deadline <= period (if period != 0) */
3955 if ((attr->sched_period != 0 &&
3956 attr->sched_period < attr->sched_deadline) ||
3957 attr->sched_deadline < attr->sched_runtime)
3958 return false;
3959
3960 return true;
aab03e05
DF
3961}
3962
c69e8d9c
DH
3963/*
3964 * check the target process has a UID that matches the current process's
3965 */
3966static bool check_same_owner(struct task_struct *p)
3967{
3968 const struct cred *cred = current_cred(), *pcred;
3969 bool match;
3970
3971 rcu_read_lock();
3972 pcred = __task_cred(p);
9c806aa0
EB
3973 match = (uid_eq(cred->euid, pcred->euid) ||
3974 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
3975 rcu_read_unlock();
3976 return match;
3977}
3978
75381608
WL
3979static bool dl_param_changed(struct task_struct *p,
3980 const struct sched_attr *attr)
3981{
3982 struct sched_dl_entity *dl_se = &p->dl;
3983
3984 if (dl_se->dl_runtime != attr->sched_runtime ||
3985 dl_se->dl_deadline != attr->sched_deadline ||
3986 dl_se->dl_period != attr->sched_period ||
3987 dl_se->flags != attr->sched_flags)
3988 return true;
3989
3990 return false;
3991}
3992
d50dde5a
DF
3993static int __sched_setscheduler(struct task_struct *p,
3994 const struct sched_attr *attr,
dbc7f069 3995 bool user, bool pi)
1da177e4 3996{
383afd09
SR
3997 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3998 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 3999 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 4000 int new_effective_prio, policy = attr->sched_policy;
83ab0aa0 4001 const struct sched_class *prev_class;
eb580751 4002 struct rq_flags rf;
ca94c442 4003 int reset_on_fork;
ff77e468 4004 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
eb580751 4005 struct rq *rq;
1da177e4 4006
66e5393a
SR
4007 /* may grab non-irq protected spin_locks */
4008 BUG_ON(in_interrupt());
1da177e4
LT
4009recheck:
4010 /* double check policy once rq lock held */
ca94c442
LP
4011 if (policy < 0) {
4012 reset_on_fork = p->sched_reset_on_fork;
1da177e4 4013 policy = oldpolicy = p->policy;
ca94c442 4014 } else {
7479f3c9 4015 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 4016
20f9cd2a 4017 if (!valid_policy(policy))
ca94c442
LP
4018 return -EINVAL;
4019 }
4020
7479f3c9
PZ
4021 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
4022 return -EINVAL;
4023
1da177e4
LT
4024 /*
4025 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
4026 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4027 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 4028 */
0bb040a4 4029 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
d50dde5a 4030 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
1da177e4 4031 return -EINVAL;
aab03e05
DF
4032 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
4033 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
4034 return -EINVAL;
4035
37e4ab3f
OC
4036 /*
4037 * Allow unprivileged RT tasks to decrease priority:
4038 */
961ccddd 4039 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 4040 if (fair_policy(policy)) {
d0ea0268 4041 if (attr->sched_nice < task_nice(p) &&
eaad4513 4042 !can_nice(p, attr->sched_nice))
d50dde5a
DF
4043 return -EPERM;
4044 }
4045
e05606d3 4046 if (rt_policy(policy)) {
a44702e8
ON
4047 unsigned long rlim_rtprio =
4048 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
4049
4050 /* can't set/change the rt policy */
4051 if (policy != p->policy && !rlim_rtprio)
4052 return -EPERM;
4053
4054 /* can't increase priority */
d50dde5a
DF
4055 if (attr->sched_priority > p->rt_priority &&
4056 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
4057 return -EPERM;
4058 }
c02aa73b 4059
d44753b8
JL
4060 /*
4061 * Can't set/change SCHED_DEADLINE policy at all for now
4062 * (safest behavior); in the future we would like to allow
4063 * unprivileged DL tasks to increase their relative deadline
4064 * or reduce their runtime (both ways reducing utilization)
4065 */
4066 if (dl_policy(policy))
4067 return -EPERM;
4068
dd41f596 4069 /*
c02aa73b
DH
4070 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4071 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 4072 */
20f9cd2a 4073 if (idle_policy(p->policy) && !idle_policy(policy)) {
d0ea0268 4074 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
4075 return -EPERM;
4076 }
5fe1d75f 4077
37e4ab3f 4078 /* can't change other user's priorities */
c69e8d9c 4079 if (!check_same_owner(p))
37e4ab3f 4080 return -EPERM;
ca94c442
LP
4081
4082 /* Normal users shall not reset the sched_reset_on_fork flag */
4083 if (p->sched_reset_on_fork && !reset_on_fork)
4084 return -EPERM;
37e4ab3f 4085 }
1da177e4 4086
725aad24 4087 if (user) {
b0ae1981 4088 retval = security_task_setscheduler(p);
725aad24
JF
4089 if (retval)
4090 return retval;
4091 }
4092
b29739f9
IM
4093 /*
4094 * make sure no PI-waiters arrive (or leave) while we are
4095 * changing the priority of the task:
0122ec5b 4096 *
25985edc 4097 * To be able to change p->policy safely, the appropriate
1da177e4
LT
4098 * runqueue lock must be held.
4099 */
eb580751 4100 rq = task_rq_lock(p, &rf);
dc61b1d6 4101
34f971f6
PZ
4102 /*
 4103 * Changing the policy of the stop threads is a very bad idea
4104 */
4105 if (p == rq->stop) {
eb580751 4106 task_rq_unlock(rq, p, &rf);
34f971f6
PZ
4107 return -EINVAL;
4108 }
4109
a51e9198 4110 /*
d6b1e911
TG
4111 * If not changing anything there's no need to proceed further,
4112 * but store a possible modification of reset_on_fork.
a51e9198 4113 */
d50dde5a 4114 if (unlikely(policy == p->policy)) {
d0ea0268 4115 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
4116 goto change;
4117 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
4118 goto change;
75381608 4119 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 4120 goto change;
d50dde5a 4121
d6b1e911 4122 p->sched_reset_on_fork = reset_on_fork;
eb580751 4123 task_rq_unlock(rq, p, &rf);
a51e9198
DF
4124 return 0;
4125 }
d50dde5a 4126change:
a51e9198 4127
dc61b1d6 4128 if (user) {
332ac17e 4129#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
4130 /*
4131 * Do not allow realtime tasks into groups that have no runtime
4132 * assigned.
4133 */
4134 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
4135 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4136 !task_group_is_autogroup(task_group(p))) {
eb580751 4137 task_rq_unlock(rq, p, &rf);
dc61b1d6
PZ
4138 return -EPERM;
4139 }
dc61b1d6 4140#endif
332ac17e
DF
4141#ifdef CONFIG_SMP
4142 if (dl_bandwidth_enabled() && dl_policy(policy)) {
4143 cpumask_t *span = rq->rd->span;
332ac17e
DF
4144
4145 /*
4146 * Don't allow tasks with an affinity mask smaller than
4147 * the entire root_domain to become SCHED_DEADLINE. We
4148 * will also fail if there's no bandwidth available.
4149 */
e4099a5e
PZ
4150 if (!cpumask_subset(span, &p->cpus_allowed) ||
4151 rq->rd->dl_bw.bw == 0) {
eb580751 4152 task_rq_unlock(rq, p, &rf);
332ac17e
DF
4153 return -EPERM;
4154 }
4155 }
4156#endif
4157 }
dc61b1d6 4158
1da177e4
LT
4159 /* recheck policy now with rq lock held */
4160 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4161 policy = oldpolicy = -1;
eb580751 4162 task_rq_unlock(rq, p, &rf);
1da177e4
LT
4163 goto recheck;
4164 }
332ac17e
DF
4165
4166 /*
4167 * If setscheduling to SCHED_DEADLINE (or changing the parameters
4168 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4169 * is available.
4170 */
e4099a5e 4171 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
eb580751 4172 task_rq_unlock(rq, p, &rf);
332ac17e
DF
4173 return -EBUSY;
4174 }
4175
c365c292
TG
4176 p->sched_reset_on_fork = reset_on_fork;
4177 oldprio = p->prio;
4178
dbc7f069
PZ
4179 if (pi) {
4180 /*
4181 * Take priority boosted tasks into account. If the new
4182 * effective priority is unchanged, we just store the new
4183 * normal parameters and do not touch the scheduler class and
4184 * the runqueue. This will be done when the task deboost
4185 * itself.
4186 */
4187 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
ff77e468
PZ
4188 if (new_effective_prio == oldprio)
4189 queue_flags &= ~DEQUEUE_MOVE;
c365c292
TG
4190 }
4191
da0c1e65 4192 queued = task_on_rq_queued(p);
051a1d1a 4193 running = task_current(rq, p);
da0c1e65 4194 if (queued)
ff77e468 4195 dequeue_task(rq, p, queue_flags);
0e1f3483 4196 if (running)
f3cd1c4e 4197 put_prev_task(rq, p);
f6b53205 4198
83ab0aa0 4199 prev_class = p->sched_class;
dbc7f069 4200 __setscheduler(rq, p, attr, pi);
f6b53205 4201
0e1f3483
HS
4202 if (running)
4203 p->sched_class->set_curr_task(rq);
da0c1e65 4204 if (queued) {
81a44c54
TG
4205 /*
4206 * We enqueue to tail when the priority of a task is
4207 * increased (user space view).
4208 */
ff77e468
PZ
4209 if (oldprio < p->prio)
4210 queue_flags |= ENQUEUE_HEAD;
1de64443 4211
ff77e468 4212 enqueue_task(rq, p, queue_flags);
81a44c54 4213 }
cb469845 4214
da7a735e 4215 check_class_changed(rq, p, prev_class, oldprio);
4c9a4bc8 4216 preempt_disable(); /* avoid rq from going away on us */
eb580751 4217 task_rq_unlock(rq, p, &rf);
b29739f9 4218
dbc7f069
PZ
4219 if (pi)
4220 rt_mutex_adjust_pi(p);
95e02ca9 4221
4c9a4bc8
PZ
4222 /*
4223 * Run balance callbacks after we've adjusted the PI chain.
4224 */
4225 balance_callback(rq);
4226 preempt_enable();
95e02ca9 4227
1da177e4
LT
4228 return 0;
4229}
961ccddd 4230
7479f3c9
PZ
4231static int _sched_setscheduler(struct task_struct *p, int policy,
4232 const struct sched_param *param, bool check)
4233{
4234 struct sched_attr attr = {
4235 .sched_policy = policy,
4236 .sched_priority = param->sched_priority,
4237 .sched_nice = PRIO_TO_NICE(p->static_prio),
4238 };
4239
c13db6b1
SR
4240 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4241 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
4242 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4243 policy &= ~SCHED_RESET_ON_FORK;
4244 attr.sched_policy = policy;
4245 }
4246
dbc7f069 4247 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 4248}
961ccddd
RR
4249/**
4250 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4251 * @p: the task in question.
4252 * @policy: new policy.
4253 * @param: structure containing the new RT priority.
4254 *
e69f6186
YB
4255 * Return: 0 on success. An error code otherwise.
4256 *
961ccddd
RR
 4257 * NOTE that the task may already be dead.
4258 */
4259int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4260 const struct sched_param *param)
961ccddd 4261{
7479f3c9 4262 return _sched_setscheduler(p, policy, param, true);
961ccddd 4263}
1da177e4
LT
4264EXPORT_SYMBOL_GPL(sched_setscheduler);
4265
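/*
 * Editor's illustrative sketch, not part of this file: switching the calling
 * thread to SCHED_FIFO priority 10 through the sched_setscheduler(2) syscall
 * that eventually reaches __sched_setscheduler() above.  Without
 * CAP_SYS_NICE (or a suitable RLIMIT_RTPRIO) this fails with EPERM, matching
 * the permission checks earlier in this file.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("running as SCHED_FIFO, priority %d\n", sp.sched_priority);
	return 0;
}
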
d50dde5a
DF
4266int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4267{
dbc7f069 4268 return __sched_setscheduler(p, attr, true, true);
d50dde5a
DF
4269}
4270EXPORT_SYMBOL_GPL(sched_setattr);
4271
961ccddd
RR
4272/**
4273 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4274 * @p: the task in question.
4275 * @policy: new policy.
4276 * @param: structure containing the new RT priority.
4277 *
4278 * Just like sched_setscheduler, only don't bother checking if the
4279 * current context has permission. For example, this is needed in
4280 * stop_machine(): we create temporary high priority worker threads,
4281 * but our caller might not have that capability.
e69f6186
YB
4282 *
4283 * Return: 0 on success. An error code otherwise.
961ccddd
RR
4284 */
4285int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4286 const struct sched_param *param)
961ccddd 4287{
7479f3c9 4288 return _sched_setscheduler(p, policy, param, false);
961ccddd 4289}
84778472 4290EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
961ccddd 4291
95cdf3b7
IM
4292static int
4293do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4294{
1da177e4
LT
4295 struct sched_param lparam;
4296 struct task_struct *p;
36c8b586 4297 int retval;
1da177e4
LT
4298
4299 if (!param || pid < 0)
4300 return -EINVAL;
4301 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4302 return -EFAULT;
5fe1d75f
ON
4303
4304 rcu_read_lock();
4305 retval = -ESRCH;
1da177e4 4306 p = find_process_by_pid(pid);
5fe1d75f
ON
4307 if (p != NULL)
4308 retval = sched_setscheduler(p, policy, &lparam);
4309 rcu_read_unlock();
36c8b586 4310
1da177e4
LT
4311 return retval;
4312}
4313
d50dde5a
DF
4314/*
4315 * Mimics kernel/events/core.c perf_copy_attr().
4316 */
4317static int sched_copy_attr(struct sched_attr __user *uattr,
4318 struct sched_attr *attr)
4319{
4320 u32 size;
4321 int ret;
4322
4323 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4324 return -EFAULT;
4325
4326 /*
4327 * zero the full structure, so that a short copy will be nice.
4328 */
4329 memset(attr, 0, sizeof(*attr));
4330
4331 ret = get_user(size, &uattr->size);
4332 if (ret)
4333 return ret;
4334
4335 if (size > PAGE_SIZE) /* silly large */
4336 goto err_size;
4337
4338 if (!size) /* abi compat */
4339 size = SCHED_ATTR_SIZE_VER0;
4340
4341 if (size < SCHED_ATTR_SIZE_VER0)
4342 goto err_size;
4343
4344 /*
4345 * If we're handed a bigger struct than we know of,
4346 * ensure all the unknown bits are 0 - i.e. new
4347 * user-space does not rely on any kernel feature
 4348 * extensions we don't know about yet.
4349 */
4350 if (size > sizeof(*attr)) {
4351 unsigned char __user *addr;
4352 unsigned char __user *end;
4353 unsigned char val;
4354
4355 addr = (void __user *)uattr + sizeof(*attr);
4356 end = (void __user *)uattr + size;
4357
4358 for (; addr < end; addr++) {
4359 ret = get_user(val, addr);
4360 if (ret)
4361 return ret;
4362 if (val)
4363 goto err_size;
4364 }
4365 size = sizeof(*attr);
4366 }
4367
4368 ret = copy_from_user(attr, uattr, size);
4369 if (ret)
4370 return -EFAULT;
4371
4372 /*
4373 * XXX: do we want to be lenient like existing syscalls; or do we want
4374 * to be strict and return an error on out-of-bounds values?
4375 */
75e45d51 4376 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 4377
e78c7bca 4378 return 0;
d50dde5a
DF
4379
4380err_size:
4381 put_user(sizeof(*attr), &uattr->size);
e78c7bca 4382 return -E2BIG;
d50dde5a
DF
4383}
4384
1da177e4
LT
4385/**
4386 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4387 * @pid: the pid in question.
4388 * @policy: new policy.
4389 * @param: structure containing the new RT priority.
e69f6186
YB
4390 *
4391 * Return: 0 on success. An error code otherwise.
1da177e4 4392 */
5add95d4
HC
4393SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4394 struct sched_param __user *, param)
1da177e4 4395{
c21761f1
JB
4396 /* negative values for policy are not valid */
4397 if (policy < 0)
4398 return -EINVAL;
4399
1da177e4
LT
4400 return do_sched_setscheduler(pid, policy, param);
4401}
4402
4403/**
4404 * sys_sched_setparam - set/change the RT priority of a thread
4405 * @pid: the pid in question.
4406 * @param: structure containing the new RT priority.
e69f6186
YB
4407 *
4408 * Return: 0 on success. An error code otherwise.
1da177e4 4409 */
5add95d4 4410SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4411{
c13db6b1 4412 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
4413}
4414
d50dde5a
DF
4415/**
4416 * sys_sched_setattr - same as above, but with extended sched_attr
4417 * @pid: the pid in question.
5778fccf 4418 * @uattr: structure containing the extended parameters.
db66d756 4419 * @flags: for future extension.
d50dde5a 4420 */
6d35ab48
PZ
4421SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4422 unsigned int, flags)
d50dde5a
DF
4423{
4424 struct sched_attr attr;
4425 struct task_struct *p;
4426 int retval;
4427
6d35ab48 4428 if (!uattr || pid < 0 || flags)
d50dde5a
DF
4429 return -EINVAL;
4430
143cf23d
MK
4431 retval = sched_copy_attr(uattr, &attr);
4432 if (retval)
4433 return retval;
d50dde5a 4434
b14ed2c2 4435 if ((int)attr.sched_policy < 0)
dbdb2275 4436 return -EINVAL;
d50dde5a
DF
4437
4438 rcu_read_lock();
4439 retval = -ESRCH;
4440 p = find_process_by_pid(pid);
4441 if (p != NULL)
4442 retval = sched_setattr(p, &attr);
4443 rcu_read_unlock();
4444
4445 return retval;
4446}
4447
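/*
 * Editor's illustrative sketch, not part of this file: glibc provides no
 * sched_setattr() wrapper, so userspace goes through syscall(2).  The local
 * struct mirrors the uapi sched_attr layout consumed by sched_copy_attr()
 * above (this mirror and the SCHED_DEADLINE value 6 are assumptions based on
 * the uapi headers, not definitions from this file).  The chosen runtime,
 * deadline and period satisfy __checkparam_dl(): runtime <= deadline <=
 * period, runtime >= 1us, and no value with bit 63 set.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr_sketch {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

#define SCHED_DEADLINE_SKETCH 6	/* SCHED_DEADLINE from the uapi headers */

int main(void)
{
	struct sched_attr_sketch attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE_SKETCH,
		.sched_runtime	= 10ULL * 1000 * 1000,	/* 10 ms */
		.sched_deadline	= 30ULL * 1000 * 1000,	/* 30 ms */
		.sched_period	= 100ULL * 1000 * 1000,	/* 100 ms */
	};

	if (syscall(__NR_sched_setattr, 0, &attr, 0) == -1) {
		perror("sched_setattr");
		return 1;
	}
	printf("running as SCHED_DEADLINE\n");
	return 0;
}
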
1da177e4
LT
4448/**
4449 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4450 * @pid: the pid in question.
e69f6186
YB
4451 *
4452 * Return: On success, the policy of the thread. Otherwise, a negative error
4453 * code.
1da177e4 4454 */
5add95d4 4455SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4456{
36c8b586 4457 struct task_struct *p;
3a5c359a 4458 int retval;
1da177e4
LT
4459
4460 if (pid < 0)
3a5c359a 4461 return -EINVAL;
1da177e4
LT
4462
4463 retval = -ESRCH;
5fe85be0 4464 rcu_read_lock();
1da177e4
LT
4465 p = find_process_by_pid(pid);
4466 if (p) {
4467 retval = security_task_getscheduler(p);
4468 if (!retval)
ca94c442
LP
4469 retval = p->policy
4470 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4471 }
5fe85be0 4472 rcu_read_unlock();
1da177e4
LT
4473 return retval;
4474}
4475
4476/**
ca94c442 4477 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4478 * @pid: the pid in question.
4479 * @param: structure containing the RT priority.
e69f6186
YB
4480 *
4481 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4482 * code.
1da177e4 4483 */
5add95d4 4484SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4485{
ce5f7f82 4486 struct sched_param lp = { .sched_priority = 0 };
36c8b586 4487 struct task_struct *p;
3a5c359a 4488 int retval;
1da177e4
LT
4489
4490 if (!param || pid < 0)
3a5c359a 4491 return -EINVAL;
1da177e4 4492
5fe85be0 4493 rcu_read_lock();
1da177e4
LT
4494 p = find_process_by_pid(pid);
4495 retval = -ESRCH;
4496 if (!p)
4497 goto out_unlock;
4498
4499 retval = security_task_getscheduler(p);
4500 if (retval)
4501 goto out_unlock;
4502
ce5f7f82
PZ
4503 if (task_has_rt_policy(p))
4504 lp.sched_priority = p->rt_priority;
5fe85be0 4505 rcu_read_unlock();
1da177e4
LT
4506
4507 /*
4508 * This one might sleep, we cannot do it with a spinlock held ...
4509 */
4510 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4511
1da177e4
LT
4512 return retval;
4513
4514out_unlock:
5fe85be0 4515 rcu_read_unlock();
1da177e4
LT
4516 return retval;
4517}
4518
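/*
 * Editor's illustrative sketch, not part of this file: reading back the
 * policy and RT priority of the calling thread through the two syscalls
 * above, using the glibc wrappers.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp;
	int policy = sched_getscheduler(0);

	if (policy == -1 || sched_getparam(0, &sp) == -1) {
		perror("sched_getscheduler/sched_getparam");
		return 1;
	}
	printf("policy %d, rt priority %d\n", policy, sp.sched_priority);
	return 0;
}
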
d50dde5a
DF
4519static int sched_read_attr(struct sched_attr __user *uattr,
4520 struct sched_attr *attr,
4521 unsigned int usize)
4522{
4523 int ret;
4524
4525 if (!access_ok(VERIFY_WRITE, uattr, usize))
4526 return -EFAULT;
4527
4528 /*
4529 * If we're handed a smaller struct than we know of,
4530 * ensure all the unknown bits are 0 - i.e. old
 4531 * user-space does not get incomplete information.
4532 */
4533 if (usize < sizeof(*attr)) {
4534 unsigned char *addr;
4535 unsigned char *end;
4536
4537 addr = (void *)attr + usize;
4538 end = (void *)attr + sizeof(*attr);
4539
4540 for (; addr < end; addr++) {
4541 if (*addr)
22400674 4542 return -EFBIG;
d50dde5a
DF
4543 }
4544
4545 attr->size = usize;
4546 }
4547
4efbc454 4548 ret = copy_to_user(uattr, attr, attr->size);
d50dde5a
DF
4549 if (ret)
4550 return -EFAULT;
4551
22400674 4552 return 0;
d50dde5a
DF
4553}
4554
4555/**
aab03e05 4556 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 4557 * @pid: the pid in question.
5778fccf 4558 * @uattr: structure containing the extended parameters.
d50dde5a 4559 * @size: sizeof(attr) for fwd/bwd comp.
db66d756 4560 * @flags: for future extension.
d50dde5a 4561 */
6d35ab48
PZ
4562SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4563 unsigned int, size, unsigned int, flags)
d50dde5a
DF
4564{
4565 struct sched_attr attr = {
4566 .size = sizeof(struct sched_attr),
4567 };
4568 struct task_struct *p;
4569 int retval;
4570
4571 if (!uattr || pid < 0 || size > PAGE_SIZE ||
6d35ab48 4572 size < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
4573 return -EINVAL;
4574
4575 rcu_read_lock();
4576 p = find_process_by_pid(pid);
4577 retval = -ESRCH;
4578 if (!p)
4579 goto out_unlock;
4580
4581 retval = security_task_getscheduler(p);
4582 if (retval)
4583 goto out_unlock;
4584
4585 attr.sched_policy = p->policy;
7479f3c9
PZ
4586 if (p->sched_reset_on_fork)
4587 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05
DF
4588 if (task_has_dl_policy(p))
4589 __getparam_dl(p, &attr);
4590 else if (task_has_rt_policy(p))
d50dde5a
DF
4591 attr.sched_priority = p->rt_priority;
4592 else
d0ea0268 4593 attr.sched_nice = task_nice(p);
d50dde5a
DF
4594
4595 rcu_read_unlock();
4596
4597 retval = sched_read_attr(uattr, &attr, size);
4598 return retval;
4599
4600out_unlock:
4601 rcu_read_unlock();
4602 return retval;
4603}
4604
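/*
 * Editor's illustrative sketch, not part of this file: reading the extended
 * attributes back through sched_getattr(2), again via syscall(2) since glibc
 * has no wrapper.  The struct mirror and the size handshake match what
 * sched_read_attr() above expects; the layout itself is an assumption taken
 * from the uapi headers.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr_sketch {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr_sketch attr = { 0 };

	if (syscall(__NR_sched_getattr, 0, &attr, sizeof(attr), 0) == -1) {
		perror("sched_getattr");
		return 1;
	}
	printf("policy %u, nice %d, rt prio %u\n",
	       attr.sched_policy, attr.sched_nice, attr.sched_priority);
	return 0;
}
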
96f874e2 4605long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4606{
5a16f3d3 4607 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4608 struct task_struct *p;
4609 int retval;
1da177e4 4610
23f5d142 4611 rcu_read_lock();
1da177e4
LT
4612
4613 p = find_process_by_pid(pid);
4614 if (!p) {
23f5d142 4615 rcu_read_unlock();
1da177e4
LT
4616 return -ESRCH;
4617 }
4618
23f5d142 4619 /* Prevent p going away */
1da177e4 4620 get_task_struct(p);
23f5d142 4621 rcu_read_unlock();
1da177e4 4622
14a40ffc
TH
4623 if (p->flags & PF_NO_SETAFFINITY) {
4624 retval = -EINVAL;
4625 goto out_put_task;
4626 }
5a16f3d3
RR
4627 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4628 retval = -ENOMEM;
4629 goto out_put_task;
4630 }
4631 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4632 retval = -ENOMEM;
4633 goto out_free_cpus_allowed;
4634 }
1da177e4 4635 retval = -EPERM;
4c44aaaf
EB
4636 if (!check_same_owner(p)) {
4637 rcu_read_lock();
4638 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4639 rcu_read_unlock();
16303ab2 4640 goto out_free_new_mask;
4c44aaaf
EB
4641 }
4642 rcu_read_unlock();
4643 }
1da177e4 4644
b0ae1981 4645 retval = security_task_setscheduler(p);
e7834f8f 4646 if (retval)
16303ab2 4647 goto out_free_new_mask;
e7834f8f 4648
e4099a5e
PZ
4649
4650 cpuset_cpus_allowed(p, cpus_allowed);
4651 cpumask_and(new_mask, in_mask, cpus_allowed);
4652
332ac17e
DF
4653 /*
4654 * Since bandwidth control happens on root_domain basis,
4655 * if admission test is enabled, we only admit -deadline
4656 * tasks allowed to run on all the CPUs in the task's
4657 * root_domain.
4658 */
4659#ifdef CONFIG_SMP
f1e3a093
KT
4660 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4661 rcu_read_lock();
4662 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 4663 retval = -EBUSY;
f1e3a093 4664 rcu_read_unlock();
16303ab2 4665 goto out_free_new_mask;
332ac17e 4666 }
f1e3a093 4667 rcu_read_unlock();
332ac17e
DF
4668 }
4669#endif
49246274 4670again:
25834c73 4671 retval = __set_cpus_allowed_ptr(p, new_mask, true);
1da177e4 4672
8707d8b8 4673 if (!retval) {
5a16f3d3
RR
4674 cpuset_cpus_allowed(p, cpus_allowed);
4675 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4676 /*
4677 * We must have raced with a concurrent cpuset
4678 * update. Just reset the cpus_allowed to the
4679 * cpuset's cpus_allowed
4680 */
5a16f3d3 4681 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4682 goto again;
4683 }
4684 }
16303ab2 4685out_free_new_mask:
5a16f3d3
RR
4686 free_cpumask_var(new_mask);
4687out_free_cpus_allowed:
4688 free_cpumask_var(cpus_allowed);
4689out_put_task:
1da177e4 4690 put_task_struct(p);
1da177e4
LT
4691 return retval;
4692}
4693
4694static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4695 struct cpumask *new_mask)
1da177e4 4696{
96f874e2
RR
4697 if (len < cpumask_size())
4698 cpumask_clear(new_mask);
4699 else if (len > cpumask_size())
4700 len = cpumask_size();
4701
1da177e4
LT
4702 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4703}
4704
4705/**
4706 * sys_sched_setaffinity - set the cpu affinity of a process
4707 * @pid: pid of the process
4708 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4709 * @user_mask_ptr: user-space pointer to the new cpu mask
e69f6186
YB
4710 *
4711 * Return: 0 on success. An error code otherwise.
1da177e4 4712 */
5add95d4
HC
4713SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4714 unsigned long __user *, user_mask_ptr)
1da177e4 4715{
5a16f3d3 4716 cpumask_var_t new_mask;
1da177e4
LT
4717 int retval;
4718
5a16f3d3
RR
4719 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4720 return -ENOMEM;
1da177e4 4721
5a16f3d3
RR
4722 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4723 if (retval == 0)
4724 retval = sched_setaffinity(pid, new_mask);
4725 free_cpumask_var(new_mask);
4726 return retval;
1da177e4
LT
4727}
4728
96f874e2 4729long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4730{
36c8b586 4731 struct task_struct *p;
31605683 4732 unsigned long flags;
1da177e4 4733 int retval;
1da177e4 4734
23f5d142 4735 rcu_read_lock();
1da177e4
LT
4736
4737 retval = -ESRCH;
4738 p = find_process_by_pid(pid);
4739 if (!p)
4740 goto out_unlock;
4741
e7834f8f
DQ
4742 retval = security_task_getscheduler(p);
4743 if (retval)
4744 goto out_unlock;
4745
013fdb80 4746 raw_spin_lock_irqsave(&p->pi_lock, flags);
6acce3ef 4747 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
013fdb80 4748 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4749
4750out_unlock:
23f5d142 4751 rcu_read_unlock();
1da177e4 4752
9531b62f 4753 return retval;
1da177e4
LT
4754}
4755
4756/**
4757 * sys_sched_getaffinity - get the cpu affinity of a process
4758 * @pid: pid of the process
4759 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4760 * @user_mask_ptr: user-space pointer to hold the current cpu mask
e69f6186 4761 *
599b4840
ZW
4762 * Return: size of CPU mask copied to user_mask_ptr on success. An
4763 * error code otherwise.
1da177e4 4764 */
5add95d4
HC
4765SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4766 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4767{
4768 int ret;
f17c8607 4769 cpumask_var_t mask;
1da177e4 4770
84fba5ec 4771 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4772 return -EINVAL;
4773 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4774 return -EINVAL;
4775
f17c8607
RR
4776 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4777 return -ENOMEM;
1da177e4 4778
f17c8607
RR
4779 ret = sched_getaffinity(pid, mask);
4780 if (ret == 0) {
8bc037fb 4781 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4782
4783 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4784 ret = -EFAULT;
4785 else
cd3d8031 4786 ret = retlen;
f17c8607
RR
4787 }
4788 free_cpumask_var(mask);
1da177e4 4789
f17c8607 4790 return ret;
1da177e4
LT
4791}
4792
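/*
 * Editor's illustrative sketch, not part of this file: pinning the calling
 * thread to CPU 0 and reading the mask back through the glibc wrappers for
 * the two affinity syscalls above.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_setaffinity");
		return 1;
	}

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("pinned to CPU 0: %s\n", CPU_ISSET(0, &set) ? "yes" : "no");
	return 0;
}
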
4793/**
4794 * sys_sched_yield - yield the current processor to other threads.
4795 *
dd41f596
IM
4796 * This function yields the current CPU to other tasks. If there are no
4797 * other threads running on this CPU then this function will return.
e69f6186
YB
4798 *
4799 * Return: 0.
1da177e4 4800 */
5add95d4 4801SYSCALL_DEFINE0(sched_yield)
1da177e4 4802{
70b97a7f 4803 struct rq *rq = this_rq_lock();
1da177e4 4804
2d72376b 4805 schedstat_inc(rq, yld_count);
4530d7ab 4806 current->sched_class->yield_task(rq);
1da177e4
LT
4807
4808 /*
4809 * Since we are going to call schedule() anyway, there's
4810 * no need to preempt or enable interrupts:
4811 */
4812 __release(rq->lock);
8a25d5de 4813 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4814 do_raw_spin_unlock(&rq->lock);
ba74c144 4815 sched_preempt_enable_no_resched();
1da177e4
LT
4816
4817 schedule();
4818
4819 return 0;
4820}
4821
02b67cc3 4822int __sched _cond_resched(void)
1da177e4 4823{
fe32d3cd 4824 if (should_resched(0)) {
a18b5d01 4825 preempt_schedule_common();
1da177e4
LT
4826 return 1;
4827 }
4828 return 0;
4829}
02b67cc3 4830EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
4831
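/*
 * Editor's illustrative sketch, not part of this file and not compilable on
 * its own: kernel code in process context typically drops cond_resched()
 * (which lands in _cond_resched() above) into long loops, so that a
 * voluntary-preemption kernel still gets a reschedule point per iteration.
 * example_process_one() is a hypothetical helper standing in for real work.
 */
static void example_process_many(void **items, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		example_process_one(items[i]);	/* hypothetical per-item work */
		cond_resched();			/* reschedule point for long loops */
	}
}
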
4832/*
613afbf8 4833 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4834 * call schedule, and on return reacquire the lock.
4835 *
41a2d6cf 4836 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4837 * operations here to prevent schedule() from being called twice (once via
4838 * spin_unlock(), once by hand).
4839 */
613afbf8 4840int __cond_resched_lock(spinlock_t *lock)
1da177e4 4841{
fe32d3cd 4842 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
4843 int ret = 0;
4844
f607c668
PZ
4845 lockdep_assert_held(lock);
4846
4a81e832 4847 if (spin_needbreak(lock) || resched) {
1da177e4 4848 spin_unlock(lock);
d86ee480 4849 if (resched)
a18b5d01 4850 preempt_schedule_common();
95c354fe
NP
4851 else
4852 cpu_relax();
6df3cecb 4853 ret = 1;
1da177e4 4854 spin_lock(lock);
1da177e4 4855 }
6df3cecb 4856 return ret;
1da177e4 4857}
613afbf8 4858EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4859
613afbf8 4860int __sched __cond_resched_softirq(void)
1da177e4
LT
4861{
4862 BUG_ON(!in_softirq());
4863
fe32d3cd 4864 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
98d82567 4865 local_bh_enable();
a18b5d01 4866 preempt_schedule_common();
1da177e4
LT
4867 local_bh_disable();
4868 return 1;
4869 }
4870 return 0;
4871}
613afbf8 4872EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4873
1da177e4
LT
4874/**
4875 * yield - yield the current processor to other threads.
4876 *
8e3fabfd
PZ
4877 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4878 *
4879 * The scheduler is at all times free to pick the calling task as the most
 4880 * eligible task to run; if removing the yield() call from your code breaks
 4881 * it, it's already broken.
4882 *
4883 * Typical broken usage is:
4884 *
4885 * while (!event)
4886 * yield();
4887 *
4888 * where one assumes that yield() will let 'the other' process run that will
4889 * make event true. If the current task is a SCHED_FIFO task that will never
4890 * happen. Never use yield() as a progress guarantee!!
4891 *
4892 * If you want to use yield() to wait for something, use wait_event().
4893 * If you want to use yield() to be 'nice' for others, use cond_resched().
4894 * If you still want to use yield(), do not!
1da177e4
LT
4895 */
4896void __sched yield(void)
4897{
4898 set_current_state(TASK_RUNNING);
4899 sys_sched_yield();
4900}
1da177e4
LT
4901EXPORT_SYMBOL(yield);
4902
d95f4122
MG
4903/**
4904 * yield_to - yield the current processor to another thread in
4905 * your thread group, or accelerate that thread toward the
4906 * processor it's on.
16addf95
RD
4907 * @p: target task
4908 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4909 *
4910 * It's the caller's job to ensure that the target task struct
4911 * can't go away on us before we can do any checks.
4912 *
e69f6186 4913 * Return:
7b270f60
PZ
4914 * true (>0) if we indeed boosted the target task.
4915 * false (0) if we failed to boost the target.
4916 * -ESRCH if there's no task to yield to.
d95f4122 4917 */
fa93384f 4918int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
4919{
4920 struct task_struct *curr = current;
4921 struct rq *rq, *p_rq;
4922 unsigned long flags;
c3c18640 4923 int yielded = 0;
d95f4122
MG
4924
4925 local_irq_save(flags);
4926 rq = this_rq();
4927
4928again:
4929 p_rq = task_rq(p);
7b270f60
PZ
4930 /*
4931 * If we're the only runnable task on the rq and target rq also
4932 * has only one task, there's absolutely no point in yielding.
4933 */
4934 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4935 yielded = -ESRCH;
4936 goto out_irq;
4937 }
4938
d95f4122 4939 double_rq_lock(rq, p_rq);
39e24d8f 4940 if (task_rq(p) != p_rq) {
d95f4122
MG
4941 double_rq_unlock(rq, p_rq);
4942 goto again;
4943 }
4944
4945 if (!curr->sched_class->yield_to_task)
7b270f60 4946 goto out_unlock;
d95f4122
MG
4947
4948 if (curr->sched_class != p->sched_class)
7b270f60 4949 goto out_unlock;
d95f4122
MG
4950
4951 if (task_running(p_rq, p) || p->state)
7b270f60 4952 goto out_unlock;
d95f4122
MG
4953
4954 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4955 if (yielded) {
d95f4122 4956 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4957 /*
4958 * Make p's CPU reschedule; pick_next_entity takes care of
4959 * fairness.
4960 */
4961 if (preempt && rq != p_rq)
8875125e 4962 resched_curr(p_rq);
6d1cafd8 4963 }
d95f4122 4964
7b270f60 4965out_unlock:
d95f4122 4966 double_rq_unlock(rq, p_rq);
7b270f60 4967out_irq:
d95f4122
MG
4968 local_irq_restore(flags);
4969
7b270f60 4970 if (yielded > 0)
d95f4122
MG
4971 schedule();
4972
4973 return yielded;
4974}
4975EXPORT_SYMBOL_GPL(yield_to);
4976
1da177e4 4977/*
41a2d6cf 4978 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4979 * that process accounting knows that this is a task in IO wait state.
1da177e4 4980 */
1da177e4
LT
4981long __sched io_schedule_timeout(long timeout)
4982{
9cff8ade
N
4983 int old_iowait = current->in_iowait;
4984 struct rq *rq;
1da177e4
LT
4985 long ret;
4986
9cff8ade 4987 current->in_iowait = 1;
10d784ea 4988 blk_schedule_flush_plug(current);
9cff8ade 4989
0ff92245 4990 delayacct_blkio_start();
9cff8ade 4991 rq = raw_rq();
1da177e4
LT
4992 atomic_inc(&rq->nr_iowait);
4993 ret = schedule_timeout(timeout);
9cff8ade 4994 current->in_iowait = old_iowait;
1da177e4 4995 atomic_dec(&rq->nr_iowait);
0ff92245 4996 delayacct_blkio_end();
9cff8ade 4997
1da177e4
LT
4998 return ret;
4999}
9cff8ade 5000EXPORT_SYMBOL(io_schedule_timeout);
1da177e4
LT
5001
5002/**
5003 * sys_sched_get_priority_max - return maximum RT priority.
5004 * @policy: scheduling class.
5005 *
e69f6186
YB
5006 * Return: On success, this syscall returns the maximum
5007 * rt_priority that can be used by a given scheduling class.
5008 * On failure, a negative error code is returned.
1da177e4 5009 */
5add95d4 5010SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
5011{
5012 int ret = -EINVAL;
5013
5014 switch (policy) {
5015 case SCHED_FIFO:
5016 case SCHED_RR:
5017 ret = MAX_USER_RT_PRIO-1;
5018 break;
aab03e05 5019 case SCHED_DEADLINE:
1da177e4 5020 case SCHED_NORMAL:
b0a9499c 5021 case SCHED_BATCH:
dd41f596 5022 case SCHED_IDLE:
1da177e4
LT
5023 ret = 0;
5024 break;
5025 }
5026 return ret;
5027}
5028
5029/**
5030 * sys_sched_get_priority_min - return minimum RT priority.
5031 * @policy: scheduling class.
5032 *
e69f6186
YB
5033 * Return: On success, this syscall returns the minimum
5034 * rt_priority that can be used by a given scheduling class.
5035 * On failure, a negative error code is returned.
1da177e4 5036 */
5add95d4 5037SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
5038{
5039 int ret = -EINVAL;
5040
5041 switch (policy) {
5042 case SCHED_FIFO:
5043 case SCHED_RR:
5044 ret = 1;
5045 break;
aab03e05 5046 case SCHED_DEADLINE:
1da177e4 5047 case SCHED_NORMAL:
b0a9499c 5048 case SCHED_BATCH:
dd41f596 5049 case SCHED_IDLE:
1da177e4
LT
5050 ret = 0;
5051 }
5052 return ret;
5053}
5054
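/*
 * Editor's illustrative sketch, not part of this file: querying the valid
 * static priority range for SCHED_FIFO through the two syscalls above, via
 * the glibc wrappers.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int max = sched_get_priority_max(SCHED_FIFO);
	int min = sched_get_priority_min(SCHED_FIFO);

	if (max == -1 || min == -1) {
		perror("sched_get_priority_max/min");
		return 1;
	}
	printf("SCHED_FIFO static priorities: %d..%d\n", min, max);
	return 0;
}
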
5055/**
5056 * sys_sched_rr_get_interval - return the default timeslice of a process.
5057 * @pid: pid of the process.
5058 * @interval: userspace pointer to the timeslice value.
5059 *
5060 * this syscall writes the default timeslice value of a given process
5061 * into the user-space timespec buffer. A value of '0' means infinity.
e69f6186
YB
5062 *
5063 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5064 * an error code.
1da177e4 5065 */
17da2bd9 5066SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 5067 struct timespec __user *, interval)
1da177e4 5068{
36c8b586 5069 struct task_struct *p;
a4ec24b4 5070 unsigned int time_slice;
eb580751
PZ
5071 struct rq_flags rf;
5072 struct timespec t;
dba091b9 5073 struct rq *rq;
3a5c359a 5074 int retval;
1da177e4
LT
5075
5076 if (pid < 0)
3a5c359a 5077 return -EINVAL;
1da177e4
LT
5078
5079 retval = -ESRCH;
1a551ae7 5080 rcu_read_lock();
1da177e4
LT
5081 p = find_process_by_pid(pid);
5082 if (!p)
5083 goto out_unlock;
5084
5085 retval = security_task_getscheduler(p);
5086 if (retval)
5087 goto out_unlock;
5088
eb580751 5089 rq = task_rq_lock(p, &rf);
a57beec5
PZ
5090 time_slice = 0;
5091 if (p->sched_class->get_rr_interval)
5092 time_slice = p->sched_class->get_rr_interval(rq, p);
eb580751 5093 task_rq_unlock(rq, p, &rf);
a4ec24b4 5094
1a551ae7 5095 rcu_read_unlock();
a4ec24b4 5096 jiffies_to_timespec(time_slice, &t);
1da177e4 5097 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 5098 return retval;
3a5c359a 5099
1da177e4 5100out_unlock:
1a551ae7 5101 rcu_read_unlock();
1da177e4
LT
5102 return retval;
5103}
5104
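/*
 * Editor's illustrative sketch, not part of this file: reading the
 * round-robin timeslice of the calling thread with sched_rr_get_interval(2);
 * a value of 0 means the policy has no timeslice (see the comment above).
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == -1) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
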
7c731e0a 5105static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 5106
82a1fcb9 5107void sched_show_task(struct task_struct *p)
1da177e4 5108{
1da177e4 5109 unsigned long free = 0;
4e79752c 5110 int ppid;
1f8a7633 5111 unsigned long state = p->state;
1da177e4 5112
1f8a7633
TH
5113 if (state)
5114 state = __ffs(state) + 1;
28d0686c 5115 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 5116 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 5117#if BITS_PER_LONG == 32
1da177e4 5118 if (state == TASK_RUNNING)
3df0fc5b 5119 printk(KERN_CONT " running ");
1da177e4 5120 else
3df0fc5b 5121 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
5122#else
5123 if (state == TASK_RUNNING)
3df0fc5b 5124 printk(KERN_CONT " running task ");
1da177e4 5125 else
3df0fc5b 5126 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
5127#endif
5128#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 5129 free = stack_not_used(p);
1da177e4 5130#endif
a90e984c 5131 ppid = 0;
4e79752c 5132 rcu_read_lock();
a90e984c
ON
5133 if (pid_alive(p))
5134 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 5135 rcu_read_unlock();
3df0fc5b 5136 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4e79752c 5137 task_pid_nr(p), ppid,
aa47b7e0 5138 (unsigned long)task_thread_info(p)->flags);
1da177e4 5139
3d1cb205 5140 print_worker_info(KERN_INFO, p);
5fb5e6de 5141 show_stack(p, NULL);
1da177e4
LT
5142}
5143
e59e2ae2 5144void show_state_filter(unsigned long state_filter)
1da177e4 5145{
36c8b586 5146 struct task_struct *g, *p;
1da177e4 5147
4bd77321 5148#if BITS_PER_LONG == 32
3df0fc5b
PZ
5149 printk(KERN_INFO
5150 " task PC stack pid father\n");
1da177e4 5151#else
3df0fc5b
PZ
5152 printk(KERN_INFO
5153 " task PC stack pid father\n");
1da177e4 5154#endif
510f5acc 5155 rcu_read_lock();
5d07f420 5156 for_each_process_thread(g, p) {
1da177e4
LT
5157 /*
 5158 * reset the NMI-timeout, listing all tasks on a slow
25985edc 5159 * console might take a lot of time:
57675cb9
AR
5160 * Also, reset softlockup watchdogs on all CPUs, because
5161 * another CPU might be blocked waiting for us to process
5162 * an IPI.
1da177e4
LT
5163 */
5164 touch_nmi_watchdog();
57675cb9 5165 touch_all_softlockup_watchdogs();
39bc89fd 5166 if (!state_filter || (p->state & state_filter))
82a1fcb9 5167 sched_show_task(p);
5d07f420 5168 }
1da177e4 5169
dd41f596 5170#ifdef CONFIG_SCHED_DEBUG
fb90a6e9
RV
5171 if (!state_filter)
5172 sysrq_sched_debug_show();
dd41f596 5173#endif
510f5acc 5174 rcu_read_unlock();
e59e2ae2
IM
5175 /*
5176 * Only show locks if all tasks are dumped:
5177 */
93335a21 5178 if (!state_filter)
e59e2ae2 5179 debug_show_all_locks();
1da177e4
LT
5180}
5181
0db0628d 5182void init_idle_bootup_task(struct task_struct *idle)
1df21055 5183{
dd41f596 5184 idle->sched_class = &idle_sched_class;
1df21055
IM
5185}
5186
f340c0d1
IM
5187/**
5188 * init_idle - set up an idle thread for a given CPU
5189 * @idle: task in question
5190 * @cpu: cpu the idle task belongs to
5191 *
5192 * NOTE: this function does not set the idle thread's NEED_RESCHED
5193 * flag, to make booting more robust.
5194 */
0db0628d 5195void init_idle(struct task_struct *idle, int cpu)
1da177e4 5196{
70b97a7f 5197 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
5198 unsigned long flags;
5199
25834c73
PZ
5200 raw_spin_lock_irqsave(&idle->pi_lock, flags);
5201 raw_spin_lock(&rq->lock);
5cbd54ef 5202
5e1576ed 5203 __sched_fork(0, idle);
06b83b5f 5204 idle->state = TASK_RUNNING;
dd41f596
IM
5205 idle->se.exec_start = sched_clock();
5206
e1b77c92
MR
5207 kasan_unpoison_task_stack(idle);
5208
de9b8f5d
PZ
5209#ifdef CONFIG_SMP
5210 /*
 5211 * It's possible that init_idle() gets called multiple times on a task;
5212 * in that case do_set_cpus_allowed() will not do the right thing.
5213 *
5214 * And since this is boot we can forgo the serialization.
5215 */
5216 set_cpus_allowed_common(idle, cpumask_of(cpu));
5217#endif
6506cf6c
PZ
5218 /*
 5219	 * We're having a chicken and egg problem: even though we are
5220 * holding rq->lock, the cpu isn't yet set to this cpu so the
5221 * lockdep check in task_group() will fail.
5222 *
5223 * Similar case to sched_fork(). / Alternatively we could
5224 * use task_rq_lock() here and obtain the other rq->lock.
5225 *
5226 * Silence PROVE_RCU
5227 */
5228 rcu_read_lock();
dd41f596 5229 __set_task_cpu(idle, cpu);
6506cf6c 5230 rcu_read_unlock();
1da177e4 5231
1da177e4 5232 rq->curr = rq->idle = idle;
da0c1e65 5233 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 5234#ifdef CONFIG_SMP
3ca7a440 5235 idle->on_cpu = 1;
4866cde0 5236#endif
25834c73
PZ
5237 raw_spin_unlock(&rq->lock);
5238 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
5239
5240 /* Set the preempt count _outside_ the spinlocks! */
01028747 5241 init_idle_preempt_count(idle, cpu);
55cd5340 5242
dd41f596
IM
5243 /*
5244 * The idle tasks have their own, simple scheduling class:
5245 */
5246 idle->sched_class = &idle_sched_class;
868baf07 5247 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 5248 vtime_init_idle(idle, cpu);
de9b8f5d 5249#ifdef CONFIG_SMP
f1c6f1a7
CE
5250 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5251#endif
19978ca6
IM
5252}
5253
f82f8042
JL
5254int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5255 const struct cpumask *trial)
5256{
5257 int ret = 1, trial_cpus;
5258 struct dl_bw *cur_dl_b;
5259 unsigned long flags;
5260
bb2bc55a
MG
5261 if (!cpumask_weight(cur))
5262 return ret;
5263
75e23e49 5264 rcu_read_lock_sched();
f82f8042
JL
5265 cur_dl_b = dl_bw_of(cpumask_any(cur));
5266 trial_cpus = cpumask_weight(trial);
5267
5268 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5269 if (cur_dl_b->bw != -1 &&
5270 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5271 ret = 0;
5272 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
75e23e49 5273 rcu_read_unlock_sched();
f82f8042
JL
5274
5275 return ret;
5276}
5277
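/*
 * Rough worked example of the bandwidth check above (numbers are
 * illustrative): if the per-CPU deadline cap cur_dl_b->bw corresponds to
 * 95% of a CPU and the already-admitted tasks account for 2.5 CPUs worth
 * of bandwidth (cur_dl_b->total_bw), shrinking 'cur' to a 2-CPU 'trial'
 * is rejected because 0.95 * 2 < 2.5, while a 3-CPU 'trial' passes.
 */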
7f51412a
JL
5278int task_can_attach(struct task_struct *p,
5279 const struct cpumask *cs_cpus_allowed)
5280{
5281 int ret = 0;
5282
5283 /*
5284 * Kthreads which disallow setaffinity shouldn't be moved
5285 * to a new cpuset; we don't want to change their cpu
5286 * affinity and isolating such threads by their set of
5287 * allowed nodes is unnecessary. Thus, cpusets are not
5288 * applicable for such threads. This prevents checking for
5289 * success of set_cpus_allowed_ptr() on all attached tasks
5290 * before cpus_allowed may be changed.
5291 */
5292 if (p->flags & PF_NO_SETAFFINITY) {
5293 ret = -EINVAL;
5294 goto out;
5295 }
5296
5297#ifdef CONFIG_SMP
5298 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5299 cs_cpus_allowed)) {
5300 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5301 cs_cpus_allowed);
75e23e49 5302 struct dl_bw *dl_b;
7f51412a
JL
5303 bool overflow;
5304 int cpus;
5305 unsigned long flags;
5306
75e23e49
JL
5307 rcu_read_lock_sched();
5308 dl_b = dl_bw_of(dest_cpu);
7f51412a
JL
5309 raw_spin_lock_irqsave(&dl_b->lock, flags);
5310 cpus = dl_bw_cpus(dest_cpu);
5311 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5312 if (overflow)
5313 ret = -EBUSY;
5314 else {
5315 /*
5316 * We reserve space for this task in the destination
5317 * root_domain, as we can't fail after this point.
5318 * We will free resources in the source root_domain
5319 * later on (see set_cpus_allowed_dl()).
5320 */
5321 __dl_add(dl_b, p->dl.dl_bw);
5322 }
5323 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
75e23e49 5324 rcu_read_unlock_sched();
7f51412a
JL
5325
5326 }
5327#endif
5328out:
5329 return ret;
5330}
5331
1da177e4 5332#ifdef CONFIG_SMP
1da177e4 5333
e26fbffd
TG
5334static bool sched_smp_initialized __read_mostly;
5335
e6628d5b
MG
5336#ifdef CONFIG_NUMA_BALANCING
5337/* Migrate current task p to target_cpu */
5338int migrate_task_to(struct task_struct *p, int target_cpu)
5339{
5340 struct migration_arg arg = { p, target_cpu };
5341 int curr_cpu = task_cpu(p);
5342
5343 if (curr_cpu == target_cpu)
5344 return 0;
5345
5346 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5347 return -EINVAL;
5348
5349 /* TODO: This is not properly updating schedstats */
5350
286549dc 5351 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
5352 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5353}
0ec8aa00
PZ
5354
5355/*
5356 * Requeue a task on a given node and accurately track the number of NUMA
5357 * tasks on the runqueues
5358 */
5359void sched_setnuma(struct task_struct *p, int nid)
5360{
da0c1e65 5361 bool queued, running;
eb580751
PZ
5362 struct rq_flags rf;
5363 struct rq *rq;
0ec8aa00 5364
eb580751 5365 rq = task_rq_lock(p, &rf);
da0c1e65 5366 queued = task_on_rq_queued(p);
0ec8aa00
PZ
5367 running = task_current(rq, p);
5368
da0c1e65 5369 if (queued)
1de64443 5370 dequeue_task(rq, p, DEQUEUE_SAVE);
0ec8aa00 5371 if (running)
f3cd1c4e 5372 put_prev_task(rq, p);
0ec8aa00
PZ
5373
5374 p->numa_preferred_nid = nid;
0ec8aa00
PZ
5375
5376 if (running)
5377 p->sched_class->set_curr_task(rq);
da0c1e65 5378 if (queued)
1de64443 5379 enqueue_task(rq, p, ENQUEUE_RESTORE);
eb580751 5380 task_rq_unlock(rq, p, &rf);
0ec8aa00 5381}
5cc389bc 5382#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 5383
1da177e4 5384#ifdef CONFIG_HOTPLUG_CPU
054b9108 5385/*
48c5ccae
PZ
5386 * Ensures that the idle task is using init_mm right before its cpu goes
5387 * offline.
054b9108 5388 */
48c5ccae 5389void idle_task_exit(void)
1da177e4 5390{
48c5ccae 5391 struct mm_struct *mm = current->active_mm;
e76bd8d9 5392
48c5ccae 5393 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 5394
a53efe5f 5395 if (mm != &init_mm) {
f98db601 5396 switch_mm_irqs_off(mm, &init_mm, current);
a53efe5f
MS
5397 finish_arch_post_lock_switch();
5398 }
48c5ccae 5399 mmdrop(mm);
1da177e4
LT
5400}
5401
5402/*
5d180232
PZ
5403 * Since this CPU is going 'away' for a while, fold any nr_active delta
5404 * we might have. Assumes we're called after migrate_tasks() so that the
5405 * nr_active count is stable.
5406 *
5407 * Also see the comment "Global load-average calculations".
1da177e4 5408 */
5d180232 5409static void calc_load_migrate(struct rq *rq)
1da177e4 5410{
5d180232
PZ
5411 long delta = calc_load_fold_active(rq);
5412 if (delta)
5413 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
5414}
5415
3f1d2a31
PZ
5416static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5417{
5418}
5419
5420static const struct sched_class fake_sched_class = {
5421 .put_prev_task = put_prev_task_fake,
5422};
5423
5424static struct task_struct fake_task = {
5425 /*
5426 * Avoid pull_{rt,dl}_task()
5427 */
5428 .prio = MAX_PRIO + 1,
5429 .sched_class = &fake_sched_class,
5430};
5431
48f24c4d 5432/*
48c5ccae
PZ
5433 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5434 * try_to_wake_up()->select_task_rq().
5435 *
 5436	 * Called with rq->lock held even though we're in stop_machine() and
 5437	 * there's no concurrency possible; we hold the required locks anyway
5438 * because of lock validation efforts.
1da177e4 5439 */
5e16bbc2 5440static void migrate_tasks(struct rq *dead_rq)
1da177e4 5441{
5e16bbc2 5442 struct rq *rq = dead_rq;
48c5ccae 5443 struct task_struct *next, *stop = rq->stop;
e7904a28 5444 struct pin_cookie cookie;
48c5ccae 5445 int dest_cpu;
1da177e4
LT
5446
5447 /*
48c5ccae
PZ
5448 * Fudge the rq selection such that the below task selection loop
5449 * doesn't get stuck on the currently eligible stop task.
5450 *
5451 * We're currently inside stop_machine() and the rq is either stuck
5452 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5453 * either way we should never end up calling schedule() until we're
5454 * done here.
1da177e4 5455 */
48c5ccae 5456 rq->stop = NULL;
48f24c4d 5457
77bd3970
FW
5458 /*
5459 * put_prev_task() and pick_next_task() sched
5460 * class method both need to have an up-to-date
5461 * value of rq->clock[_task]
5462 */
5463 update_rq_clock(rq);
5464
5e16bbc2 5465 for (;;) {
48c5ccae
PZ
5466 /*
5467 * There's this thread running, bail when that's the only
5468 * remaining thread.
5469 */
5470 if (rq->nr_running == 1)
dd41f596 5471 break;
48c5ccae 5472
cbce1a68 5473 /*
5473e0cc 5474 * pick_next_task assumes pinned rq->lock.
cbce1a68 5475 */
e7904a28
PZ
5476 cookie = lockdep_pin_lock(&rq->lock);
5477 next = pick_next_task(rq, &fake_task, cookie);
48c5ccae 5478 BUG_ON(!next);
79c53799 5479 next->sched_class->put_prev_task(rq, next);
e692ab53 5480
5473e0cc
WL
5481 /*
5482 * Rules for changing task_struct::cpus_allowed are holding
5483 * both pi_lock and rq->lock, such that holding either
5484 * stabilizes the mask.
5485 *
 5486	 * Dropping rq->lock is not quite as disastrous as it usually is
5487 * because !cpu_active at this point, which means load-balance
5488 * will not interfere. Also, stop-machine.
5489 */
e7904a28 5490 lockdep_unpin_lock(&rq->lock, cookie);
5473e0cc
WL
5491 raw_spin_unlock(&rq->lock);
5492 raw_spin_lock(&next->pi_lock);
5493 raw_spin_lock(&rq->lock);
5494
5495 /*
5496 * Since we're inside stop-machine, _nothing_ should have
5497 * changed the task, WARN if weird stuff happened, because in
5498 * that case the above rq->lock drop is a fail too.
5499 */
5500 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5501 raw_spin_unlock(&next->pi_lock);
5502 continue;
5503 }
5504
48c5ccae 5505 /* Find suitable destination for @next, with force if needed. */
5e16bbc2 5506 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
48c5ccae 5507
5e16bbc2
PZ
5508 rq = __migrate_task(rq, next, dest_cpu);
5509 if (rq != dead_rq) {
5510 raw_spin_unlock(&rq->lock);
5511 rq = dead_rq;
5512 raw_spin_lock(&rq->lock);
5513 }
5473e0cc 5514 raw_spin_unlock(&next->pi_lock);
1da177e4 5515 }
dce48a84 5516
48c5ccae 5517 rq->stop = stop;
dce48a84 5518}
1da177e4
LT
5519#endif /* CONFIG_HOTPLUG_CPU */
5520
1f11eb6a
GH
5521static void set_rq_online(struct rq *rq)
5522{
5523 if (!rq->online) {
5524 const struct sched_class *class;
5525
c6c4927b 5526 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5527 rq->online = 1;
5528
5529 for_each_class(class) {
5530 if (class->rq_online)
5531 class->rq_online(rq);
5532 }
5533 }
5534}
5535
5536static void set_rq_offline(struct rq *rq)
5537{
5538 if (rq->online) {
5539 const struct sched_class *class;
5540
5541 for_each_class(class) {
5542 if (class->rq_offline)
5543 class->rq_offline(rq);
5544 }
5545
c6c4927b 5546 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5547 rq->online = 0;
5548 }
5549}
5550
9cf7243d 5551static void set_cpu_rq_start_time(unsigned int cpu)
1da177e4 5552{
969c7921 5553 struct rq *rq = cpu_rq(cpu);
1da177e4 5554
a803f026
CM
5555 rq->age_stamp = sched_clock_cpu(cpu);
5556}
5557
4cb98839
PZ
5558static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5559
3e9830dc 5560#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5561
d039ac60 5562static __read_mostly int sched_debug_enabled;
f6630114 5563
d039ac60 5564static int __init sched_debug_setup(char *str)
f6630114 5565{
d039ac60 5566 sched_debug_enabled = 1;
f6630114
MT
5567
5568 return 0;
5569}
d039ac60
PZ
5570early_param("sched_debug", sched_debug_setup);
5571
5572static inline bool sched_debug(void)
5573{
5574 return sched_debug_enabled;
5575}
f6630114 5576
7c16ec58 5577static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5578 struct cpumask *groupmask)
1da177e4 5579{
4dcf6aff 5580 struct sched_group *group = sd->groups;
1da177e4 5581
96f874e2 5582 cpumask_clear(groupmask);
4dcf6aff
IM
5583
5584 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5585
5586 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5587 printk("does not load-balance\n");
4dcf6aff 5588 if (sd->parent)
3df0fc5b
PZ
5589 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5590 " has parent");
4dcf6aff 5591 return -1;
41c7ce9a
NP
5592 }
5593
333470ee
TH
5594 printk(KERN_CONT "span %*pbl level %s\n",
5595 cpumask_pr_args(sched_domain_span(sd)), sd->name);
4dcf6aff 5596
758b2cdc 5597 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5598 printk(KERN_ERR "ERROR: domain->span does not contain "
5599 "CPU%d\n", cpu);
4dcf6aff 5600 }
758b2cdc 5601 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5602 printk(KERN_ERR "ERROR: domain->groups does not contain"
5603 " CPU%d\n", cpu);
4dcf6aff 5604 }
1da177e4 5605
4dcf6aff 5606 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5607 do {
4dcf6aff 5608 if (!group) {
3df0fc5b
PZ
5609 printk("\n");
5610 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5611 break;
5612 }
5613
758b2cdc 5614 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5615 printk(KERN_CONT "\n");
5616 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5617 break;
5618 }
1da177e4 5619
cb83b629
PZ
5620 if (!(sd->flags & SD_OVERLAP) &&
5621 cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5622 printk(KERN_CONT "\n");
5623 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5624 break;
5625 }
1da177e4 5626
758b2cdc 5627 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5628
333470ee
TH
5629 printk(KERN_CONT " %*pbl",
5630 cpumask_pr_args(sched_group_cpus(group)));
ca8ce3d0 5631 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
63b2ca30
NP
5632 printk(KERN_CONT " (cpu_capacity = %d)",
5633 group->sgc->capacity);
381512cf 5634 }
1da177e4 5635
4dcf6aff
IM
5636 group = group->next;
5637 } while (group != sd->groups);
3df0fc5b 5638 printk(KERN_CONT "\n");
1da177e4 5639
758b2cdc 5640 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5641 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5642
758b2cdc
RR
5643 if (sd->parent &&
5644 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5645 printk(KERN_ERR "ERROR: parent span is not a superset "
5646 "of domain->span\n");
4dcf6aff
IM
5647 return 0;
5648}
1da177e4 5649
4dcf6aff
IM
5650static void sched_domain_debug(struct sched_domain *sd, int cpu)
5651{
5652 int level = 0;
1da177e4 5653
d039ac60 5654 if (!sched_debug_enabled)
f6630114
MT
5655 return;
5656
4dcf6aff
IM
5657 if (!sd) {
5658 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5659 return;
5660 }
1da177e4 5661
4dcf6aff
IM
5662 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5663
5664 for (;;) {
4cb98839 5665 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5666 break;
1da177e4
LT
5667 level++;
5668 sd = sd->parent;
33859f7f 5669 if (!sd)
4dcf6aff
IM
5670 break;
5671 }
1da177e4 5672}
6d6bc0ad 5673#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5674# define sched_domain_debug(sd, cpu) do { } while (0)
d039ac60
PZ
5675static inline bool sched_debug(void)
5676{
5677 return false;
5678}
6d6bc0ad 5679#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5680
1a20ff27 5681static int sd_degenerate(struct sched_domain *sd)
245af2c7 5682{
758b2cdc 5683 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5684 return 1;
5685
5686 /* Following flags need at least 2 groups */
5687 if (sd->flags & (SD_LOAD_BALANCE |
5688 SD_BALANCE_NEWIDLE |
5689 SD_BALANCE_FORK |
89c4710e 5690 SD_BALANCE_EXEC |
5d4dfddd 5691 SD_SHARE_CPUCAPACITY |
d77b3ed5
VG
5692 SD_SHARE_PKG_RESOURCES |
5693 SD_SHARE_POWERDOMAIN)) {
245af2c7
SS
5694 if (sd->groups != sd->groups->next)
5695 return 0;
5696 }
5697
5698 /* Following flags don't use groups */
c88d5910 5699 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5700 return 0;
5701
5702 return 1;
5703}
5704
48f24c4d
IM
5705static int
5706sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5707{
5708 unsigned long cflags = sd->flags, pflags = parent->flags;
5709
5710 if (sd_degenerate(parent))
5711 return 1;
5712
758b2cdc 5713 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5714 return 0;
5715
245af2c7
SS
5716 /* Flags needing groups don't count if only 1 group in parent */
5717 if (parent->groups == parent->groups->next) {
5718 pflags &= ~(SD_LOAD_BALANCE |
5719 SD_BALANCE_NEWIDLE |
5720 SD_BALANCE_FORK |
89c4710e 5721 SD_BALANCE_EXEC |
5d4dfddd 5722 SD_SHARE_CPUCAPACITY |
10866e62 5723 SD_SHARE_PKG_RESOURCES |
d77b3ed5
VG
5724 SD_PREFER_SIBLING |
5725 SD_SHARE_POWERDOMAIN);
5436499e
KC
5726 if (nr_node_ids == 1)
5727 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5728 }
5729 if (~cflags & pflags)
5730 return 0;
5731
5732 return 1;
5733}
5734
dce840a0 5735static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5736{
dce840a0 5737 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5738
68e74568 5739 cpupri_cleanup(&rd->cpupri);
6bfd6d72 5740 cpudl_cleanup(&rd->cpudl);
1baca4ce 5741 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5742 free_cpumask_var(rd->rto_mask);
5743 free_cpumask_var(rd->online);
5744 free_cpumask_var(rd->span);
5745 kfree(rd);
5746}
5747
57d885fe
GH
5748static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5749{
a0490fa3 5750 struct root_domain *old_rd = NULL;
57d885fe 5751 unsigned long flags;
57d885fe 5752
05fa785c 5753 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5754
5755 if (rq->rd) {
a0490fa3 5756 old_rd = rq->rd;
57d885fe 5757
c6c4927b 5758 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5759 set_rq_offline(rq);
57d885fe 5760
c6c4927b 5761 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5762
a0490fa3 5763 /*
0515973f 5764	 * If we don't want to free the old_rd yet then
a0490fa3
IM
5765 * set old_rd to NULL to skip the freeing later
5766 * in this function:
5767 */
5768 if (!atomic_dec_and_test(&old_rd->refcount))
5769 old_rd = NULL;
57d885fe
GH
5770 }
5771
5772 atomic_inc(&rd->refcount);
5773 rq->rd = rd;
5774
c6c4927b 5775 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5776 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5777 set_rq_online(rq);
57d885fe 5778
05fa785c 5779 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5780
5781 if (old_rd)
dce840a0 5782 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5783}
5784
68c38fc3 5785static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5786{
5787 memset(rd, 0, sizeof(*rd));
5788
8295c699 5789 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5790 goto out;
8295c699 5791 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5792 goto free_span;
8295c699 5793 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
c6c4927b 5794 goto free_online;
8295c699 5795 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
1baca4ce 5796 goto free_dlo_mask;
6e0534f2 5797
332ac17e 5798 init_dl_bw(&rd->dl_bw);
6bfd6d72
JL
5799 if (cpudl_init(&rd->cpudl) != 0)
5800 goto free_dlo_mask;
332ac17e 5801
68c38fc3 5802 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5803 goto free_rto_mask;
c6c4927b 5804 return 0;
6e0534f2 5805
68e74568
RR
5806free_rto_mask:
5807 free_cpumask_var(rd->rto_mask);
1baca4ce
JL
5808free_dlo_mask:
5809 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5810free_online:
5811 free_cpumask_var(rd->online);
5812free_span:
5813 free_cpumask_var(rd->span);
0c910d28 5814out:
c6c4927b 5815 return -ENOMEM;
57d885fe
GH
5816}
5817
029632fb
PZ
5818/*
5819 * By default the system creates a single root-domain with all cpus as
5820 * members (mimicking the global state we have today).
5821 */
5822struct root_domain def_root_domain;
5823
57d885fe
GH
5824static void init_defrootdomain(void)
5825{
68c38fc3 5826 init_rootdomain(&def_root_domain);
c6c4927b 5827
57d885fe
GH
5828 atomic_set(&def_root_domain.refcount, 1);
5829}
5830
dc938520 5831static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5832{
5833 struct root_domain *rd;
5834
5835 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5836 if (!rd)
5837 return NULL;
5838
68c38fc3 5839 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5840 kfree(rd);
5841 return NULL;
5842 }
57d885fe
GH
5843
5844 return rd;
5845}
5846
63b2ca30 5847static void free_sched_groups(struct sched_group *sg, int free_sgc)
e3589f6c
PZ
5848{
5849 struct sched_group *tmp, *first;
5850
5851 if (!sg)
5852 return;
5853
5854 first = sg;
5855 do {
5856 tmp = sg->next;
5857
63b2ca30
NP
5858 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5859 kfree(sg->sgc);
e3589f6c
PZ
5860
5861 kfree(sg);
5862 sg = tmp;
5863 } while (sg != first);
5864}
5865
dce840a0
PZ
5866static void free_sched_domain(struct rcu_head *rcu)
5867{
5868 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5869
5870 /*
 5871	 * If it's an overlapping domain it has private groups, iterate and
5872 * nuke them all.
5873 */
5874 if (sd->flags & SD_OVERLAP) {
5875 free_sched_groups(sd->groups, 1);
5876 } else if (atomic_dec_and_test(&sd->groups->ref)) {
63b2ca30 5877 kfree(sd->groups->sgc);
dce840a0 5878 kfree(sd->groups);
9c3f75cb 5879 }
dce840a0
PZ
5880 kfree(sd);
5881}
5882
5883static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5884{
5885 call_rcu(&sd->rcu, free_sched_domain);
5886}
5887
5888static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5889{
5890 for (; sd; sd = sd->parent)
5891 destroy_sched_domain(sd, cpu);
5892}
5893
518cd623
PZ
5894/*
5895 * Keep a special pointer to the highest sched_domain that has
 5896	 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this CPU; it
 5897	 * allows us to avoid some pointer chasing in select_idle_sibling().
5898 *
5899 * Also keep a unique ID per domain (we use the first cpu number in
5900 * the cpumask of the domain), this allows us to quickly tell if
39be3501 5901 * two cpus are in the same cache domain, see cpus_share_cache().
518cd623
PZ
5902 */
5903DEFINE_PER_CPU(struct sched_domain *, sd_llc);
7d9ffa89 5904DEFINE_PER_CPU(int, sd_llc_size);
518cd623 5905DEFINE_PER_CPU(int, sd_llc_id);
fb13c7ee 5906DEFINE_PER_CPU(struct sched_domain *, sd_numa);
37dc6b50
PM
5907DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5908DEFINE_PER_CPU(struct sched_domain *, sd_asym);
518cd623
PZ
5909
5910static void update_top_cache_domain(int cpu)
5911{
5912 struct sched_domain *sd;
5d4cf996 5913 struct sched_domain *busy_sd = NULL;
518cd623 5914 int id = cpu;
7d9ffa89 5915 int size = 1;
518cd623
PZ
5916
5917 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
7d9ffa89 5918 if (sd) {
518cd623 5919 id = cpumask_first(sched_domain_span(sd));
7d9ffa89 5920 size = cpumask_weight(sched_domain_span(sd));
5d4cf996 5921 busy_sd = sd->parent; /* sd_busy */
7d9ffa89 5922 }
5d4cf996 5923 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
518cd623
PZ
5924
5925 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
7d9ffa89 5926 per_cpu(sd_llc_size, cpu) = size;
518cd623 5927 per_cpu(sd_llc_id, cpu) = id;
fb13c7ee
MG
5928
5929 sd = lowest_flag_domain(cpu, SD_NUMA);
5930 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
37dc6b50
PM
5931
5932 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5933 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
518cd623
PZ
5934}
5935
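/*
 * Illustrative sketch of how the cached id set up above is typically
 * consumed (the helper name here is hypothetical; cpus_share_cache(),
 * mentioned in the comment above, is the real user elsewhere in this
 * file): two per-cpu loads replace a walk of the domain tree.
 */
static inline bool example_cpus_share_llc(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}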
1da177e4 5936/*
0eab9146 5937 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
5938 * hold the hotplug lock.
5939 */
0eab9146
IM
5940static void
5941cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 5942{
70b97a7f 5943 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
5944 struct sched_domain *tmp;
5945
5946 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 5947 for (tmp = sd; tmp; ) {
245af2c7
SS
5948 struct sched_domain *parent = tmp->parent;
5949 if (!parent)
5950 break;
f29c9b1c 5951
1a848870 5952 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 5953 tmp->parent = parent->parent;
1a848870
SS
5954 if (parent->parent)
5955 parent->parent->child = tmp;
10866e62
PZ
5956 /*
5957 * Transfer SD_PREFER_SIBLING down in case of a
5958 * degenerate parent; the spans match for this
5959 * so the property transfers.
5960 */
5961 if (parent->flags & SD_PREFER_SIBLING)
5962 tmp->flags |= SD_PREFER_SIBLING;
dce840a0 5963 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
5964 } else
5965 tmp = tmp->parent;
245af2c7
SS
5966 }
5967
1a848870 5968 if (sd && sd_degenerate(sd)) {
dce840a0 5969 tmp = sd;
245af2c7 5970 sd = sd->parent;
dce840a0 5971 destroy_sched_domain(tmp, cpu);
1a848870
SS
5972 if (sd)
5973 sd->child = NULL;
5974 }
1da177e4 5975
4cb98839 5976 sched_domain_debug(sd, cpu);
1da177e4 5977
57d885fe 5978 rq_attach_root(rq, rd);
dce840a0 5979 tmp = rq->sd;
674311d5 5980 rcu_assign_pointer(rq->sd, sd);
dce840a0 5981 destroy_sched_domains(tmp, cpu);
518cd623
PZ
5982
5983 update_top_cache_domain(cpu);
1da177e4
LT
5984}
5985
1da177e4
LT
5986/* Setup the mask of cpus configured for isolated domains */
5987static int __init isolated_cpu_setup(char *str)
5988{
a6e4491c
PB
5989 int ret;
5990
bdddd296 5991 alloc_bootmem_cpumask_var(&cpu_isolated_map);
a6e4491c
PB
5992 ret = cpulist_parse(str, cpu_isolated_map);
5993 if (ret) {
5994 pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
5995 return 0;
5996 }
1da177e4
LT
5997 return 1;
5998}
8927f494 5999__setup("isolcpus=", isolated_cpu_setup);
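/*
 * Usage sketch: booting with e.g. "isolcpus=2,3" (any list format that
 * cpulist_parse() accepts) records those CPUs in cpu_isolated_map;
 * init_sched_domains() and partition_sched_domains() below then mask
 * them out of every sched domain, so the load balancer never migrates
 * tasks onto or off of them.
 */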
1da177e4 6000
49a02c51 6001struct s_data {
21d42ccf 6002 struct sched_domain ** __percpu sd;
49a02c51
AH
6003 struct root_domain *rd;
6004};
6005
2109b99e 6006enum s_alloc {
2109b99e 6007 sa_rootdomain,
21d42ccf 6008 sa_sd,
dce840a0 6009 sa_sd_storage,
2109b99e
AH
6010 sa_none,
6011};
6012
c1174876
PZ
6013/*
6014 * Build an iteration mask that can exclude certain CPUs from the upwards
6015 * domain traversal.
6016 *
6017 * Asymmetric node setups can result in situations where the domain tree is of
 6018	 * unequal depth; make sure to skip domains that already cover the entire
6019 * range.
6020 *
6021 * In that case build_sched_domains() will have terminated the iteration early
6022 * and our sibling sd spans will be empty. Domains should always include the
6023 * cpu they're built on, so check that.
6024 *
6025 */
6026static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6027{
6028 const struct cpumask *span = sched_domain_span(sd);
6029 struct sd_data *sdd = sd->private;
6030 struct sched_domain *sibling;
6031 int i;
6032
6033 for_each_cpu(i, span) {
6034 sibling = *per_cpu_ptr(sdd->sd, i);
6035 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6036 continue;
6037
6038 cpumask_set_cpu(i, sched_group_mask(sg));
6039 }
6040}
6041
6042/*
6043 * Return the canonical balance cpu for this group, this is the first cpu
6044 * of this group that's also in the iteration mask.
6045 */
6046int group_balance_cpu(struct sched_group *sg)
6047{
6048 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6049}
6050
e3589f6c
PZ
6051static int
6052build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6053{
6054 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6055 const struct cpumask *span = sched_domain_span(sd);
6056 struct cpumask *covered = sched_domains_tmpmask;
6057 struct sd_data *sdd = sd->private;
aaecac4a 6058 struct sched_domain *sibling;
e3589f6c
PZ
6059 int i;
6060
6061 cpumask_clear(covered);
6062
6063 for_each_cpu(i, span) {
6064 struct cpumask *sg_span;
6065
6066 if (cpumask_test_cpu(i, covered))
6067 continue;
6068
aaecac4a 6069 sibling = *per_cpu_ptr(sdd->sd, i);
c1174876
PZ
6070
6071 /* See the comment near build_group_mask(). */
aaecac4a 6072 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
c1174876
PZ
6073 continue;
6074
e3589f6c 6075 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 6076 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
6077
6078 if (!sg)
6079 goto fail;
6080
6081 sg_span = sched_group_cpus(sg);
aaecac4a
ZZ
6082 if (sibling->child)
6083 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6084 else
e3589f6c
PZ
6085 cpumask_set_cpu(i, sg_span);
6086
6087 cpumask_or(covered, covered, sg_span);
6088
63b2ca30
NP
6089 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6090 if (atomic_inc_return(&sg->sgc->ref) == 1)
c1174876
PZ
6091 build_group_mask(sd, sg);
6092
c3decf0d 6093 /*
63b2ca30 6094 * Initialize sgc->capacity such that even if we mess up the
c3decf0d
PZ
6095 * domains and no possible iteration will get us here, we won't
6096 * die on a /0 trap.
6097 */
ca8ce3d0 6098 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
e3589f6c 6099
c1174876
PZ
6100 /*
6101 * Make sure the first group of this domain contains the
6102 * canonical balance cpu. Otherwise the sched_domain iteration
6103 * breaks. See update_sg_lb_stats().
6104 */
74a5ce20 6105 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
c1174876 6106 group_balance_cpu(sg) == cpu)
e3589f6c
PZ
6107 groups = sg;
6108
6109 if (!first)
6110 first = sg;
6111 if (last)
6112 last->next = sg;
6113 last = sg;
6114 last->next = first;
6115 }
6116 sd->groups = groups;
6117
6118 return 0;
6119
6120fail:
6121 free_sched_groups(first, 0);
6122
6123 return -ENOMEM;
6124}
6125
dce840a0 6126static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 6127{
dce840a0
PZ
6128 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6129 struct sched_domain *child = sd->child;
1da177e4 6130
dce840a0
PZ
6131 if (child)
6132 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 6133
9c3f75cb 6134 if (sg) {
dce840a0 6135 *sg = *per_cpu_ptr(sdd->sg, cpu);
63b2ca30
NP
6136 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6137 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
9c3f75cb 6138 }
dce840a0
PZ
6139
6140 return cpu;
1e9f28fa 6141}
1e9f28fa 6142
01a08546 6143/*
dce840a0
PZ
6144 * build_sched_groups will build a circular linked list of the groups
6145 * covered by the given span, and will set each group's ->cpumask correctly,
ced549fa 6146 * and ->cpu_capacity to 0.
e3589f6c
PZ
6147 *
6148 * Assumes the sched_domain tree is fully constructed
01a08546 6149 */
e3589f6c
PZ
6150static int
6151build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 6152{
dce840a0
PZ
6153 struct sched_group *first = NULL, *last = NULL;
6154 struct sd_data *sdd = sd->private;
6155 const struct cpumask *span = sched_domain_span(sd);
f96225fd 6156 struct cpumask *covered;
dce840a0 6157 int i;
9c1cfda2 6158
e3589f6c
PZ
6159 get_group(cpu, sdd, &sd->groups);
6160 atomic_inc(&sd->groups->ref);
6161
0936629f 6162 if (cpu != cpumask_first(span))
e3589f6c
PZ
6163 return 0;
6164
f96225fd
PZ
6165 lockdep_assert_held(&sched_domains_mutex);
6166 covered = sched_domains_tmpmask;
6167
dce840a0 6168 cpumask_clear(covered);
6711cab4 6169
dce840a0
PZ
6170 for_each_cpu(i, span) {
6171 struct sched_group *sg;
cd08e923 6172 int group, j;
6711cab4 6173
dce840a0
PZ
6174 if (cpumask_test_cpu(i, covered))
6175 continue;
6711cab4 6176
cd08e923 6177 group = get_group(i, sdd, &sg);
c1174876 6178 cpumask_setall(sched_group_mask(sg));
0601a88d 6179
dce840a0
PZ
6180 for_each_cpu(j, span) {
6181 if (get_group(j, sdd, NULL) != group)
6182 continue;
0601a88d 6183
dce840a0
PZ
6184 cpumask_set_cpu(j, covered);
6185 cpumask_set_cpu(j, sched_group_cpus(sg));
6186 }
0601a88d 6187
dce840a0
PZ
6188 if (!first)
6189 first = sg;
6190 if (last)
6191 last->next = sg;
6192 last = sg;
6193 }
6194 last->next = first;
e3589f6c
PZ
6195
6196 return 0;
0601a88d 6197}
51888ca2 6198
89c4710e 6199/*
63b2ca30 6200 * Initialize sched groups cpu_capacity.
89c4710e 6201 *
63b2ca30 6202	 * cpu_capacity indicates the capacity of a sched group, which is used while
89c4710e 6203 * distributing the load between different sched groups in a sched domain.
63b2ca30
NP
 6204	 * Typically cpu_capacity for all the groups in a sched domain will be the same
 6205	 * unless there are asymmetries in the topology. If there are asymmetries,
 6206	 * a group having more cpu_capacity will pick up more load compared to a
 6207	 * group having less cpu_capacity.
89c4710e 6208 */
63b2ca30 6209static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
89c4710e 6210{
e3589f6c 6211 struct sched_group *sg = sd->groups;
89c4710e 6212
94c95ba6 6213 WARN_ON(!sg);
e3589f6c
PZ
6214
6215 do {
6216 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6217 sg = sg->next;
6218 } while (sg != sd->groups);
89c4710e 6219
c1174876 6220 if (cpu != group_balance_cpu(sg))
e3589f6c 6221 return;
aae6d3dd 6222
63b2ca30
NP
6223 update_group_capacity(sd, cpu);
6224 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
89c4710e
SS
6225}
6226
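/*
 * Rough worked example of the asymmetric case described in the comment
 * above init_sched_groups_capacity() (numbers are illustrative): with
 * SCHED_CAPACITY_SCALE == 1024, a group spanning two full-capacity CPUs
 * ends up with sgc->capacity of about 2048 while a single-CPU group has
 * about 1024, so at balance the former is expected to carry roughly
 * twice the load of the latter.
 */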
7c16ec58
MT
6227/*
6228 * Initializers for schedule domains
6229 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6230 */
6231
1d3504fc 6232static int default_relax_domain_level = -1;
60495e77 6233int sched_domain_level_max;
1d3504fc
HS
6234
6235static int __init setup_relax_domain_level(char *str)
6236{
a841f8ce
DS
6237 if (kstrtoint(str, 0, &default_relax_domain_level))
6238 pr_warn("Unable to set relax_domain_level\n");
30e0e178 6239
1d3504fc
HS
6240 return 1;
6241}
6242__setup("relax_domain_level=", setup_relax_domain_level);
6243
6244static void set_domain_attribute(struct sched_domain *sd,
6245 struct sched_domain_attr *attr)
6246{
6247 int request;
6248
6249 if (!attr || attr->relax_domain_level < 0) {
6250 if (default_relax_domain_level < 0)
6251 return;
6252 else
6253 request = default_relax_domain_level;
6254 } else
6255 request = attr->relax_domain_level;
6256 if (request < sd->level) {
6257 /* turn off idle balance on this domain */
c88d5910 6258 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6259 } else {
6260 /* turn on idle balance on this domain */
c88d5910 6261 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6262 }
6263}
6264
54ab4ff4
PZ
6265static void __sdt_free(const struct cpumask *cpu_map);
6266static int __sdt_alloc(const struct cpumask *cpu_map);
6267
2109b99e
AH
6268static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6269 const struct cpumask *cpu_map)
6270{
6271 switch (what) {
2109b99e 6272 case sa_rootdomain:
822ff793
PZ
6273 if (!atomic_read(&d->rd->refcount))
6274 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6275 case sa_sd:
6276 free_percpu(d->sd); /* fall through */
dce840a0 6277 case sa_sd_storage:
54ab4ff4 6278 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6279 case sa_none:
6280 break;
6281 }
6282}
3404c8d9 6283
2109b99e
AH
6284static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6285 const struct cpumask *cpu_map)
6286{
dce840a0
PZ
6287 memset(d, 0, sizeof(*d));
6288
54ab4ff4
PZ
6289 if (__sdt_alloc(cpu_map))
6290 return sa_sd_storage;
dce840a0
PZ
6291 d->sd = alloc_percpu(struct sched_domain *);
6292 if (!d->sd)
6293 return sa_sd_storage;
2109b99e 6294 d->rd = alloc_rootdomain();
dce840a0 6295 if (!d->rd)
21d42ccf 6296 return sa_sd;
2109b99e
AH
6297 return sa_rootdomain;
6298}
57d885fe 6299
dce840a0
PZ
6300/*
6301 * NULL the sd_data elements we've used to build the sched_domain and
6302 * sched_group structure so that the subsequent __free_domain_allocs()
6303 * will not free the data we're using.
6304 */
6305static void claim_allocations(int cpu, struct sched_domain *sd)
6306{
6307 struct sd_data *sdd = sd->private;
dce840a0
PZ
6308
6309 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6310 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6311
e3589f6c 6312 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6313 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c 6314
63b2ca30
NP
6315 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6316 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
dce840a0
PZ
6317}
6318
cb83b629 6319#ifdef CONFIG_NUMA
cb83b629 6320static int sched_domains_numa_levels;
e3fe70b1 6321enum numa_topology_type sched_numa_topology_type;
cb83b629 6322static int *sched_domains_numa_distance;
9942f79b 6323int sched_max_numa_distance;
cb83b629
PZ
6324static struct cpumask ***sched_domains_numa_masks;
6325static int sched_domains_curr_level;
143e1e28 6326#endif
cb83b629 6327
143e1e28
VG
6328/*
6329 * SD_flags allowed in topology descriptions.
6330 *
5d4dfddd 6331 * SD_SHARE_CPUCAPACITY - describes SMT topologies
143e1e28
VG
6332 * SD_SHARE_PKG_RESOURCES - describes shared caches
6333 * SD_NUMA - describes NUMA topologies
d77b3ed5 6334 * SD_SHARE_POWERDOMAIN - describes shared power domain
143e1e28
VG
6335 *
6336 * Odd one out:
6337 * SD_ASYM_PACKING - describes SMT quirks
6338 */
6339#define TOPOLOGY_SD_FLAGS \
5d4dfddd 6340 (SD_SHARE_CPUCAPACITY | \
143e1e28
VG
6341 SD_SHARE_PKG_RESOURCES | \
6342 SD_NUMA | \
d77b3ed5
VG
6343 SD_ASYM_PACKING | \
6344 SD_SHARE_POWERDOMAIN)
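/*
 * Illustrative sketch of the kind of per-level sd_flags callback a
 * topology table entry may provide (the function name is hypothetical):
 * sd_init() below ORs its return value into the domain flags and warns
 * if it strays outside TOPOLOGY_SD_FLAGS.
 */
static inline int example_powerdomain_flags(void)
{
	return SD_SHARE_POWERDOMAIN;
}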
cb83b629
PZ
6345
6346static struct sched_domain *
143e1e28 6347sd_init(struct sched_domain_topology_level *tl, int cpu)
cb83b629
PZ
6348{
6349 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
143e1e28
VG
6350 int sd_weight, sd_flags = 0;
6351
6352#ifdef CONFIG_NUMA
6353 /*
6354 * Ugly hack to pass state to sd_numa_mask()...
6355 */
6356 sched_domains_curr_level = tl->numa_level;
6357#endif
6358
6359 sd_weight = cpumask_weight(tl->mask(cpu));
6360
6361 if (tl->sd_flags)
6362 sd_flags = (*tl->sd_flags)();
6363 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6364 "wrong sd_flags in topology description\n"))
6365 sd_flags &= ~TOPOLOGY_SD_FLAGS;
cb83b629
PZ
6366
6367 *sd = (struct sched_domain){
6368 .min_interval = sd_weight,
6369 .max_interval = 2*sd_weight,
6370 .busy_factor = 32,
870a0bb5 6371 .imbalance_pct = 125,
143e1e28
VG
6372
6373 .cache_nice_tries = 0,
6374 .busy_idx = 0,
6375 .idle_idx = 0,
cb83b629
PZ
6376 .newidle_idx = 0,
6377 .wake_idx = 0,
6378 .forkexec_idx = 0,
6379
6380 .flags = 1*SD_LOAD_BALANCE
6381 | 1*SD_BALANCE_NEWIDLE
143e1e28
VG
6382 | 1*SD_BALANCE_EXEC
6383 | 1*SD_BALANCE_FORK
cb83b629 6384 | 0*SD_BALANCE_WAKE
143e1e28 6385 | 1*SD_WAKE_AFFINE
5d4dfddd 6386 | 0*SD_SHARE_CPUCAPACITY
cb83b629 6387 | 0*SD_SHARE_PKG_RESOURCES
143e1e28 6388 | 0*SD_SERIALIZE
cb83b629 6389 | 0*SD_PREFER_SIBLING
143e1e28
VG
6390 | 0*SD_NUMA
6391 | sd_flags
cb83b629 6392 ,
143e1e28 6393
cb83b629
PZ
6394 .last_balance = jiffies,
6395 .balance_interval = sd_weight,
143e1e28 6396 .smt_gain = 0,
2b4cfe64
JL
6397 .max_newidle_lb_cost = 0,
6398 .next_decay_max_lb_cost = jiffies,
143e1e28
VG
6399#ifdef CONFIG_SCHED_DEBUG
6400 .name = tl->name,
6401#endif
cb83b629 6402 };
cb83b629
PZ
6403
6404 /*
143e1e28 6405 * Convert topological properties into behaviour.
cb83b629 6406 */
143e1e28 6407
5d4dfddd 6408 if (sd->flags & SD_SHARE_CPUCAPACITY) {
caff37ef 6409 sd->flags |= SD_PREFER_SIBLING;
143e1e28
VG
6410 sd->imbalance_pct = 110;
6411 sd->smt_gain = 1178; /* ~15% */
143e1e28
VG
6412
6413 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6414 sd->imbalance_pct = 117;
6415 sd->cache_nice_tries = 1;
6416 sd->busy_idx = 2;
6417
6418#ifdef CONFIG_NUMA
6419 } else if (sd->flags & SD_NUMA) {
6420 sd->cache_nice_tries = 2;
6421 sd->busy_idx = 3;
6422 sd->idle_idx = 2;
6423
6424 sd->flags |= SD_SERIALIZE;
6425 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6426 sd->flags &= ~(SD_BALANCE_EXEC |
6427 SD_BALANCE_FORK |
6428 SD_WAKE_AFFINE);
6429 }
6430
6431#endif
6432 } else {
6433 sd->flags |= SD_PREFER_SIBLING;
6434 sd->cache_nice_tries = 1;
6435 sd->busy_idx = 2;
6436 sd->idle_idx = 1;
6437 }
6438
6439 sd->private = &tl->data;
cb83b629
PZ
6440
6441 return sd;
6442}
6443
143e1e28
VG
6444/*
6445 * Topology list, bottom-up.
6446 */
6447static struct sched_domain_topology_level default_topology[] = {
6448#ifdef CONFIG_SCHED_SMT
6449 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6450#endif
6451#ifdef CONFIG_SCHED_MC
6452 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
143e1e28
VG
6453#endif
6454 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6455 { NULL, },
6456};
6457
c6e1e7b5
JG
6458static struct sched_domain_topology_level *sched_domain_topology =
6459 default_topology;
143e1e28
VG
6460
6461#define for_each_sd_topology(tl) \
6462 for (tl = sched_domain_topology; tl->mask; tl++)
6463
6464void set_sched_topology(struct sched_domain_topology_level *tl)
6465{
6466 sched_domain_topology = tl;
6467}
6468
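/*
 * Illustrative sketch of how an architecture can feed its own table to
 * set_sched_topology(); the table and init hook names are hypothetical,
 * and only the helpers already used by default_topology[] above are
 * assumed. This is typically done from early arch setup, before
 * sched_init_smp() builds the domains.
 */
static struct sched_domain_topology_level example_arch_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init example_arch_set_topology(void)
{
	set_sched_topology(example_arch_topology);
}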
6469#ifdef CONFIG_NUMA
6470
cb83b629
PZ
6471static const struct cpumask *sd_numa_mask(int cpu)
6472{
6473 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6474}
6475
d039ac60
PZ
6476static void sched_numa_warn(const char *str)
6477{
6478 static int done = false;
6479 int i,j;
6480
6481 if (done)
6482 return;
6483
6484 done = true;
6485
6486 printk(KERN_WARNING "ERROR: %s\n\n", str);
6487
6488 for (i = 0; i < nr_node_ids; i++) {
6489 printk(KERN_WARNING " ");
6490 for (j = 0; j < nr_node_ids; j++)
6491 printk(KERN_CONT "%02d ", node_distance(i,j));
6492 printk(KERN_CONT "\n");
6493 }
6494 printk(KERN_WARNING "\n");
6495}
6496
9942f79b 6497bool find_numa_distance(int distance)
d039ac60
PZ
6498{
6499 int i;
6500
6501 if (distance == node_distance(0, 0))
6502 return true;
6503
6504 for (i = 0; i < sched_domains_numa_levels; i++) {
6505 if (sched_domains_numa_distance[i] == distance)
6506 return true;
6507 }
6508
6509 return false;
6510}
6511
e3fe70b1
RR
6512/*
6513 * A system can have three types of NUMA topology:
6514 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6515 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6516 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6517 *
6518 * The difference between a glueless mesh topology and a backplane
 6519	 * topology lies in whether communication between nodes that are not
 6520	 * directly connected goes through intermediary nodes (where programs
 6521	 * could run), or through backplane controllers. This affects
6522 * placement of programs.
6523 *
6524 * The type of topology can be discerned with the following tests:
6525 * - If the maximum distance between any nodes is 1 hop, the system
6526 * is directly connected.
6527 * - If for two nodes A and B, located N > 1 hops away from each other,
6528 * there is an intermediary node C, which is < N hops away from both
6529 * nodes A and B, the system is a glueless mesh.
6530 */
6531static void init_numa_topology_type(void)
6532{
6533 int a, b, c, n;
6534
6535 n = sched_max_numa_distance;
6536
e237882b 6537 if (sched_domains_numa_levels <= 1) {
e3fe70b1 6538 sched_numa_topology_type = NUMA_DIRECT;
e237882b
AG
6539 return;
6540 }
e3fe70b1
RR
6541
6542 for_each_online_node(a) {
6543 for_each_online_node(b) {
6544 /* Find two nodes furthest removed from each other. */
6545 if (node_distance(a, b) < n)
6546 continue;
6547
6548 /* Is there an intermediary node between a and b? */
6549 for_each_online_node(c) {
6550 if (node_distance(a, c) < n &&
6551 node_distance(b, c) < n) {
6552 sched_numa_topology_type =
6553 NUMA_GLUELESS_MESH;
6554 return;
6555 }
6556 }
6557
6558 sched_numa_topology_type = NUMA_BACKPLANE;
6559 return;
6560 }
6561 }
6562}
6563
cb83b629
PZ
6564static void sched_init_numa(void)
6565{
6566 int next_distance, curr_distance = node_distance(0, 0);
6567 struct sched_domain_topology_level *tl;
6568 int level = 0;
6569 int i, j, k;
6570
cb83b629
PZ
6571 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6572 if (!sched_domains_numa_distance)
6573 return;
6574
6575 /*
6576 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6577 * unique distances in the node_distance() table.
6578 *
6579 * Assumes node_distance(0,j) includes all distances in
6580 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
6581 */
6582 next_distance = curr_distance;
6583 for (i = 0; i < nr_node_ids; i++) {
6584 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
6585 for (k = 0; k < nr_node_ids; k++) {
6586 int distance = node_distance(i, k);
6587
6588 if (distance > curr_distance &&
6589 (distance < next_distance ||
6590 next_distance == curr_distance))
6591 next_distance = distance;
6592
6593 /*
6594 * While not a strong assumption it would be nice to know
6595 * about cases where if node A is connected to B, B is not
6596 * equally connected to A.
6597 */
6598 if (sched_debug() && node_distance(k, i) != distance)
6599 sched_numa_warn("Node-distance not symmetric");
6600
6601 if (sched_debug() && i && !find_numa_distance(distance))
6602 sched_numa_warn("Node-0 not representative");
6603 }
6604 if (next_distance != curr_distance) {
6605 sched_domains_numa_distance[level++] = next_distance;
6606 sched_domains_numa_levels = level;
6607 curr_distance = next_distance;
6608 } else break;
cb83b629 6609 }
d039ac60
PZ
6610
6611 /*
6612 * In case of sched_debug() we verify the above assumption.
6613 */
6614 if (!sched_debug())
6615 break;
cb83b629 6616 }
c123588b
AR
6617
6618 if (!level)
6619 return;
6620
cb83b629
PZ
6621 /*
6622 * 'level' contains the number of unique distances, excluding the
6623 * identity distance node_distance(i,i).
6624 *
28b4a521 6625 * The sched_domains_numa_distance[] array includes the actual distance
cb83b629
PZ
6626 * numbers.
6627 */
6628
5f7865f3
TC
6629 /*
6630 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6631 * If it fails to allocate memory for array sched_domains_numa_masks[][],
 6632	 * the array will contain fewer than 'level' members. This could be
6633 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
6634 * in other functions.
6635 *
6636 * We reset it to 'level' at the end of this function.
6637 */
6638 sched_domains_numa_levels = 0;
6639
cb83b629
PZ
6640 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6641 if (!sched_domains_numa_masks)
6642 return;
6643
6644 /*
6645 * Now for each level, construct a mask per node which contains all
6646 * cpus of nodes that are that many hops away from us.
6647 */
6648 for (i = 0; i < level; i++) {
6649 sched_domains_numa_masks[i] =
6650 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6651 if (!sched_domains_numa_masks[i])
6652 return;
6653
6654 for (j = 0; j < nr_node_ids; j++) {
2ea45800 6655 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
6656 if (!mask)
6657 return;
6658
6659 sched_domains_numa_masks[i][j] = mask;
6660
9c03ee14 6661 for_each_node(k) {
dd7d8634 6662 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
6663 continue;
6664
6665 cpumask_or(mask, mask, cpumask_of_node(k));
6666 }
6667 }
6668 }
6669
143e1e28
VG
6670 /* Compute default topology size */
6671 for (i = 0; sched_domain_topology[i].mask; i++);
6672
c515db8c 6673 tl = kzalloc((i + level + 1) *
cb83b629
PZ
6674 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6675 if (!tl)
6676 return;
6677
6678 /*
6679 * Copy the default topology bits..
6680 */
143e1e28
VG
6681 for (i = 0; sched_domain_topology[i].mask; i++)
6682 tl[i] = sched_domain_topology[i];
cb83b629
PZ
6683
6684 /*
6685 * .. and append 'j' levels of NUMA goodness.
6686 */
6687 for (j = 0; j < level; i++, j++) {
6688 tl[i] = (struct sched_domain_topology_level){
cb83b629 6689 .mask = sd_numa_mask,
143e1e28 6690 .sd_flags = cpu_numa_flags,
cb83b629
PZ
6691 .flags = SDTL_OVERLAP,
6692 .numa_level = j,
143e1e28 6693 SD_INIT_NAME(NUMA)
cb83b629
PZ
6694 };
6695 }
6696
6697 sched_domain_topology = tl;
5f7865f3
TC
6698
6699 sched_domains_numa_levels = level;
9942f79b 6700 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
e3fe70b1
RR
6701
6702 init_numa_topology_type();
cb83b629 6703}
301a5cba 6704
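/*
 * Rough worked example for sched_init_numa() above (distances are
 * illustrative): on a 4-node ring where node_distance() is 10 locally,
 * 20 to a neighbour and 30 to the opposite node, the deduplication pass
 * finds the unique non-identity distances {20, 30}, so level == 2 and
 * sched_max_numa_distance == 30. sched_domains_numa_masks[0][j] then
 * covers node j plus its neighbours (distance <= 20),
 * sched_domains_numa_masks[1][j] covers all nodes, and two NUMA levels
 * are appended after the default SMT/MC/DIE entries.
 */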
135fb3e1 6705static void sched_domains_numa_masks_set(unsigned int cpu)
301a5cba 6706{
301a5cba 6707 int node = cpu_to_node(cpu);
135fb3e1 6708 int i, j;
301a5cba
TC
6709
6710 for (i = 0; i < sched_domains_numa_levels; i++) {
6711 for (j = 0; j < nr_node_ids; j++) {
6712 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6713 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6714 }
6715 }
6716}
6717
135fb3e1 6718static void sched_domains_numa_masks_clear(unsigned int cpu)
301a5cba
TC
6719{
6720 int i, j;
135fb3e1 6721
301a5cba
TC
6722 for (i = 0; i < sched_domains_numa_levels; i++) {
6723 for (j = 0; j < nr_node_ids; j++)
6724 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6725 }
6726}
6727
cb83b629 6728#else
135fb3e1
TG
6729static inline void sched_init_numa(void) { }
6730static void sched_domains_numa_masks_set(unsigned int cpu) { }
6731static void sched_domains_numa_masks_clear(unsigned int cpu) { }
cb83b629
PZ
6732#endif /* CONFIG_NUMA */
6733
54ab4ff4
PZ
6734static int __sdt_alloc(const struct cpumask *cpu_map)
6735{
6736 struct sched_domain_topology_level *tl;
6737 int j;
6738
27723a68 6739 for_each_sd_topology(tl) {
54ab4ff4
PZ
6740 struct sd_data *sdd = &tl->data;
6741
6742 sdd->sd = alloc_percpu(struct sched_domain *);
6743 if (!sdd->sd)
6744 return -ENOMEM;
6745
6746 sdd->sg = alloc_percpu(struct sched_group *);
6747 if (!sdd->sg)
6748 return -ENOMEM;
6749
63b2ca30
NP
6750 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6751 if (!sdd->sgc)
9c3f75cb
PZ
6752 return -ENOMEM;
6753
54ab4ff4
PZ
6754 for_each_cpu(j, cpu_map) {
6755 struct sched_domain *sd;
6756 struct sched_group *sg;
63b2ca30 6757 struct sched_group_capacity *sgc;
54ab4ff4 6758
5cc389bc 6759 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
54ab4ff4
PZ
6760 GFP_KERNEL, cpu_to_node(j));
6761 if (!sd)
6762 return -ENOMEM;
6763
6764 *per_cpu_ptr(sdd->sd, j) = sd;
6765
6766 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6767 GFP_KERNEL, cpu_to_node(j));
6768 if (!sg)
6769 return -ENOMEM;
6770
30b4e9eb
IM
6771 sg->next = sg;
6772
54ab4ff4 6773 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 6774
63b2ca30 6775 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
9c3f75cb 6776 GFP_KERNEL, cpu_to_node(j));
63b2ca30 6777 if (!sgc)
9c3f75cb
PZ
6778 return -ENOMEM;
6779
63b2ca30 6780 *per_cpu_ptr(sdd->sgc, j) = sgc;
54ab4ff4
PZ
6781 }
6782 }
6783
6784 return 0;
6785}
6786
6787static void __sdt_free(const struct cpumask *cpu_map)
6788{
6789 struct sched_domain_topology_level *tl;
6790 int j;
6791
27723a68 6792 for_each_sd_topology(tl) {
54ab4ff4
PZ
6793 struct sd_data *sdd = &tl->data;
6794
6795 for_each_cpu(j, cpu_map) {
fb2cf2c6 6796 struct sched_domain *sd;
6797
6798 if (sdd->sd) {
6799 sd = *per_cpu_ptr(sdd->sd, j);
6800 if (sd && (sd->flags & SD_OVERLAP))
6801 free_sched_groups(sd->groups, 0);
6802 kfree(*per_cpu_ptr(sdd->sd, j));
6803 }
6804
6805 if (sdd->sg)
6806 kfree(*per_cpu_ptr(sdd->sg, j));
63b2ca30
NP
6807 if (sdd->sgc)
6808 kfree(*per_cpu_ptr(sdd->sgc, j));
54ab4ff4
PZ
6809 }
6810 free_percpu(sdd->sd);
fb2cf2c6 6811 sdd->sd = NULL;
54ab4ff4 6812 free_percpu(sdd->sg);
fb2cf2c6 6813 sdd->sg = NULL;
63b2ca30
NP
6814 free_percpu(sdd->sgc);
6815 sdd->sgc = NULL;
54ab4ff4
PZ
6816 }
6817}
6818
2c402dc3 6819struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
4a850cbe
VK
6820 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6821 struct sched_domain *child, int cpu)
2c402dc3 6822{
143e1e28 6823 struct sched_domain *sd = sd_init(tl, cpu);
2c402dc3 6824 if (!sd)
d069b916 6825 return child;
2c402dc3 6826
2c402dc3 6827 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6828 if (child) {
6829 sd->level = child->level + 1;
6830 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6831 child->parent = sd;
c75e0128 6832 sd->child = child;
6ae72dff
PZ
6833
6834 if (!cpumask_subset(sched_domain_span(child),
6835 sched_domain_span(sd))) {
6836 pr_err("BUG: arch topology borken\n");
6837#ifdef CONFIG_SCHED_DEBUG
6838 pr_err(" the %s domain not a subset of the %s domain\n",
6839 child->name, sd->name);
6840#endif
6841 /* Fixup, ensure @sd has at least @child cpus. */
6842 cpumask_or(sched_domain_span(sd),
6843 sched_domain_span(sd),
6844 sched_domain_span(child));
6845 }
6846
60495e77 6847 }
a841f8ce 6848 set_domain_attribute(sd, attr);
2c402dc3
PZ
6849
6850 return sd;
6851}
6852
2109b99e
AH
6853/*
6854 * Build sched domains for a given set of cpus and attach the sched domains
6855 * to the individual cpus
6856 */
dce840a0
PZ
6857static int build_sched_domains(const struct cpumask *cpu_map,
6858 struct sched_domain_attr *attr)
2109b99e 6859{
1c632169 6860 enum s_alloc alloc_state;
dce840a0 6861 struct sched_domain *sd;
2109b99e 6862 struct s_data d;
822ff793 6863 int i, ret = -ENOMEM;
9c1cfda2 6864
2109b99e
AH
6865 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6866 if (alloc_state != sa_rootdomain)
6867 goto error;
9c1cfda2 6868
dce840a0 6869 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6870 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6871 struct sched_domain_topology_level *tl;
6872
3bd65a80 6873 sd = NULL;
27723a68 6874 for_each_sd_topology(tl) {
4a850cbe 6875 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
22da9569
VK
6876 if (tl == sched_domain_topology)
6877 *per_cpu_ptr(d.sd, i) = sd;
e3589f6c
PZ
6878 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6879 sd->flags |= SD_OVERLAP;
d110235d
PZ
6880 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6881 break;
e3589f6c 6882 }
dce840a0
PZ
6883 }
6884
6885 /* Build the groups for the domains */
6886 for_each_cpu(i, cpu_map) {
6887 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6888 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6889 if (sd->flags & SD_OVERLAP) {
6890 if (build_overlap_sched_groups(sd, i))
6891 goto error;
6892 } else {
6893 if (build_sched_groups(sd, i))
6894 goto error;
6895 }
1cf51902 6896 }
a06dadbe 6897 }
9c1cfda2 6898
ced549fa 6899 /* Calculate CPU capacity for physical packages and nodes */
a9c9a9b6
PZ
6900 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6901 if (!cpumask_test_cpu(i, cpu_map))
6902 continue;
9c1cfda2 6903
dce840a0
PZ
6904 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6905 claim_allocations(i, sd);
63b2ca30 6906 init_sched_groups_capacity(i, sd);
dce840a0 6907 }
f712c0c7 6908 }
9c1cfda2 6909
1da177e4 6910 /* Attach the domains */
dce840a0 6911 rcu_read_lock();
abcd083a 6912 for_each_cpu(i, cpu_map) {
21d42ccf 6913 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6914 cpu_attach_domain(sd, d.rd, i);
1da177e4 6915 }
dce840a0 6916 rcu_read_unlock();
51888ca2 6917
822ff793 6918 ret = 0;
51888ca2 6919error:
2109b99e 6920 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6921 return ret;
1da177e4 6922}
029190c5 6923
acc3f5d7 6924static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6925static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6926static struct sched_domain_attr *dattr_cur;
 6927				/* attributes of custom domains in 'doms_cur' */
029190c5
PJ
6928
6929/*
6930 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
 6931	 * cpumask) fails, then fall back to a single sched domain,
6932 * as determined by the single cpumask fallback_doms.
029190c5 6933 */
4212823f 6934static cpumask_var_t fallback_doms;
029190c5 6935
ee79d1bd
HC
6936/*
6937 * arch_update_cpu_topology lets virtualized architectures update the
6938 * cpu core maps. It is supposed to return 1 if the topology changed
6939 * or 0 if it stayed the same.
6940 */
52f5684c 6941int __weak arch_update_cpu_topology(void)
22e52b07 6942{
ee79d1bd 6943 return 0;
22e52b07
HC
6944}
6945
acc3f5d7
RR
6946cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6947{
6948 int i;
6949 cpumask_var_t *doms;
6950
6951 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6952 if (!doms)
6953 return NULL;
6954 for (i = 0; i < ndoms; i++) {
6955 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6956 free_sched_domains(doms, i);
6957 return NULL;
6958 }
6959 }
6960 return doms;
6961}
6962
6963void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6964{
6965 unsigned int i;
6966 for (i = 0; i < ndoms; i++)
6967 free_cpumask_var(doms[i]);
6968 kfree(doms);
6969}
6970
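/*
 * Illustrative sketch of the calling convention documented at
 * partition_sched_domains() below (function and mask names here are
 * hypothetical): the caller, cpuset in practice, allocates the array,
 * fills one cpumask per requested domain and hands ownership over.
 */
static int example_repartition(const struct cpumask *set_a,
			       const struct cpumask *set_b)
{
	cpumask_var_t *doms;

	doms = alloc_sched_domains(2);
	if (!doms)
		return -ENOMEM;

	cpumask_copy(doms[0], set_a);
	cpumask_copy(doms[1], set_b);

	/* Must be called with the hotplug lock held (get_online_cpus()). */
	partition_sched_domains(2, doms, NULL);

	return 0;
}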
1a20ff27 6971/*
41a2d6cf 6972 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
6973 * For now this just excludes isolated cpus, but could be used to
6974 * exclude other special cases in the future.
1a20ff27 6975 */
c4a8849a 6976static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 6977{
7378547f
MM
6978 int err;
6979
22e52b07 6980 arch_update_cpu_topology();
029190c5 6981 ndoms_cur = 1;
acc3f5d7 6982 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 6983 if (!doms_cur)
acc3f5d7
RR
6984 doms_cur = &fallback_doms;
6985 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 6986 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 6987 register_sched_domain_sysctl();
7378547f
MM
6988
6989 return err;
1a20ff27
DG
6990}
6991
1a20ff27
DG
6992/*
6993 * Detach sched domains from a group of cpus specified in cpu_map
6994 * These cpus will now be attached to the NULL domain
6995 */
96f874e2 6996static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
6997{
6998 int i;
6999
dce840a0 7000 rcu_read_lock();
abcd083a 7001 for_each_cpu(i, cpu_map)
57d885fe 7002 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 7003 rcu_read_unlock();
1a20ff27
DG
7004}
7005
1d3504fc
HS
7006/* handle null as "default" */
7007static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7008 struct sched_domain_attr *new, int idx_new)
7009{
7010 struct sched_domain_attr tmp;
7011
7012 /* fast path */
7013 if (!new && !cur)
7014 return 1;
7015
7016 tmp = SD_ATTR_INIT;
7017 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7018 new ? (new + idx_new) : &tmp,
7019 sizeof(struct sched_domain_attr));
7020}
7021
029190c5
PJ
7022/*
7023 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7024 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
7025 * doms_new[] to the current sched domain partitioning, doms_cur[].
7026 * It destroys each deleted domain and builds each new domain.
7027 *
acc3f5d7 7028 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
7029 * The masks don't intersect (don't overlap). We should set up one
7030 * sched domain for each mask. CPUs not in any of the cpumasks will
7031 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7032 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7033 * it as it is.
7034 *
acc3f5d7
RR
7035 * The passed in 'doms_new' should be allocated using
7036 * alloc_sched_domains. This routine takes ownership of it and will
7037 * free_sched_domains it when done with it. If the caller failed the
7038 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7039 * and partition_sched_domains() will fallback to the single partition
7040 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 7041 *
96f874e2 7042 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
7043 * ndoms_new == 0 is a special case for destroying existing domains,
7044 * and it will not create the default domain.
dfb512ec 7045 *
029190c5
PJ
7046 * Call with hotplug lock held
7047 */
acc3f5d7 7048void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7049 struct sched_domain_attr *dattr_new)
029190c5 7050{
dfb512ec 7051 int i, j, n;
d65bd5ec 7052 int new_topology;
029190c5 7053
712555ee 7054 mutex_lock(&sched_domains_mutex);
a1835615 7055
7378547f
MM
7056 /* always unregister in case we don't destroy any domains */
7057 unregister_sched_domain_sysctl();
7058
d65bd5ec
HC
7059 /* Let architecture update cpu core mappings. */
7060 new_topology = arch_update_cpu_topology();
7061
dfb512ec 7062 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7063
7064 /* Destroy deleted domains */
7065 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7066 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7067 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7068 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7069 goto match1;
7070 }
7071 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7072 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7073match1:
7074 ;
7075 }
7076
c8d2d47a 7077 n = ndoms_cur;
e761b772 7078 if (doms_new == NULL) {
c8d2d47a 7079 n = 0;
acc3f5d7 7080 doms_new = &fallback_doms;
6ad4c188 7081 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7082 WARN_ON_ONCE(dattr_new);
e761b772
MK
7083 }
7084
029190c5
PJ
7085 /* Build new domains */
7086 for (i = 0; i < ndoms_new; i++) {
c8d2d47a 7087 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7088 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7089 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7090 goto match2;
7091 }
7092 /* no match - add a new doms_new */
dce840a0 7093 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7094match2:
7095 ;
7096 }
7097
7098 /* Remember the new sched domains */
acc3f5d7
RR
7099 if (doms_cur != &fallback_doms)
7100 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7101 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7102 doms_cur = doms_new;
1d3504fc 7103 dattr_cur = dattr_new;
029190c5 7104 ndoms_cur = ndoms_new;
7378547f
MM
7105
7106 register_sched_domain_sysctl();
a1835615 7107
712555ee 7108 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7109}
7110
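/*
 * Illustrative sketch, not part of the kernel source: how a hypothetical
 * caller (the real callers are the cpuset code and the hotplug paths above)
 * might repartition the scheduler domains into two disjoint sets.  The
 * function name and the 'set_a'/'set_b' cpumasks are assumptions made
 * purely for the example; they must not overlap.
 */
static void example_repartition_domains(const struct cpumask *set_a,
					const struct cpumask *set_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	get_online_cpus();
	if (!doms) {
		/* Allocation failed: fall back to the single default domain. */
		partition_sched_domains(1, NULL, NULL);
		goto out;
	}
	cpumask_copy(doms[0], set_a);
	cpumask_copy(doms[1], set_b);
	/* partition_sched_domains() takes ownership of 'doms' and frees it. */
	partition_sched_domains(2, doms, NULL);
out:
	put_online_cpus();
}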
d35be8ba
SB
7111static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7112
1da177e4 7113/*
3a101d05
TH
7114 * Update cpusets according to cpu_active mask. If cpusets are
7115 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7116 * around partition_sched_domains().
d35be8ba
SB
7117 *
7118 * If we come here as part of a suspend/resume, don't touch cpusets because we
7119 * want to restore it back to its original state upon resume anyway.
1da177e4 7120 */
40190a78 7121static void cpuset_cpu_active(void)
e761b772 7122{
40190a78 7123 if (cpuhp_tasks_frozen) {
d35be8ba
SB
7124 /*
7125 * num_cpus_frozen tracks how many CPUs are involved in the suspend/
7126 * resume sequence. As long as this is not the last online
7127 * operation in the resume sequence, just build a single sched
7128 * domain, ignoring cpusets.
7129 */
7130 num_cpus_frozen--;
7131 if (likely(num_cpus_frozen)) {
7132 partition_sched_domains(1, NULL, NULL);
135fb3e1 7133 return;
d35be8ba 7134 }
d35be8ba
SB
7135 /*
7136 * This is the last CPU online operation. So fall through and
7137 * restore the original sched domains by considering the
7138 * cpuset configurations.
7139 */
3a101d05 7140 }
135fb3e1 7141 cpuset_update_active_cpus(true);
3a101d05 7142}
e761b772 7143
40190a78 7144static int cpuset_cpu_inactive(unsigned int cpu)
3a101d05 7145{
3c18d447 7146 unsigned long flags;
3c18d447 7147 struct dl_bw *dl_b;
533445c6
OS
7148 bool overflow;
7149 int cpus;
3c18d447 7150
40190a78 7151 if (!cpuhp_tasks_frozen) {
533445c6
OS
7152 rcu_read_lock_sched();
7153 dl_b = dl_bw_of(cpu);
3c18d447 7154
533445c6
OS
7155 raw_spin_lock_irqsave(&dl_b->lock, flags);
7156 cpus = dl_bw_cpus(cpu);
7157 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7158 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3c18d447 7159
533445c6 7160 rcu_read_unlock_sched();
3c18d447 7161
533445c6 7162 if (overflow)
135fb3e1 7163 return -EBUSY;
7ddf96b0 7164 cpuset_update_active_cpus(false);
135fb3e1 7165 } else {
d35be8ba
SB
7166 num_cpus_frozen++;
7167 partition_sched_domains(1, NULL, NULL);
e761b772 7168 }
135fb3e1 7169 return 0;
e761b772 7170}
e761b772 7171
40190a78 7172int sched_cpu_activate(unsigned int cpu)
135fb3e1 7173{
7d976699
TG
7174 struct rq *rq = cpu_rq(cpu);
7175 unsigned long flags;
7176
40190a78 7177 set_cpu_active(cpu, true);
135fb3e1 7178
40190a78 7179 if (sched_smp_initialized) {
135fb3e1 7180 sched_domains_numa_masks_set(cpu);
40190a78 7181 cpuset_cpu_active();
e761b772 7182 }
7d976699
TG
7183
7184 /*
7185 * Put the rq online, if not already. This happens:
7186 *
7187 * 1) In the early boot process, because we build the real domains
7188 * after all cpus have been brought up.
7189 *
7190 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7191 * domains.
7192 */
7193 raw_spin_lock_irqsave(&rq->lock, flags);
7194 if (rq->rd) {
7195 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7196 set_rq_online(rq);
7197 }
7198 raw_spin_unlock_irqrestore(&rq->lock, flags);
7199
7200 update_max_interval();
7201
40190a78 7202 return 0;
135fb3e1
TG
7203}
7204
40190a78 7205int sched_cpu_deactivate(unsigned int cpu)
135fb3e1 7206{
135fb3e1
TG
7207 int ret;
7208
40190a78 7209 set_cpu_active(cpu, false);
b2454caa
PZ
7210 /*
7211 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
7212 * users of this state to go away such that all new such users will
7213 * observe it.
7214 *
7215 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
7216 * not imply sync_sched(), so wait for both.
7217 *
7218 * Do sync before park smpboot threads to take care the rcu boost case.
7219 */
7220 if (IS_ENABLED(CONFIG_PREEMPT))
7221 synchronize_rcu_mult(call_rcu, call_rcu_sched);
7222 else
7223 synchronize_rcu();
40190a78
TG
7224
7225 if (!sched_smp_initialized)
7226 return 0;
7227
7228 ret = cpuset_cpu_inactive(cpu);
7229 if (ret) {
7230 set_cpu_active(cpu, true);
7231 return ret;
135fb3e1 7232 }
40190a78
TG
7233 sched_domains_numa_masks_clear(cpu);
7234 return 0;
135fb3e1
TG
7235}
7236
94baf7a5
TG
7237static void sched_rq_cpu_starting(unsigned int cpu)
7238{
7239 struct rq *rq = cpu_rq(cpu);
7240
7241 rq->calc_load_update = calc_load_update;
94baf7a5
TG
7242 update_max_interval();
7243}
7244
135fb3e1
TG
7245int sched_cpu_starting(unsigned int cpu)
7246{
7247 set_cpu_rq_start_time(cpu);
94baf7a5 7248 sched_rq_cpu_starting(cpu);
135fb3e1 7249 return 0;
e761b772 7250}
e761b772 7251
f2785ddb
TG
7252#ifdef CONFIG_HOTPLUG_CPU
7253int sched_cpu_dying(unsigned int cpu)
7254{
7255 struct rq *rq = cpu_rq(cpu);
7256 unsigned long flags;
7257
7258 /* Handle pending wakeups and then migrate everything off */
7259 sched_ttwu_pending();
7260 raw_spin_lock_irqsave(&rq->lock, flags);
7261 if (rq->rd) {
7262 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7263 set_rq_offline(rq);
7264 }
7265 migrate_tasks(rq);
7266 BUG_ON(rq->nr_running != 1);
7267 raw_spin_unlock_irqrestore(&rq->lock, flags);
7268 calc_load_migrate(rq);
7269 update_max_interval();
20a5c8cc 7270 nohz_balance_exit_idle(cpu);
e5ef27d0 7271 hrtick_clear(rq);
f2785ddb
TG
7272 return 0;
7273}
7274#endif
7275
1da177e4
LT
7276void __init sched_init_smp(void)
7277{
dcc30a35
RR
7278 cpumask_var_t non_isolated_cpus;
7279
7280 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7281 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7282
cb83b629
PZ
7283 sched_init_numa();
7284
6acce3ef
PZ
7285 /*
7286 * There's no userspace yet to cause hotplug operations; hence all the
7287 * cpu masks are stable and all blatant races in the below code cannot
7288 * happen.
7289 */
712555ee 7290 mutex_lock(&sched_domains_mutex);
c4a8849a 7291 init_sched_domains(cpu_active_mask);
dcc30a35
RR
7292 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7293 if (cpumask_empty(non_isolated_cpus))
7294 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7295 mutex_unlock(&sched_domains_mutex);
e761b772 7296
5c1e1767 7297 /* Move init over to a non-isolated CPU */
dcc30a35 7298 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7299 BUG();
19978ca6 7300 sched_init_granularity();
dcc30a35 7301 free_cpumask_var(non_isolated_cpus);
4212823f 7302
0e3900e6 7303 init_sched_rt_class();
1baca4ce 7304 init_sched_dl_class();
e26fbffd 7305 sched_smp_initialized = true;
1da177e4 7306}
e26fbffd
TG
7307
7308static int __init migration_init(void)
7309{
94baf7a5 7310 sched_rq_cpu_starting(smp_processor_id());
e26fbffd 7311 return 0;
1da177e4 7312}
e26fbffd
TG
7313early_initcall(migration_init);
7314
1da177e4
LT
7315#else
7316void __init sched_init_smp(void)
7317{
19978ca6 7318 sched_init_granularity();
1da177e4
LT
7319}
7320#endif /* CONFIG_SMP */
7321
7322int in_sched_functions(unsigned long addr)
7323{
1da177e4
LT
7324 return in_lock_functions(addr) ||
7325 (addr >= (unsigned long)__sched_text_start
7326 && addr < (unsigned long)__sched_text_end);
7327}
7328
029632fb 7329#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
7330/*
7331 * Default task group.
7332 * Every task in system belongs to this group at bootup.
7333 */
029632fb 7334struct task_group root_task_group;
35cf4e50 7335LIST_HEAD(task_groups);
b0367629
WL
7336
7337/* Cacheline aligned slab cache for task_group */
7338static struct kmem_cache *task_group_cache __read_mostly;
052f1dc7 7339#endif
6f505b16 7340
e6252c3e 7341DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6f505b16 7342
1da177e4
LT
7343void __init sched_init(void)
7344{
dd41f596 7345 int i, j;
434d53b0
MT
7346 unsigned long alloc_size = 0, ptr;
7347
7348#ifdef CONFIG_FAIR_GROUP_SCHED
7349 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7350#endif
7351#ifdef CONFIG_RT_GROUP_SCHED
7352 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7353#endif
434d53b0 7354 if (alloc_size) {
36b7b6d4 7355 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7356
7357#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7358 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7359 ptr += nr_cpu_ids * sizeof(void **);
7360
07e06b01 7361 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7362 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7363
6d6bc0ad 7364#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7365#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7366 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7367 ptr += nr_cpu_ids * sizeof(void **);
7368
07e06b01 7369 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7370 ptr += nr_cpu_ids * sizeof(void **);
7371
6d6bc0ad 7372#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 7373 }
df7c8e84 7374#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
7375 for_each_possible_cpu(i) {
7376 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7377 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 7378 }
b74e6278 7379#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 7380
332ac17e
DF
7381 init_rt_bandwidth(&def_rt_bandwidth,
7382 global_rt_period(), global_rt_runtime());
7383 init_dl_bandwidth(&def_dl_bandwidth,
1724813d 7384 global_rt_period(), global_rt_runtime());
332ac17e 7385
57d885fe
GH
7386#ifdef CONFIG_SMP
7387 init_defrootdomain();
7388#endif
7389
d0b27fa7 7390#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7391 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7392 global_rt_period(), global_rt_runtime());
6d6bc0ad 7393#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7394
7c941438 7395#ifdef CONFIG_CGROUP_SCHED
b0367629
WL
7396 task_group_cache = KMEM_CACHE(task_group, 0);
7397
07e06b01
YZ
7398 list_add(&root_task_group.list, &task_groups);
7399 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 7400 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 7401 autogroup_init(&init_task);
7c941438 7402#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7403
0a945022 7404 for_each_possible_cpu(i) {
70b97a7f 7405 struct rq *rq;
1da177e4
LT
7406
7407 rq = cpu_rq(i);
05fa785c 7408 raw_spin_lock_init(&rq->lock);
7897986b 7409 rq->nr_running = 0;
dce48a84
TG
7410 rq->calc_load_active = 0;
7411 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 7412 init_cfs_rq(&rq->cfs);
07c54f7a
AV
7413 init_rt_rq(&rq->rt);
7414 init_dl_rq(&rq->dl);
dd41f596 7415#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 7416 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 7417 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 7418 /*
07e06b01 7419 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
7420 *
7421 * In case of task-groups formed through the cgroup filesystem, it
7422 * gets 100% of the cpu resources in the system. This overall
7423 * system cpu resource is divided among the tasks of
07e06b01 7424 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7425 * based on each entity's (task or task-group's) weight
7426 * (se->load.weight).
7427 *
07e06b01 7428 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
7429 * 1024 and two child groups A0 and A1 (of weight 1024 each),
7430 * then A0's share of the cpu resource is:
7431 *
0d905bca 7432 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7433 *
07e06b01
YZ
7434 * We achieve this by letting root_task_group's tasks sit
7435 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 7436 */
ab84d31e 7437 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 7438 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7439#endif /* CONFIG_FAIR_GROUP_SCHED */
7440
7441 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7442#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7443 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7444#endif
1da177e4 7445
dd41f596
IM
7446 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7447 rq->cpu_load[j] = 0;
fdf3e95d 7448
1da177e4 7449#ifdef CONFIG_SMP
41c7ce9a 7450 rq->sd = NULL;
57d885fe 7451 rq->rd = NULL;
ca6d75e6 7452 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 7453 rq->balance_callback = NULL;
1da177e4 7454 rq->active_balance = 0;
dd41f596 7455 rq->next_balance = jiffies;
1da177e4 7456 rq->push_cpu = 0;
0a2966b4 7457 rq->cpu = i;
1f11eb6a 7458 rq->online = 0;
eae0c9df
MG
7459 rq->idle_stamp = 0;
7460 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 7461 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
7462
7463 INIT_LIST_HEAD(&rq->cfs_tasks);
7464
dc938520 7465 rq_attach_root(rq, &def_root_domain);
3451d024 7466#ifdef CONFIG_NO_HZ_COMMON
9fd81dd5 7467 rq->last_load_update_tick = jiffies;
1c792db7 7468 rq->nohz_flags = 0;
83cd4fe2 7469#endif
265f22a9
FW
7470#ifdef CONFIG_NO_HZ_FULL
7471 rq->last_sched_tick = 0;
7472#endif
9fd81dd5 7473#endif /* CONFIG_SMP */
8f4d37ec 7474 init_rq_hrtick(rq);
1da177e4 7475 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7476 }
7477
2dd73a4f 7478 set_load_weight(&init_task);
b50f60ce 7479
e107be36
AK
7480#ifdef CONFIG_PREEMPT_NOTIFIERS
7481 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7482#endif
7483
1da177e4
LT
7484 /*
7485 * The boot idle thread does lazy MMU switching as well:
7486 */
7487 atomic_inc(&init_mm.mm_count);
7488 enter_lazy_tlb(&init_mm, current);
7489
1b537c7d
YD
7490 /*
7491 * During early bootup we pretend to be a normal task:
7492 */
7493 current->sched_class = &fair_sched_class;
7494
1da177e4
LT
7495 /*
7496 * Make us the idle thread. Technically, schedule() should not be
7497 * called from this thread, however somewhere below it might be,
7498 * but because we are the idle thread, we just pick up running again
7499 * when this runqueue becomes "idle".
7500 */
7501 init_idle(current, smp_processor_id());
dce48a84
TG
7502
7503 calc_load_update = jiffies + LOAD_FREQ;
7504
bf4d83f6 7505#ifdef CONFIG_SMP
4cb98839 7506 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
7507 /* May be allocated at isolcpus cmdline parse time */
7508 if (cpu_isolated_map == NULL)
7509 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 7510 idle_thread_set_boot_cpu();
9cf7243d 7511 set_cpu_rq_start_time(smp_processor_id());
029632fb
PZ
7512#endif
7513 init_sched_fair_class();
6a7b3dc3 7514
4698f88c
JP
7515 init_schedstats();
7516
6892b75e 7517 scheduler_running = 1;
1da177e4
LT
7518}
7519
d902db1e 7520#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
7521static inline int preempt_count_equals(int preempt_offset)
7522{
da7142e2 7523 int nested = preempt_count() + rcu_preempt_depth();
e4aafea2 7524
4ba8216c 7525 return (nested == preempt_offset);
e4aafea2
FW
7526}
7527
d894837f 7528void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7529{
8eb23b9f
PZ
7530 /*
7531 * Blocking primitives will set (and therefore destroy) current->state,
7532 * since we will exit with TASK_RUNNING make sure we enter with it,
7533 * otherwise we will destroy state.
7534 */
00845eb9 7535 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
7536 "do not call blocking ops when !TASK_RUNNING; "
7537 "state=%lx set at [<%p>] %pS\n",
7538 current->state,
7539 (void *)current->task_state_change,
00845eb9 7540 (void *)current->task_state_change);
8eb23b9f 7541
3427445a
PZ
7542 ___might_sleep(file, line, preempt_offset);
7543}
7544EXPORT_SYMBOL(__might_sleep);
7545
7546void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7547{
1da177e4
LT
7548 static unsigned long prev_jiffy; /* ratelimiting */
7549
b3fbab05 7550 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
db273be2
TG
7551 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7552 !is_idle_task(current)) ||
e4aafea2 7553 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
7554 return;
7555 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7556 return;
7557 prev_jiffy = jiffies;
7558
3df0fc5b
PZ
7559 printk(KERN_ERR
7560 "BUG: sleeping function called from invalid context at %s:%d\n",
7561 file, line);
7562 printk(KERN_ERR
7563 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7564 in_atomic(), irqs_disabled(),
7565 current->pid, current->comm);
aef745fc 7566
a8b686b3
ES
7567 if (task_stack_end_corrupted(current))
7568 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7569
aef745fc
IM
7570 debug_show_held_locks(current);
7571 if (irqs_disabled())
7572 print_irqtrace_events(current);
8f47b187
TG
7573#ifdef CONFIG_DEBUG_PREEMPT
7574 if (!preempt_count_equals(preempt_offset)) {
7575 pr_err("Preemption disabled at:");
7576 print_ip_sym(current->preempt_disable_ip);
7577 pr_cont("\n");
7578 }
7579#endif
aef745fc 7580 dump_stack();
1da177e4 7581}
3427445a 7582EXPORT_SYMBOL(___might_sleep);
1da177e4
LT
7583#endif
7584
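/*
 * Illustrative sketch, not part of the kernel source: a helper that may
 * block annotates itself with might_sleep() so that calling it from atomic
 * context trips the ___might_sleep() diagnostics above even on runs where
 * it never actually sleeps.  The function name and its use of kmalloc()
 * are assumptions made only for the example.
 */
static void *example_alloc_may_block(size_t size)
{
	might_sleep();
	return kmalloc(size, GFP_KERNEL);
}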
7585#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 7586void normalize_rt_tasks(void)
3a5e4dc1 7587{
dbc7f069 7588 struct task_struct *g, *p;
d50dde5a
DF
7589 struct sched_attr attr = {
7590 .sched_policy = SCHED_NORMAL,
7591 };
1da177e4 7592
3472eaa1 7593 read_lock(&tasklist_lock);
5d07f420 7594 for_each_process_thread(g, p) {
178be793
IM
7595 /*
7596 * Only normalize user tasks:
7597 */
3472eaa1 7598 if (p->flags & PF_KTHREAD)
178be793
IM
7599 continue;
7600
6cfb0d5d 7601 p->se.exec_start = 0;
6cfb0d5d 7602#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7603 p->se.statistics.wait_start = 0;
7604 p->se.statistics.sleep_start = 0;
7605 p->se.statistics.block_start = 0;
6cfb0d5d 7606#endif
dd41f596 7607
aab03e05 7608 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
7609 /*
7610 * Renice negative nice level userspace
7611 * tasks back to 0:
7612 */
3472eaa1 7613 if (task_nice(p) < 0)
dd41f596 7614 set_user_nice(p, 0);
1da177e4 7615 continue;
dd41f596 7616 }
1da177e4 7617
dbc7f069 7618 __sched_setscheduler(p, &attr, false, false);
5d07f420 7619 }
3472eaa1 7620 read_unlock(&tasklist_lock);
1da177e4
LT
7621}
7622
7623#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7624
67fc4e0c 7625#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7626/*
67fc4e0c 7627 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7628 *
7629 * They can only be called when the whole system has been
7630 * stopped - every CPU needs to be quiescent, and no scheduling
7631 * activity can take place. Using them for anything else would
7632 * be a serious bug, and as a result, they aren't even visible
7633 * under any other configuration.
7634 */
7635
7636/**
7637 * curr_task - return the current task for a given cpu.
7638 * @cpu: the processor in question.
7639 *
7640 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
7641 *
7642 * Return: The current task for @cpu.
1df5c10a 7643 */
36c8b586 7644struct task_struct *curr_task(int cpu)
1df5c10a
LT
7645{
7646 return cpu_curr(cpu);
7647}
7648
67fc4e0c
JW
7649#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7650
7651#ifdef CONFIG_IA64
1df5c10a
LT
7652/**
7653 * set_curr_task - set the current task for a given cpu.
7654 * @cpu: the processor in question.
7655 * @p: the task pointer to set.
7656 *
7657 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7658 * are serviced on a separate stack. It allows the architecture to switch the
7659 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7660 * must be called with all CPUs synchronized, and interrupts disabled; the
7661 * caller must save the original value of the current task (see
7662 * curr_task() above) and restore that value before reenabling interrupts and
7663 * re-starting the system.
7664 *
7665 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7666 */
36c8b586 7667void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7668{
7669 cpu_curr(cpu) = p;
7670}
7671
7672#endif
29f59db3 7673
7c941438 7674#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7675/* task_group_lock serializes the addition/removal of task groups */
7676static DEFINE_SPINLOCK(task_group_lock);
7677
2f5177f0 7678static void sched_free_group(struct task_group *tg)
bccbe08a
PZ
7679{
7680 free_fair_sched_group(tg);
7681 free_rt_sched_group(tg);
e9aa1dd1 7682 autogroup_free(tg);
b0367629 7683 kmem_cache_free(task_group_cache, tg);
bccbe08a
PZ
7684}
7685
7686/* allocate runqueue etc for a new task group */
ec7dc8ac 7687struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7688{
7689 struct task_group *tg;
bccbe08a 7690
b0367629 7691 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
bccbe08a
PZ
7692 if (!tg)
7693 return ERR_PTR(-ENOMEM);
7694
ec7dc8ac 7695 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7696 goto err;
7697
ec7dc8ac 7698 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7699 goto err;
7700
ace783b9
LZ
7701 return tg;
7702
7703err:
2f5177f0 7704 sched_free_group(tg);
ace783b9
LZ
7705 return ERR_PTR(-ENOMEM);
7706}
7707
7708void sched_online_group(struct task_group *tg, struct task_group *parent)
7709{
7710 unsigned long flags;
7711
8ed36996 7712 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7713 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7714
7715 WARN_ON(!parent); /* root should already exist */
7716
7717 tg->parent = parent;
f473aa5e 7718 INIT_LIST_HEAD(&tg->children);
09f2724a 7719 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7720 spin_unlock_irqrestore(&task_group_lock, flags);
8663e24d
PZ
7721
7722 online_fair_sched_group(tg);
29f59db3
SV
7723}
7724
9b5b7751 7725/* rcu callback to free various structures associated with a task group */
2f5177f0 7726static void sched_free_group_rcu(struct rcu_head *rhp)
29f59db3 7727{
29f59db3 7728 /* now it should be safe to free those cfs_rqs */
2f5177f0 7729 sched_free_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7730}
7731
4cf86d77 7732void sched_destroy_group(struct task_group *tg)
ace783b9
LZ
7733{
7734 /* wait for possible concurrent references to cfs_rqs complete */
2f5177f0 7735 call_rcu(&tg->rcu, sched_free_group_rcu);
ace783b9
LZ
7736}
7737
7738void sched_offline_group(struct task_group *tg)
29f59db3 7739{
8ed36996 7740 unsigned long flags;
29f59db3 7741
3d4b47b4 7742 /* end participation in shares distribution */
6fe1f348 7743 unregister_fair_sched_group(tg);
3d4b47b4
PZ
7744
7745 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7746 list_del_rcu(&tg->list);
f473aa5e 7747 list_del_rcu(&tg->siblings);
8ed36996 7748 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7749}
7750
ea86cb4b 7751static void sched_change_group(struct task_struct *tsk, int type)
29f59db3 7752{
8323f26c 7753 struct task_group *tg;
29f59db3 7754
f7b8a47d
KT
7755 /*
7756 * All callers are synchronized by task_rq_lock(); we do not use RCU
7757 * which is pointless here. Thus, we pass "true" to task_css_check()
7758 * to prevent lockdep warnings.
7759 */
7760 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
7761 struct task_group, css);
7762 tg = autogroup_task_group(tsk, tg);
7763 tsk->sched_task_group = tg;
7764
810b3817 7765#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
7766 if (tsk->sched_class->task_change_group)
7767 tsk->sched_class->task_change_group(tsk, type);
b2b5ce02 7768 else
810b3817 7769#endif
b2b5ce02 7770 set_task_rq(tsk, task_cpu(tsk));
ea86cb4b
VG
7771}
7772
7773/*
7774 * Change task's runqueue when it moves between groups.
7775 *
7776 * The caller of this function should have put the task in its new group by
7777 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
7778 * its new group.
7779 */
7780void sched_move_task(struct task_struct *tsk)
7781{
7782 int queued, running;
7783 struct rq_flags rf;
7784 struct rq *rq;
7785
7786 rq = task_rq_lock(tsk, &rf);
7787
7788 running = task_current(rq, tsk);
7789 queued = task_on_rq_queued(tsk);
7790
7791 if (queued)
7792 dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
7793 if (unlikely(running))
7794 put_prev_task(rq, tsk);
7795
7796 sched_change_group(tsk, TASK_MOVE_GROUP);
810b3817 7797
0e1f3483
HS
7798 if (unlikely(running))
7799 tsk->sched_class->set_curr_task(rq);
da0c1e65 7800 if (queued)
ff77e468 7801 enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
29f59db3 7802
eb580751 7803 task_rq_unlock(rq, tsk, &rf);
29f59db3 7804}
7c941438 7805#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7806
a790de99
PT
7807#ifdef CONFIG_RT_GROUP_SCHED
7808/*
7809 * Ensure that the real time constraints are schedulable.
7810 */
7811static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7812
9a7e0b18
PZ
7813/* Must be called with tasklist_lock held */
7814static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7815{
9a7e0b18 7816 struct task_struct *g, *p;
b40b2e8e 7817
1fe89e1b
PZ
7818 /*
7819 * Autogroups do not have RT tasks; see autogroup_create().
7820 */
7821 if (task_group_is_autogroup(tg))
7822 return 0;
7823
5d07f420 7824 for_each_process_thread(g, p) {
8651c658 7825 if (rt_task(p) && task_group(p) == tg)
9a7e0b18 7826 return 1;
5d07f420 7827 }
b40b2e8e 7828
9a7e0b18
PZ
7829 return 0;
7830}
b40b2e8e 7831
9a7e0b18
PZ
7832struct rt_schedulable_data {
7833 struct task_group *tg;
7834 u64 rt_period;
7835 u64 rt_runtime;
7836};
b40b2e8e 7837
a790de99 7838static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7839{
7840 struct rt_schedulable_data *d = data;
7841 struct task_group *child;
7842 unsigned long total, sum = 0;
7843 u64 period, runtime;
b40b2e8e 7844
9a7e0b18
PZ
7845 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7846 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7847
9a7e0b18
PZ
7848 if (tg == d->tg) {
7849 period = d->rt_period;
7850 runtime = d->rt_runtime;
b40b2e8e 7851 }
b40b2e8e 7852
4653f803
PZ
7853 /*
7854 * Cannot have more runtime than the period.
7855 */
7856 if (runtime > period && runtime != RUNTIME_INF)
7857 return -EINVAL;
6f505b16 7858
4653f803
PZ
7859 /*
7860 * Ensure we don't starve existing RT tasks.
7861 */
9a7e0b18
PZ
7862 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7863 return -EBUSY;
6f505b16 7864
9a7e0b18 7865 total = to_ratio(period, runtime);
6f505b16 7866
4653f803
PZ
7867 /*
7868 * Nobody can have more than the global setting allows.
7869 */
7870 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7871 return -EINVAL;
6f505b16 7872
4653f803
PZ
7873 /*
7874 * The sum of our children's runtime should not exceed our own.
7875 */
9a7e0b18
PZ
7876 list_for_each_entry_rcu(child, &tg->children, siblings) {
7877 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7878 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7879
9a7e0b18
PZ
7880 if (child == d->tg) {
7881 period = d->rt_period;
7882 runtime = d->rt_runtime;
7883 }
6f505b16 7884
9a7e0b18 7885 sum += to_ratio(period, runtime);
9f0c1e56 7886 }
6f505b16 7887
9a7e0b18
PZ
7888 if (sum > total)
7889 return -EINVAL;
7890
7891 return 0;
6f505b16
PZ
7892}
7893
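/*
 * Worked example for the walk above (all figures assumed for illustration):
 * a group with rt_period=1s and rt_runtime=0.9s has a ratio of 0.9, which
 * must not exceed the global default of 0.95.  Two children each granted
 * 0.4s per 1s period sum to 0.8 <= 0.9 and are accepted; adding a third
 * child asking for 0.2 would push the sum to 1.0 > 0.9 and the walk would
 * return -EINVAL.
 */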
9a7e0b18 7894static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7895{
8277434e
PT
7896 int ret;
7897
9a7e0b18
PZ
7898 struct rt_schedulable_data data = {
7899 .tg = tg,
7900 .rt_period = period,
7901 .rt_runtime = runtime,
7902 };
7903
8277434e
PT
7904 rcu_read_lock();
7905 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7906 rcu_read_unlock();
7907
7908 return ret;
521f1a24
DG
7909}
7910
ab84d31e 7911static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7912 u64 rt_period, u64 rt_runtime)
6f505b16 7913{
ac086bc2 7914 int i, err = 0;
9f0c1e56 7915
2636ed5f
PZ
7916 /*
7917 * Disallowing the root group RT runtime is BAD, it would disallow the
7918 * kernel creating (and or operating) RT threads.
7919 */
7920 if (tg == &root_task_group && rt_runtime == 0)
7921 return -EINVAL;
7922
7923 /* No period doesn't make any sense. */
7924 if (rt_period == 0)
7925 return -EINVAL;
7926
9f0c1e56 7927 mutex_lock(&rt_constraints_mutex);
521f1a24 7928 read_lock(&tasklist_lock);
9a7e0b18
PZ
7929 err = __rt_schedulable(tg, rt_period, rt_runtime);
7930 if (err)
9f0c1e56 7931 goto unlock;
ac086bc2 7932
0986b11b 7933 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7934 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7935 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7936
7937 for_each_possible_cpu(i) {
7938 struct rt_rq *rt_rq = tg->rt_rq[i];
7939
0986b11b 7940 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7941 rt_rq->rt_runtime = rt_runtime;
0986b11b 7942 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7943 }
0986b11b 7944 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7945unlock:
521f1a24 7946 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7947 mutex_unlock(&rt_constraints_mutex);
7948
7949 return err;
6f505b16
PZ
7950}
7951
25cc7da7 7952static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
d0b27fa7
PZ
7953{
7954 u64 rt_runtime, rt_period;
7955
7956 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7957 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7958 if (rt_runtime_us < 0)
7959 rt_runtime = RUNTIME_INF;
7960
ab84d31e 7961 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7962}
7963
25cc7da7 7964static long sched_group_rt_runtime(struct task_group *tg)
9f0c1e56
PZ
7965{
7966 u64 rt_runtime_us;
7967
d0b27fa7 7968 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7969 return -1;
7970
d0b27fa7 7971 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7972 do_div(rt_runtime_us, NSEC_PER_USEC);
7973 return rt_runtime_us;
7974}
d0b27fa7 7975
ce2f5fe4 7976static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
d0b27fa7
PZ
7977{
7978 u64 rt_runtime, rt_period;
7979
ce2f5fe4 7980 rt_period = rt_period_us * NSEC_PER_USEC;
d0b27fa7
PZ
7981 rt_runtime = tg->rt_bandwidth.rt_runtime;
7982
ab84d31e 7983 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7984}
7985
25cc7da7 7986static long sched_group_rt_period(struct task_group *tg)
d0b27fa7
PZ
7987{
7988 u64 rt_period_us;
7989
7990 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7991 do_div(rt_period_us, NSEC_PER_USEC);
7992 return rt_period_us;
7993}
332ac17e 7994#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7995
332ac17e 7996#ifdef CONFIG_RT_GROUP_SCHED
d0b27fa7
PZ
7997static int sched_rt_global_constraints(void)
7998{
7999 int ret = 0;
8000
8001 mutex_lock(&rt_constraints_mutex);
9a7e0b18 8002 read_lock(&tasklist_lock);
4653f803 8003 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 8004 read_unlock(&tasklist_lock);
d0b27fa7
PZ
8005 mutex_unlock(&rt_constraints_mutex);
8006
8007 return ret;
8008}
54e99124 8009
25cc7da7 8010static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
54e99124
DG
8011{
8012 /* Don't accept realtime tasks when there is no way for them to run */
8013 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8014 return 0;
8015
8016 return 1;
8017}
8018
6d6bc0ad 8019#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8020static int sched_rt_global_constraints(void)
8021{
ac086bc2 8022 unsigned long flags;
8c5e9554 8023 int i;
ec5d4989 8024
0986b11b 8025 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
8026 for_each_possible_cpu(i) {
8027 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8028
0986b11b 8029 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8030 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 8031 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8032 }
0986b11b 8033 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 8034
8c5e9554 8035 return 0;
d0b27fa7 8036}
6d6bc0ad 8037#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8038
a1963b81 8039static int sched_dl_global_validate(void)
332ac17e 8040{
1724813d
PZ
8041 u64 runtime = global_rt_runtime();
8042 u64 period = global_rt_period();
332ac17e 8043 u64 new_bw = to_ratio(period, runtime);
f10e00f4 8044 struct dl_bw *dl_b;
1724813d 8045 int cpu, ret = 0;
49516342 8046 unsigned long flags;
332ac17e
DF
8047
8048 /*
8049 * Here we want to check the bandwidth not being set to some
8050 * value smaller than the currently allocated bandwidth in
8051 * any of the root_domains.
8052 *
8053 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
8054 * cycling on root_domains... Discussion on different/better
8055 * solutions is welcome!
8056 */
1724813d 8057 for_each_possible_cpu(cpu) {
f10e00f4
KT
8058 rcu_read_lock_sched();
8059 dl_b = dl_bw_of(cpu);
332ac17e 8060
49516342 8061 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d
PZ
8062 if (new_bw < dl_b->total_bw)
8063 ret = -EBUSY;
49516342 8064 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
1724813d 8065
f10e00f4
KT
8066 rcu_read_unlock_sched();
8067
1724813d
PZ
8068 if (ret)
8069 break;
332ac17e
DF
8070 }
8071
1724813d 8072 return ret;
332ac17e
DF
8073}
8074
1724813d 8075static void sched_dl_do_global(void)
ce0dbbbb 8076{
1724813d 8077 u64 new_bw = -1;
f10e00f4 8078 struct dl_bw *dl_b;
1724813d 8079 int cpu;
49516342 8080 unsigned long flags;
ce0dbbbb 8081
1724813d
PZ
8082 def_dl_bandwidth.dl_period = global_rt_period();
8083 def_dl_bandwidth.dl_runtime = global_rt_runtime();
8084
8085 if (global_rt_runtime() != RUNTIME_INF)
8086 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
8087
8088 /*
8089 * FIXME: As above...
8090 */
8091 for_each_possible_cpu(cpu) {
f10e00f4
KT
8092 rcu_read_lock_sched();
8093 dl_b = dl_bw_of(cpu);
1724813d 8094
49516342 8095 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d 8096 dl_b->bw = new_bw;
49516342 8097 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
f10e00f4
KT
8098
8099 rcu_read_unlock_sched();
ce0dbbbb 8100 }
1724813d
PZ
8101}
8102
8103static int sched_rt_global_validate(void)
8104{
8105 if (sysctl_sched_rt_period <= 0)
8106 return -EINVAL;
8107
e9e7cb38
JL
8108 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
8109 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
1724813d
PZ
8110 return -EINVAL;
8111
8112 return 0;
8113}
8114
8115static void sched_rt_do_global(void)
8116{
8117 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8118 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
ce0dbbbb
CW
8119}
8120
d0b27fa7 8121int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8122 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8123 loff_t *ppos)
8124{
d0b27fa7
PZ
8125 int old_period, old_runtime;
8126 static DEFINE_MUTEX(mutex);
1724813d 8127 int ret;
d0b27fa7
PZ
8128
8129 mutex_lock(&mutex);
8130 old_period = sysctl_sched_rt_period;
8131 old_runtime = sysctl_sched_rt_runtime;
8132
8d65af78 8133 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8134
8135 if (!ret && write) {
1724813d
PZ
8136 ret = sched_rt_global_validate();
8137 if (ret)
8138 goto undo;
8139
a1963b81 8140 ret = sched_dl_global_validate();
1724813d
PZ
8141 if (ret)
8142 goto undo;
8143
a1963b81 8144 ret = sched_rt_global_constraints();
1724813d
PZ
8145 if (ret)
8146 goto undo;
8147
8148 sched_rt_do_global();
8149 sched_dl_do_global();
8150 }
8151 if (0) {
8152undo:
8153 sysctl_sched_rt_period = old_period;
8154 sysctl_sched_rt_runtime = old_runtime;
d0b27fa7
PZ
8155 }
8156 mutex_unlock(&mutex);
8157
8158 return ret;
8159}
68318b8e 8160
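/*
 * Worked example (a sketch; the figures are the usual build-time defaults,
 * not something this file guarantees): sysctl_sched_rt_period defaults to
 * 1000000 us and sysctl_sched_rt_runtime to 950000 us, so after
 * sched_rt_do_global()/sched_dl_do_global() the RT and deadline classes may
 * consume at most 95% of each CPU per period.  Writing -1 to the runtime
 * sysctl selects RUNTIME_INF and disables the throttling entirely.
 */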
1724813d 8161int sched_rr_handler(struct ctl_table *table, int write,
332ac17e
DF
8162 void __user *buffer, size_t *lenp,
8163 loff_t *ppos)
8164{
8165 int ret;
332ac17e 8166 static DEFINE_MUTEX(mutex);
332ac17e
DF
8167
8168 mutex_lock(&mutex);
332ac17e 8169 ret = proc_dointvec(table, write, buffer, lenp, ppos);
1724813d
PZ
8170 /* make sure that internally we keep jiffies */
8171 /* also, writing zero resets timeslice to default */
332ac17e 8172 if (!ret && write) {
1724813d
PZ
8173 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8174 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
332ac17e
DF
8175 }
8176 mutex_unlock(&mutex);
332ac17e
DF
8177 return ret;
8178}
8179
052f1dc7 8180#ifdef CONFIG_CGROUP_SCHED
68318b8e 8181
a7c6d554 8182static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 8183{
a7c6d554 8184 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
8185}
8186
eb95419b
TH
8187static struct cgroup_subsys_state *
8188cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 8189{
eb95419b
TH
8190 struct task_group *parent = css_tg(parent_css);
8191 struct task_group *tg;
68318b8e 8192
eb95419b 8193 if (!parent) {
68318b8e 8194 /* This is early initialization for the top cgroup */
07e06b01 8195 return &root_task_group.css;
68318b8e
SV
8196 }
8197
ec7dc8ac 8198 tg = sched_create_group(parent);
68318b8e
SV
8199 if (IS_ERR(tg))
8200 return ERR_PTR(-ENOMEM);
8201
2f5177f0
PZ
8202 sched_online_group(tg, parent);
8203
68318b8e
SV
8204 return &tg->css;
8205}
8206
2f5177f0 8207static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
ace783b9 8208{
eb95419b 8209 struct task_group *tg = css_tg(css);
ace783b9 8210
2f5177f0 8211 sched_offline_group(tg);
ace783b9
LZ
8212}
8213
eb95419b 8214static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 8215{
eb95419b 8216 struct task_group *tg = css_tg(css);
68318b8e 8217
2f5177f0
PZ
8218 /*
8219 * Relies on the RCU grace period between css_released() and this.
8220 */
8221 sched_free_group(tg);
ace783b9
LZ
8222}
8223
ea86cb4b
VG
8224/*
8225 * This is called before wake_up_new_task(), therefore we really only
8226 * have to set its group bits, all the other stuff does not apply.
8227 */
b53202e6 8228static void cpu_cgroup_fork(struct task_struct *task)
eeb61e53 8229{
ea86cb4b
VG
8230 struct rq_flags rf;
8231 struct rq *rq;
8232
8233 rq = task_rq_lock(task, &rf);
8234
8235 sched_change_group(task, TASK_SET_GROUP);
8236
8237 task_rq_unlock(rq, task, &rf);
eeb61e53
KT
8238}
8239
1f7dd3e5 8240static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
68318b8e 8241{
bb9d97b6 8242 struct task_struct *task;
1f7dd3e5 8243 struct cgroup_subsys_state *css;
7dc603c9 8244 int ret = 0;
bb9d97b6 8245
1f7dd3e5 8246 cgroup_taskset_for_each(task, css, tset) {
b68aa230 8247#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 8248 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 8249 return -EINVAL;
b68aa230 8250#else
bb9d97b6
TH
8251 /* We don't support RT-tasks being in separate groups */
8252 if (task->sched_class != &fair_sched_class)
8253 return -EINVAL;
b68aa230 8254#endif
7dc603c9
PZ
8255 /*
8256 * Serialize against wake_up_new_task() such that if it's
8257 * running, we're sure to observe its full state.
8258 */
8259 raw_spin_lock_irq(&task->pi_lock);
8260 /*
8261 * Avoid calling sched_move_task() before wake_up_new_task()
8262 * has happened. This would lead to problems with PELT, due to
8263 * move wanting to detach+attach while we're not attached yet.
8264 */
8265 if (task->state == TASK_NEW)
8266 ret = -EINVAL;
8267 raw_spin_unlock_irq(&task->pi_lock);
8268
8269 if (ret)
8270 break;
bb9d97b6 8271 }
7dc603c9 8272 return ret;
be367d09 8273}
68318b8e 8274
1f7dd3e5 8275static void cpu_cgroup_attach(struct cgroup_taskset *tset)
68318b8e 8276{
bb9d97b6 8277 struct task_struct *task;
1f7dd3e5 8278 struct cgroup_subsys_state *css;
bb9d97b6 8279
1f7dd3e5 8280 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 8281 sched_move_task(task);
68318b8e
SV
8282}
8283
052f1dc7 8284#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
8285static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8286 struct cftype *cftype, u64 shareval)
68318b8e 8287{
182446d0 8288 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
8289}
8290
182446d0
TH
8291static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8292 struct cftype *cft)
68318b8e 8293{
182446d0 8294 struct task_group *tg = css_tg(css);
68318b8e 8295
c8b28116 8296 return (u64) scale_load_down(tg->shares);
68318b8e 8297}
ab84d31e
PT
8298
8299#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8300static DEFINE_MUTEX(cfs_constraints_mutex);
8301
ab84d31e
PT
8302const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8303const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8304
a790de99
PT
8305static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8306
ab84d31e
PT
8307static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8308{
56f570e5 8309 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8310 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8311
8312 if (tg == &root_task_group)
8313 return -EINVAL;
8314
8315 /*
8316 * Ensure we have at least some amount of bandwidth every period. This is
8317 * to prevent reaching a state of large arrears when throttled via
8318 * entity_tick() resulting in prolonged exit starvation.
8319 */
8320 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8321 return -EINVAL;
8322
8323 /*
8324 * Likewise, bound things on the other side by preventing insane quota
8325 * periods. This also allows us to normalize in computing quota
8326 * feasibility.
8327 */
8328 if (period > max_cfs_quota_period)
8329 return -EINVAL;
8330
0e59bdae
KT
8331 /*
8332 * Prevent race between setting of cfs_rq->runtime_enabled and
8333 * unthrottle_offline_cfs_rqs().
8334 */
8335 get_online_cpus();
a790de99
PT
8336 mutex_lock(&cfs_constraints_mutex);
8337 ret = __cfs_schedulable(tg, period, quota);
8338 if (ret)
8339 goto out_unlock;
8340
58088ad0 8341 runtime_enabled = quota != RUNTIME_INF;
56f570e5 8342 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
8343 /*
8344 * If we need to toggle cfs_bandwidth_used, off->on must occur
8345 * before making related changes, and on->off must occur afterwards
8346 */
8347 if (runtime_enabled && !runtime_was_enabled)
8348 cfs_bandwidth_usage_inc();
ab84d31e
PT
8349 raw_spin_lock_irq(&cfs_b->lock);
8350 cfs_b->period = ns_to_ktime(period);
8351 cfs_b->quota = quota;
58088ad0 8352
a9cf55b2 8353 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 8354 /* restart the period timer (if active) to handle new period expiry */
77a4d1a1
PZ
8355 if (runtime_enabled)
8356 start_cfs_bandwidth(cfs_b);
ab84d31e
PT
8357 raw_spin_unlock_irq(&cfs_b->lock);
8358
0e59bdae 8359 for_each_online_cpu(i) {
ab84d31e 8360 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 8361 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
8362
8363 raw_spin_lock_irq(&rq->lock);
58088ad0 8364 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 8365 cfs_rq->runtime_remaining = 0;
671fd9da 8366
029632fb 8367 if (cfs_rq->throttled)
671fd9da 8368 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
8369 raw_spin_unlock_irq(&rq->lock);
8370 }
1ee14e6c
BS
8371 if (runtime_was_enabled && !runtime_enabled)
8372 cfs_bandwidth_usage_dec();
a790de99
PT
8373out_unlock:
8374 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 8375 put_online_cpus();
ab84d31e 8376
a790de99 8377 return ret;
ab84d31e
PT
8378}
8379
8380int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8381{
8382 u64 quota, period;
8383
029632fb 8384 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8385 if (cfs_quota_us < 0)
8386 quota = RUNTIME_INF;
8387 else
8388 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8389
8390 return tg_set_cfs_bandwidth(tg, period, quota);
8391}
8392
8393long tg_get_cfs_quota(struct task_group *tg)
8394{
8395 u64 quota_us;
8396
029632fb 8397 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
8398 return -1;
8399
029632fb 8400 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
8401 do_div(quota_us, NSEC_PER_USEC);
8402
8403 return quota_us;
8404}
8405
8406int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8407{
8408 u64 quota, period;
8409
8410 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 8411 quota = tg->cfs_bandwidth.quota;
ab84d31e 8412
ab84d31e
PT
8413 return tg_set_cfs_bandwidth(tg, period, quota);
8414}
8415
8416long tg_get_cfs_period(struct task_group *tg)
8417{
8418 u64 cfs_period_us;
8419
029632fb 8420 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8421 do_div(cfs_period_us, NSEC_PER_USEC);
8422
8423 return cfs_period_us;
8424}
8425
182446d0
TH
8426static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8427 struct cftype *cft)
ab84d31e 8428{
182446d0 8429 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
8430}
8431
182446d0
TH
8432static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8433 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 8434{
182446d0 8435 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
8436}
8437
182446d0
TH
8438static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8439 struct cftype *cft)
ab84d31e 8440{
182446d0 8441 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
8442}
8443
182446d0
TH
8444static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8445 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 8446{
182446d0 8447 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
8448}
8449
a790de99
PT
8450struct cfs_schedulable_data {
8451 struct task_group *tg;
8452 u64 period, quota;
8453};
8454
8455/*
8456 * normalize group quota/period to be quota/max_period
8457 * note: units are usecs
8458 */
8459static u64 normalize_cfs_quota(struct task_group *tg,
8460 struct cfs_schedulable_data *d)
8461{
8462 u64 quota, period;
8463
8464 if (tg == d->tg) {
8465 period = d->period;
8466 quota = d->quota;
8467 } else {
8468 period = tg_get_cfs_period(tg);
8469 quota = tg_get_cfs_quota(tg);
8470 }
8471
8472 /* note: these should typically be equivalent */
8473 if (quota == RUNTIME_INF || quota == -1)
8474 return RUNTIME_INF;
8475
8476 return to_ratio(period, quota);
8477}
8478
8479static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8480{
8481 struct cfs_schedulable_data *d = data;
029632fb 8482 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
8483 s64 quota = 0, parent_quota = -1;
8484
8485 if (!tg->parent) {
8486 quota = RUNTIME_INF;
8487 } else {
029632fb 8488 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
8489
8490 quota = normalize_cfs_quota(tg, d);
9c58c79a 8491 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
8492
8493 /*
8494 * ensure max(child_quota) <= parent_quota, inherit when no
8495 * limit is set
8496 */
8497 if (quota == RUNTIME_INF)
8498 quota = parent_quota;
8499 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8500 return -EINVAL;
8501 }
9c58c79a 8502 cfs_b->hierarchical_quota = quota;
a790de99
PT
8503
8504 return 0;
8505}
8506
8507static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8508{
8277434e 8509 int ret;
a790de99
PT
8510 struct cfs_schedulable_data data = {
8511 .tg = tg,
8512 .period = period,
8513 .quota = quota,
8514 };
8515
8516 if (quota != RUNTIME_INF) {
8517 do_div(data.period, NSEC_PER_USEC);
8518 do_div(data.quota, NSEC_PER_USEC);
8519 }
8520
8277434e
PT
8521 rcu_read_lock();
8522 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8523 rcu_read_unlock();
8524
8525 return ret;
a790de99 8526}
e8da1b18 8527
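/*
 * Worked example for the hierarchy check above (numbers assumed for
 * illustration): a parent with cfs_period_us=100000 and cfs_quota_us=50000
 * normalizes to a ratio of 0.5.  A child with quota 30000 over the same
 * period (ratio 0.3) is accepted; a child asking for 80000/100000 (ratio
 * 0.8) exceeds the parent's hierarchical quota and
 * tg_cfs_schedulable_down() returns -EINVAL.
 */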
2da8ca82 8528static int cpu_stats_show(struct seq_file *sf, void *v)
e8da1b18 8529{
2da8ca82 8530 struct task_group *tg = css_tg(seq_css(sf));
029632fb 8531 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 8532
44ffc75b
TH
8533 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8534 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8535 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18
NR
8536
8537 return 0;
8538}
ab84d31e 8539#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 8540#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8541
052f1dc7 8542#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
8543static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8544 struct cftype *cft, s64 val)
6f505b16 8545{
182446d0 8546 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
8547}
8548
182446d0
TH
8549static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8550 struct cftype *cft)
6f505b16 8551{
182446d0 8552 return sched_group_rt_runtime(css_tg(css));
6f505b16 8553}
d0b27fa7 8554
182446d0
TH
8555static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8556 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 8557{
182446d0 8558 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
8559}
8560
182446d0
TH
8561static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8562 struct cftype *cft)
d0b27fa7 8563{
182446d0 8564 return sched_group_rt_period(css_tg(css));
d0b27fa7 8565}
6d6bc0ad 8566#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8567
fe5c7cc2 8568static struct cftype cpu_files[] = {
052f1dc7 8569#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8570 {
8571 .name = "shares",
f4c753b7
PM
8572 .read_u64 = cpu_shares_read_u64,
8573 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8574 },
052f1dc7 8575#endif
ab84d31e
PT
8576#ifdef CONFIG_CFS_BANDWIDTH
8577 {
8578 .name = "cfs_quota_us",
8579 .read_s64 = cpu_cfs_quota_read_s64,
8580 .write_s64 = cpu_cfs_quota_write_s64,
8581 },
8582 {
8583 .name = "cfs_period_us",
8584 .read_u64 = cpu_cfs_period_read_u64,
8585 .write_u64 = cpu_cfs_period_write_u64,
8586 },
e8da1b18
NR
8587 {
8588 .name = "stat",
2da8ca82 8589 .seq_show = cpu_stats_show,
e8da1b18 8590 },
ab84d31e 8591#endif
052f1dc7 8592#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8593 {
9f0c1e56 8594 .name = "rt_runtime_us",
06ecb27c
PM
8595 .read_s64 = cpu_rt_runtime_read,
8596 .write_s64 = cpu_rt_runtime_write,
6f505b16 8597 },
d0b27fa7
PZ
8598 {
8599 .name = "rt_period_us",
f4c753b7
PM
8600 .read_u64 = cpu_rt_period_read_uint,
8601 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8602 },
052f1dc7 8603#endif
4baf6e33 8604 { } /* terminate */
68318b8e
SV
8605};
8606
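/*
 * Illustrative sketch of how the files above are wired up (the cgroup-v1
 * file names are as declared in cpu_files[]; the write values are assumed
 * for the example): writing "512" to cpu.shares reaches
 * cpu_shares_write_u64() and ends up in
 * sched_group_set_shares(tg, scale_load(512)); writing "-1" to
 * cpu.cfs_quota_us maps to RUNTIME_INF via tg_set_cfs_quota() and removes
 * the bandwidth limit for that group.
 */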
073219e9 8607struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748 8608 .css_alloc = cpu_cgroup_css_alloc,
2f5177f0 8609 .css_released = cpu_cgroup_css_released,
92fb9748 8610 .css_free = cpu_cgroup_css_free,
eeb61e53 8611 .fork = cpu_cgroup_fork,
bb9d97b6
TH
8612 .can_attach = cpu_cgroup_can_attach,
8613 .attach = cpu_cgroup_attach,
5577964e 8614 .legacy_cftypes = cpu_files,
b38e42e9 8615 .early_init = true,
68318b8e
SV
8616};
8617
052f1dc7 8618#endif /* CONFIG_CGROUP_SCHED */
d842de87 8619
b637a328
PM
8620void dump_cpu_task(int cpu)
8621{
8622 pr_info("Task dump for CPU %d:\n", cpu);
8623 sched_show_task(cpu_curr(cpu));
8624}
ed82b8a1
AK
8625
8626/*
8627 * Nice levels are multiplicative, with a gentle 10% change for every
8628 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
8629 * nice 1, it will get ~10% less CPU time than another CPU-bound task
8630 * that remained on nice 0.
8631 *
8632 * The "10% effect" is relative and cumulative: from _any_ nice level,
8633 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
8634 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
8635 * If a task goes up by ~10% and another task goes down by ~10% then
8636 * the relative distance between them is ~25%.)
8637 */
8638const int sched_prio_to_weight[40] = {
8639 /* -20 */ 88761, 71755, 56483, 46273, 36291,
8640 /* -15 */ 29154, 23254, 18705, 14949, 11916,
8641 /* -10 */ 9548, 7620, 6100, 4904, 3906,
8642 /* -5 */ 3121, 2501, 1991, 1586, 1277,
8643 /* 0 */ 1024, 820, 655, 526, 423,
8644 /* 5 */ 335, 272, 215, 172, 137,
8645 /* 10 */ 110, 87, 70, 56, 45,
8646 /* 15 */ 36, 29, 23, 18, 15,
8647};
8648
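/*
 * Worked example for the table above (arithmetic only, values taken
 * straight from sched_prio_to_weight[]): a nice-0 task has weight 1024 and
 * a nice-1 task weight 820.  Competing on one CPU they receive time in the
 * ratio 1024:820, i.e. roughly 55.5% vs 44.5%, which is the intended ~10%
 * step per nice level (1024/820 ~= 1.25).
 */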
8649/*
8650 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
8651 *
8652 * In cases where the weight does not change often, we can use the
8653 * precalculated inverse to speed up arithmetics by turning divisions
8654 * into multiplications:
8655 */
8656const u32 sched_prio_to_wmult[40] = {
8657 /* -20 */ 48388, 59856, 76040, 92818, 118348,
8658 /* -15 */ 147320, 184698, 229616, 287308, 360437,
8659 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
8660 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
8661 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
8662 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
8663 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
8664 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
8665};
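/*
 * Sketch of how the inverse table is meant to be used (an assumed example,
 * not a copy of the fair-class code, which additionally guards against
 * 64-bit multiplication overflow): dividing by a task's weight is replaced
 * by multiplying with the precomputed 2^32/weight value and shifting.
 */
static inline u64 example_div_by_weight(u64 delta_exec, int idx)
{
	/* Approximately delta_exec / sched_prio_to_weight[idx]. */
	return (delta_exec * sched_prio_to_wmult[idx]) >> 32;
}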