/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	mutex_lock(&inode->i_mutex);
	i = sched_feat_set(cmp);
	mutex_unlock(&inode->i_mutex);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
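
/*
 * Illustrative usage note (not part of the original source): with
 * CONFIG_SCHED_DEBUG enabled, the "sched_features" file created above can
 * be used to inspect and toggle feature bits at runtime.  Assuming debugfs
 * is mounted at /sys/kernel/debug, something like:
 *
 *	# cat /sys/kernel/debug/sched_features
 *	# echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * where the "NO_" prefix clears a bit, exactly as parsed by
 * sched_feat_set() above.
 */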
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, val)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
	for (;;) {							\
		__old = cmpxchg((ptr), __val, __val | (val));		\
		if (__old == __val)					\
			break;						\
		__val = __old;						\
	}								\
	__old;								\
})
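
/*
 * Example (illustrative only): fetch_or() returns the value the word held
 * *before* the OR, so a caller can set a bit and learn atomically whether
 * some other bit was already set, e.g.:
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *	if (!(old & _TIF_POLLING_NRFLAG))
 *		;	// remote CPU was not polling, an IPI is needed
 *
 * which is the pattern used by set_nr_and_not_polling() below.
 */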

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task; if ->wake_q is non-nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
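
/*
 * Typical usage sketch (illustrative, not from this file): a caller builds
 * a wake list while holding a lock and only issues the wakeups after
 * dropping it, e.g.:
 *
 *	WAKE_Q(wake_q);
 *	...
 *	wake_q_add(&wake_q, some_task);	// while holding the lock
 *	...
 *	wake_up_q(&wake_q);		// after unlocking
 *
 * WAKE_Q() here refers to the on-stack initializer from <linux/sched.h>;
 * see the futex and rtmutex code for real users of this pattern.
 */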

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	/*
	 * FIFO realtime policy runs the highest priority task. Other runnable
	 * tasks are of a lower priority. The scheduler tick does nothing.
	 */
	if (current->policy == SCHED_FIFO)
		return true;

	/*
	 * Round-robin realtime tasks time slice with other tasks at the same
	 * realtime priority. Is this task the only one at this priority?
	 */
	if (current->policy == SCHED_RR) {
		struct sched_rt_entity *rt_se = &current->rt;

		return rt_se->run_list.prev == rt_se->run_list.next;
	}

	/*
	 * More than one running task needs preemption.
	 * The nr_running update is assumed to be visible
	 * after the IPI is sent from the wakers.
	 */
	if (this_rq()->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}
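
/*
 * Usage sketch (illustrative): walk_tg_tree_from() is normally reached via
 * the walk_tg_tree() wrapper, which starts at the root task group,
 * conceptually:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 *
 * where my_down_visitor is a caller-supplied tg_visitor and tg_nop()
 * (below) is the no-op visitor used when only one direction matters.
 */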

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	dequeue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	p->on_rq = TASK_ON_RQ_QUEUED;
	enqueue_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, 0);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, 0);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		lockdep_unpin_lock(&rq->lock);
		rq = move_queued_task(rq, p, dest_cpu);
		lockdep_pin_lock(&rq->lock);
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
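
/*
 * Illustrative caller sketch (not from this file): a kernel thread can pin
 * itself to a single CPU with, for example:
 *
 *	set_cpus_allowed_ptr(current, cpumask_of(target_cpu));
 *
 * where target_cpu is whatever CPU the caller chose; kthread_bind() is the
 * preferred interface for threads that must never migrate.
 */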

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);
	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, queued;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
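
/*
 * Note (added for clarity): update_avg() is a simple exponentially weighted
 * moving average, avg += (sample - avg) / 8, i.e. roughly
 * avg = 7/8 * avg + 1/8 * sample.  For example, feeding a constant sample
 * of 800 into an avg that starts at 0 yields 100, 187, 263, ... and
 * converges towards 800.
 */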
25834c73
PZ
1644
1645#else
1646
1647static inline int __set_cpus_allowed_ptr(struct task_struct *p,
1648 const struct cpumask *new_mask, bool check)
1649{
1650 return set_cpus_allowed_ptr(p, new_mask);
1651}
1652
5cc389bc 1653#endif /* CONFIG_SMP */
970b13ba 1654
d7c01d27 1655static void
b84cb5df 1656ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 1657{
d7c01d27 1658#ifdef CONFIG_SCHEDSTATS
b84cb5df
PZ
1659 struct rq *rq = this_rq();
1660
d7c01d27
PZ
1661#ifdef CONFIG_SMP
1662 int this_cpu = smp_processor_id();
1663
1664 if (cpu == this_cpu) {
1665 schedstat_inc(rq, ttwu_local);
1666 schedstat_inc(p, se.statistics.nr_wakeups_local);
1667 } else {
1668 struct sched_domain *sd;
1669
1670 schedstat_inc(p, se.statistics.nr_wakeups_remote);
057f3fad 1671 rcu_read_lock();
d7c01d27
PZ
1672 for_each_domain(this_cpu, sd) {
1673 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1674 schedstat_inc(sd, ttwu_wake_remote);
1675 break;
1676 }
1677 }
057f3fad 1678 rcu_read_unlock();
d7c01d27 1679 }
f339b9dc
PZ
1680
1681 if (wake_flags & WF_MIGRATED)
1682 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1683
d7c01d27
PZ
1684#endif /* CONFIG_SMP */
1685
1686 schedstat_inc(rq, ttwu_count);
9ed3811a 1687 schedstat_inc(p, se.statistics.nr_wakeups);
d7c01d27
PZ
1688
1689 if (wake_flags & WF_SYNC)
9ed3811a 1690 schedstat_inc(p, se.statistics.nr_wakeups_sync);
d7c01d27 1691
d7c01d27
PZ
1692#endif /* CONFIG_SCHEDSTATS */
1693}
1694
1695static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1696{
9ed3811a 1697 activate_task(rq, p, en_flags);
da0c1e65 1698 p->on_rq = TASK_ON_RQ_QUEUED;
c2f7115e
PZ
1699
1700 /* if a worker is waking up, notify workqueue */
1701 if (p->flags & PF_WQ_WORKER)
1702 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
1703}
1704
23f41eeb
PZ
1705/*
1706 * Mark the task runnable and perform wakeup-preemption.
1707 */
89363381 1708static void
23f41eeb 1709ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
9ed3811a 1710{
9ed3811a 1711 check_preempt_curr(rq, p, wake_flags);
9ed3811a 1712 p->state = TASK_RUNNING;
fbd705a0
PZ
1713 trace_sched_wakeup(p);
1714
9ed3811a 1715#ifdef CONFIG_SMP
4c9a4bc8
PZ
1716 if (p->sched_class->task_woken) {
1717 /*
cbce1a68
PZ
1718 * Our task @p is fully woken up and running; so its safe to
1719 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 1720 */
cbce1a68 1721 lockdep_unpin_lock(&rq->lock);
9ed3811a 1722 p->sched_class->task_woken(rq, p);
cbce1a68 1723 lockdep_pin_lock(&rq->lock);
4c9a4bc8 1724 }
9ed3811a 1725
e69c6341 1726 if (rq->idle_stamp) {
78becc27 1727 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 1728 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 1729
abfafa54
JL
1730 update_avg(&rq->avg_idle, delta);
1731
1732 if (rq->avg_idle > max)
9ed3811a 1733 rq->avg_idle = max;
abfafa54 1734
9ed3811a
TH
1735 rq->idle_stamp = 0;
1736 }
1737#endif
1738}
1739
c05fbafb
PZ
1740static void
1741ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1742{
cbce1a68
PZ
1743 lockdep_assert_held(&rq->lock);
1744
c05fbafb
PZ
1745#ifdef CONFIG_SMP
1746 if (p->sched_contributes_to_load)
1747 rq->nr_uninterruptible--;
1748#endif
1749
1750 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1751 ttwu_do_wakeup(rq, p, wake_flags);
1752}
1753
1754/*
1755 * Called in case the task @p isn't fully descheduled from its runqueue,
1756 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
1757 * since all we need to do is flip p->state to TASK_RUNNING, since
1758 * the task is still ->on_rq.
1759 */
1760static int ttwu_remote(struct task_struct *p, int wake_flags)
1761{
1762 struct rq *rq;
1763 int ret = 0;
1764
1765 rq = __task_rq_lock(p);
da0c1e65 1766 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
1767 /* check_preempt_curr() may use rq clock */
1768 update_rq_clock(rq);
c05fbafb
PZ
1769 ttwu_do_wakeup(rq, p, wake_flags);
1770 ret = 1;
1771 }
1772 __task_rq_unlock(rq);
1773
1774 return ret;
1775}
1776
317f3941 1777#ifdef CONFIG_SMP
e3baac47 1778void sched_ttwu_pending(void)
317f3941
PZ
1779{
1780 struct rq *rq = this_rq();
fa14ff4a
PZ
1781 struct llist_node *llist = llist_del_all(&rq->wake_list);
1782 struct task_struct *p;
e3baac47 1783 unsigned long flags;
317f3941 1784
e3baac47
PZ
1785 if (!llist)
1786 return;
1787
1788 raw_spin_lock_irqsave(&rq->lock, flags);
cbce1a68 1789 lockdep_pin_lock(&rq->lock);
317f3941 1790
fa14ff4a
PZ
1791 while (llist) {
1792 p = llist_entry(llist, struct task_struct, wake_entry);
1793 llist = llist_next(llist);
317f3941
PZ
1794 ttwu_do_activate(rq, p, 0);
1795 }
1796
cbce1a68 1797 lockdep_unpin_lock(&rq->lock);
e3baac47 1798 raw_spin_unlock_irqrestore(&rq->lock, flags);
317f3941
PZ
1799}
1800
1801void scheduler_ipi(void)
1802{
f27dde8d
PZ
1803 /*
1804 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1805 * TIF_NEED_RESCHED remotely (for the first time) will also send
1806 * this IPI.
1807 */
8cb75e0c 1808 preempt_fold_need_resched();
f27dde8d 1809
fd2ac4f4 1810 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
c5d753a5
PZ
1811 return;
1812
1813 /*
1814 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1815 * traditionally all their work was done from the interrupt return
1816 * path. Now that we actually do some work, we need to make sure
1817 * we do call them.
1818 *
1819 * Some archs already do call them, luckily irq_enter/exit nest
1820 * properly.
1821 *
1822 * Arguably we should visit all archs and update all handlers,
1823 * however a fair share of IPIs are still resched only so this would
1824 * somewhat pessimize the simple resched case.
1825 */
1826 irq_enter();
fa14ff4a 1827 sched_ttwu_pending();
ca38062e
SS
1828
1829 /*
1830 * Check if someone kicked us for doing the nohz idle load balance.
1831 */
873b4c65 1832 if (unlikely(got_nohz_idle_kick())) {
6eb57e0d 1833 this_rq()->idle_balance = 1;
ca38062e 1834 raise_softirq_irqoff(SCHED_SOFTIRQ);
6eb57e0d 1835 }
c5d753a5 1836 irq_exit();
317f3941
PZ
1837}
1838
1839static void ttwu_queue_remote(struct task_struct *p, int cpu)
1840{
e3baac47
PZ
1841 struct rq *rq = cpu_rq(cpu);
1842
1843 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1844 if (!set_nr_if_polling(rq->idle))
1845 smp_send_reschedule(cpu);
1846 else
1847 trace_sched_wake_idle_without_ipi(cpu);
1848 }
317f3941 1849}
d6aa8f85 1850
f6be8af1
CL
1851void wake_up_if_idle(int cpu)
1852{
1853 struct rq *rq = cpu_rq(cpu);
1854 unsigned long flags;
1855
fd7de1e8
AL
1856 rcu_read_lock();
1857
1858 if (!is_idle_task(rcu_dereference(rq->curr)))
1859 goto out;
f6be8af1
CL
1860
1861 if (set_nr_if_polling(rq->idle)) {
1862 trace_sched_wake_idle_without_ipi(cpu);
1863 } else {
1864 raw_spin_lock_irqsave(&rq->lock, flags);
1865 if (is_idle_task(rq->curr))
1866 smp_send_reschedule(cpu);
1867 /* Else cpu is not in idle, do nothing here */
1868 raw_spin_unlock_irqrestore(&rq->lock, flags);
1869 }
fd7de1e8
AL
1870
1871out:
1872 rcu_read_unlock();
f6be8af1
CL
1873}
1874
39be3501 1875bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
1876{
1877 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1878}
d6aa8f85 1879#endif /* CONFIG_SMP */
317f3941 1880
c05fbafb
PZ
1881static void ttwu_queue(struct task_struct *p, int cpu)
1882{
1883 struct rq *rq = cpu_rq(cpu);
1884
17d9f311 1885#if defined(CONFIG_SMP)
39be3501 1886 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
f01114cb 1887 sched_clock_cpu(cpu); /* sync clocks x-cpu */
317f3941
PZ
1888 ttwu_queue_remote(p, cpu);
1889 return;
1890 }
1891#endif
1892
c05fbafb 1893 raw_spin_lock(&rq->lock);
cbce1a68 1894 lockdep_pin_lock(&rq->lock);
c05fbafb 1895 ttwu_do_activate(rq, p, 0);
cbce1a68 1896 lockdep_unpin_lock(&rq->lock);
c05fbafb 1897 raw_spin_unlock(&rq->lock);
9ed3811a
TH
1898}
1899
1900/**
1da177e4 1901 * try_to_wake_up - wake up a thread
9ed3811a 1902 * @p: the thread to be awakened
1da177e4 1903 * @state: the mask of task states that can be woken
9ed3811a 1904 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
1905 *
1906 * Put it on the run-queue if it's not already there. The "current"
1907 * thread is always on the run-queue (except when the actual
1908 * re-schedule is in progress), and as such you're allowed to do
1909 * the simpler "current->state = TASK_RUNNING" to mark yourself
1910 * runnable without the overhead of this.
1911 *
e69f6186 1912 * Return: %true if @p was woken up, %false if it was already running
9ed3811a 1913 * or @state didn't match @p's state.
1da177e4 1914 */
e4a52bcb
PZ
1915static int
1916try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 1917{
1da177e4 1918 unsigned long flags;
c05fbafb 1919 int cpu, success = 0;
2398f2c6 1920
e0acd0a6
ON
1921 /*
1922 * If we are going to wake up a thread waiting for CONDITION, we
1923 * need to ensure that the CONDITION=1 store done by the caller cannot be
1924 * reordered with the p->state check below. This pairs with the mb() in
1925 * set_current_state() done by the waiting thread.
1926 */
1927 smp_mb__before_spinlock();
013fdb80 1928 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 1929 if (!(p->state & state))
1da177e4
LT
1930 goto out;
1931
fbd705a0
PZ
1932 trace_sched_waking(p);
1933
c05fbafb 1934 success = 1; /* we're going to change ->state */
1da177e4 1935 cpu = task_cpu(p);
1da177e4 1936
c05fbafb
PZ
1937 if (p->on_rq && ttwu_remote(p, wake_flags))
1938 goto stat;
1da177e4 1939
1da177e4 1940#ifdef CONFIG_SMP
e9c84311 1941 /*
c05fbafb
PZ
1942 * If the owning (remote) cpu is still in the middle of schedule() with
1943 * this task as prev, wait until it's done referencing the task.
e9c84311 1944 */
f3e94786 1945 while (p->on_cpu)
e4a52bcb 1946 cpu_relax();
0970d299 1947 /*
e4a52bcb 1948 * Pairs with the smp_wmb() in finish_lock_switch().
0970d299 1949 */
e4a52bcb 1950 smp_rmb();
1da177e4 1951
a8e4f2ea 1952 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 1953 p->state = TASK_WAKING;
e7693a36 1954
e4a52bcb 1955 if (p->sched_class->task_waking)
74f8e4b2 1956 p->sched_class->task_waking(p);
efbbd05a 1957
ac66f547 1958 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
1959 if (task_cpu(p) != cpu) {
1960 wake_flags |= WF_MIGRATED;
e4a52bcb 1961 set_task_cpu(p, cpu);
f339b9dc 1962 }
1da177e4 1963#endif /* CONFIG_SMP */
1da177e4 1964
c05fbafb
PZ
1965 ttwu_queue(p, cpu);
1966stat:
b84cb5df 1967 ttwu_stat(p, cpu, wake_flags);
1da177e4 1968out:
013fdb80 1969 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
1970
1971 return success;
1972}
1973
21aa9af0
TH
1974/**
1975 * try_to_wake_up_local - try to wake up a local task with rq lock held
1976 * @p: the thread to be awakened
1977 *
2acca55e 1978 * Put @p on the run-queue if it's not already there. The caller must
21aa9af0 1979 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2acca55e 1980 * the current task.
21aa9af0
TH
1981 */
1982static void try_to_wake_up_local(struct task_struct *p)
1983{
1984 struct rq *rq = task_rq(p);
21aa9af0 1985
383efcd0
TH
1986 if (WARN_ON_ONCE(rq != this_rq()) ||
1987 WARN_ON_ONCE(p == current))
1988 return;
1989
21aa9af0
TH
1990 lockdep_assert_held(&rq->lock);
1991
2acca55e 1992 if (!raw_spin_trylock(&p->pi_lock)) {
cbce1a68
PZ
1993 /*
1994 * This is OK, because current is on_cpu, which avoids it being
1995 * picked for load-balance; preemption/IRQs are still
1996 * disabled, avoiding further scheduler activity on it; and we've
1997 * not yet picked a replacement task.
1998 */
1999 lockdep_unpin_lock(&rq->lock);
2acca55e
PZ
2000 raw_spin_unlock(&rq->lock);
2001 raw_spin_lock(&p->pi_lock);
2002 raw_spin_lock(&rq->lock);
cbce1a68 2003 lockdep_pin_lock(&rq->lock);
2acca55e
PZ
2004 }
2005
21aa9af0 2006 if (!(p->state & TASK_NORMAL))
2acca55e 2007 goto out;
21aa9af0 2008
fbd705a0
PZ
2009 trace_sched_waking(p);
2010
da0c1e65 2011 if (!task_on_rq_queued(p))
d7c01d27
PZ
2012 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2013
23f41eeb 2014 ttwu_do_wakeup(rq, p, 0);
b84cb5df 2015 ttwu_stat(p, smp_processor_id(), 0);
2acca55e
PZ
2016out:
2017 raw_spin_unlock(&p->pi_lock);
21aa9af0
TH
2018}
2019
50fa610a
DH
2020/**
2021 * wake_up_process - Wake up a specific process
2022 * @p: The process to be woken up.
2023 *
2024 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
2025 * processes.
2026 *
2027 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a
DH
2028 *
2029 * It may be assumed that this function implies a write memory barrier before
2030 * changing the task state if and only if any tasks are woken up.
2031 */
7ad5b3a5 2032int wake_up_process(struct task_struct *p)
1da177e4 2033{
9067ac85
ON
2034 WARN_ON(task_is_stopped_or_traced(p));
2035 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 2036}
1da177e4
LT
2037EXPORT_SYMBOL(wake_up_process);
2038
7ad5b3a5 2039int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2040{
2041 return try_to_wake_up(p, state, 0);
2042}
2043
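/*
 * Example (illustrative sketch, not part of this file): the canonical
 * sleep/wake pattern wake_up_process() above is meant for.  The sleeper
 * publishes TASK_INTERRUPTIBLE before re-checking its condition, the
 * waker sets the condition before waking, and the ordering documented in
 * try_to_wake_up() makes the pair race-free.  The demo_* names are made
 * up; locking/annotation of demo_flag is omitted for brevity.
 *
 *	static struct task_struct *demo_task;
 *	static bool demo_flag;
 *
 *	static int demo_thread(void *unused)
 *	{
 *		while (!kthread_should_stop()) {
 *			set_current_state(TASK_INTERRUPTIBLE);	// publish state first
 *			if (!demo_flag && !kthread_should_stop())
 *				schedule();			// sleep until woken
 *			__set_current_state(TASK_RUNNING);
 *			demo_flag = false;
 *			// ... do the actual work ...
 *		}
 *		return 0;
 *	}
 *
 *	static void demo_kick(void)
 *	{
 *		demo_flag = true;		// CONDITION=1 before the wakeup
 *		wake_up_process(demo_task);	// implies the barrier noted above
 *	}
 *
 *	// setup, e.g. from module init:
 *	//	demo_task = kthread_run(demo_thread, NULL, "demo");
 */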
a5e7be3b
JL
2044/*
2045 * This function clears the sched_dl_entity static params.
2046 */
2047void __dl_clear_params(struct task_struct *p)
2048{
2049 struct sched_dl_entity *dl_se = &p->dl;
2050
2051 dl_se->dl_runtime = 0;
2052 dl_se->dl_deadline = 0;
2053 dl_se->dl_period = 0;
2054 dl_se->flags = 0;
2055 dl_se->dl_bw = 0;
40767b0d
PZ
2056
2057 dl_se->dl_throttled = 0;
2058 dl_se->dl_new = 1;
2059 dl_se->dl_yielded = 0;
a5e7be3b
JL
2060}
2061
1da177e4
LT
2062/*
2063 * Perform scheduler related setup for a newly forked process p.
2064 * p is forked by current.
dd41f596
IM
2065 *
2066 * __sched_fork() is basic setup used by init_idle() too:
2067 */
5e1576ed 2068static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2069{
fd2f4419
PZ
2070 p->on_rq = 0;
2071
2072 p->se.on_rq = 0;
dd41f596
IM
2073 p->se.exec_start = 0;
2074 p->se.sum_exec_runtime = 0;
f6cf891c 2075 p->se.prev_sum_exec_runtime = 0;
6c594c21 2076 p->se.nr_migrations = 0;
da7a735e 2077 p->se.vruntime = 0;
fd2f4419 2078 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d
IM
2079
2080#ifdef CONFIG_SCHEDSTATS
41acab88 2081 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2082#endif
476d139c 2083
aab03e05 2084 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 2085 init_dl_task_timer(&p->dl);
a5e7be3b 2086 __dl_clear_params(p);
aab03e05 2087
fa717060 2088 INIT_LIST_HEAD(&p->rt.run_list);
476d139c 2089
e107be36
AK
2090#ifdef CONFIG_PREEMPT_NOTIFIERS
2091 INIT_HLIST_HEAD(&p->preempt_notifiers);
2092#endif
cbee9f88
PZ
2093
2094#ifdef CONFIG_NUMA_BALANCING
2095 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
7e8d16b6 2096 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
cbee9f88
PZ
2097 p->mm->numa_scan_seq = 0;
2098 }
2099
5e1576ed
RR
2100 if (clone_flags & CLONE_VM)
2101 p->numa_preferred_nid = current->numa_preferred_nid;
2102 else
2103 p->numa_preferred_nid = -1;
2104
cbee9f88
PZ
2105 p->node_stamp = 0ULL;
2106 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
4b96a29b 2107 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
cbee9f88 2108 p->numa_work.next = &p->numa_work;
44dba3d5 2109 p->numa_faults = NULL;
7e2703e6
RR
2110 p->last_task_numa_placement = 0;
2111 p->last_sum_exec_runtime = 0;
8c8a743c 2112
8c8a743c 2113 p->numa_group = NULL;
cbee9f88 2114#endif /* CONFIG_NUMA_BALANCING */
dd41f596
IM
2115}
2116
2a595721
SD
2117DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2118
1a687c2e 2119#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 2120
1a687c2e
MG
2121void set_numabalancing_state(bool enabled)
2122{
2a595721
SD
2123 if (enabled)
2124 static_branch_enable(&sched_numa_balancing);
2125 else
2126 static_branch_disable(&sched_numa_balancing);
c3b9bc5b 2127}
54a43d54
AK
2128
2129#ifdef CONFIG_PROC_SYSCTL
2130int sysctl_numa_balancing(struct ctl_table *table, int write,
2131 void __user *buffer, size_t *lenp, loff_t *ppos)
2132{
2133 struct ctl_table t;
2134 int err;
2a595721 2135 int state = static_branch_likely(&sched_numa_balancing);
54a43d54
AK
2136
2137 if (write && !capable(CAP_SYS_ADMIN))
2138 return -EPERM;
2139
2140 t = *table;
2141 t.data = &state;
2142 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2143 if (err < 0)
2144 return err;
2145 if (write)
2146 set_numabalancing_state(state);
2147 return err;
2148}
2149#endif
2150#endif
dd41f596
IM
2151
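/*
 * Example (illustrative, userspace): set_numabalancing_state() above is
 * reached from userspace through the kernel.numa_balancing sysctl, i.e.
 * /proc/sys/kernel/numa_balancing.  A minimal sketch that enables it
 * (needs root and a CONFIG_NUMA_BALANCING kernel):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/numa_balancing", "w");
 *
 *		if (!f) {
 *			perror("numa_balancing");
 *			return 1;
 *		}
 *		fputs("1\n", f);	// "1" enables, "0" disables
 *		return fclose(f) ? 1 : 0;
 *	}
 */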
2152/*
2153 * fork()/clone()-time setup:
2154 */
aab03e05 2155int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2156{
0122ec5b 2157 unsigned long flags;
dd41f596
IM
2158 int cpu = get_cpu();
2159
5e1576ed 2160 __sched_fork(clone_flags, p);
06b83b5f 2161 /*
0017d735 2162 * We mark the process as running here. This guarantees that
06b83b5f
PZ
2163 * nobody will actually run it, and a signal or other external
2164 * event cannot wake it up and insert it on the runqueue either.
2165 */
0017d735 2166 p->state = TASK_RUNNING;
dd41f596 2167
c350a04e
MG
2168 /*
2169 * Make sure we do not leak PI boosting priority to the child.
2170 */
2171 p->prio = current->normal_prio;
2172
b9dc29e7
MG
2173 /*
2174 * Revert to default priority/policy on fork if requested.
2175 */
2176 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 2177 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 2178 p->policy = SCHED_NORMAL;
6c697bdf 2179 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
2180 p->rt_priority = 0;
2181 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2182 p->static_prio = NICE_TO_PRIO(0);
2183
2184 p->prio = p->normal_prio = __normal_prio(p);
2185 set_load_weight(p);
6c697bdf 2186
b9dc29e7
MG
2187 /*
2188 * We don't need the reset flag anymore after the fork. It has
2189 * fulfilled its duty:
2190 */
2191 p->sched_reset_on_fork = 0;
2192 }
ca94c442 2193
aab03e05
DF
2194 if (dl_prio(p->prio)) {
2195 put_cpu();
2196 return -EAGAIN;
2197 } else if (rt_prio(p->prio)) {
2198 p->sched_class = &rt_sched_class;
2199 } else {
2ddbf952 2200 p->sched_class = &fair_sched_class;
aab03e05 2201 }
b29739f9 2202
cd29fe6f
PZ
2203 if (p->sched_class->task_fork)
2204 p->sched_class->task_fork(p);
2205
86951599
PZ
2206 /*
2207 * The child is not yet in the pid-hash so no cgroup attach races,
2208 * and the cgroup is pinned to this child because cgroup_fork()
2209 * is run before sched_fork().
2210 *
2211 * Silence PROVE_RCU.
2212 */
0122ec5b 2213 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 2214 set_task_cpu(p, cpu);
0122ec5b 2215 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 2216
f6db8347 2217#ifdef CONFIG_SCHED_INFO
dd41f596 2218 if (likely(sched_info_on()))
52f17b6c 2219 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2220#endif
3ca7a440
PZ
2221#if defined(CONFIG_SMP)
2222 p->on_cpu = 0;
4866cde0 2223#endif
01028747 2224 init_task_preempt_count(p);
806c09a7 2225#ifdef CONFIG_SMP
917b627d 2226 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 2227 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 2228#endif
917b627d 2229
476d139c 2230 put_cpu();
aab03e05 2231 return 0;
1da177e4
LT
2232}
2233
332ac17e
DF
2234unsigned long to_ratio(u64 period, u64 runtime)
2235{
2236 if (runtime == RUNTIME_INF)
2237 return 1ULL << 20;
2238
2239 /*
2240 * Doing this here saves a lot of checks in all
2241 * the calling paths, and returning zero seems
2242 * safe for them anyway.
2243 */
2244 if (period == 0)
2245 return 0;
2246
2247 return div64_u64(runtime << 20, period);
2248}
2249
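/*
 * Example (illustrative, userspace): the bandwidth of a -deadline task is
 * kept as runtime/period in 20-bit fixed point, so 1 << 20 means a whole
 * CPU.  dl_overflow() below sums such ratios and rejects changes that
 * would exceed the root domain's capacity (by default a little under one
 * full CPU per CPU).  The sketch mirrors the arithmetic only; the
 * RUNTIME_INF special case and the real admission test are left out.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint64_t to_ratio(uint64_t period, uint64_t runtime)
 *	{
 *		if (period == 0)
 *			return 0;
 *		return (runtime << 20) / period;	// same math as above
 *	}
 *
 *	int main(void)
 *	{
 *		// 10ms of runtime every 100ms -> 10% of one CPU
 *		uint64_t bw = to_ratio(100000000ULL, 10000000ULL);
 *		uint64_t admitted = 3 * (1ULL << 20);	// already-admitted bandwidth
 *
 *		printf("bw = %llu (%.1f%% of one CPU)\n",
 *		       (unsigned long long)bw, 100.0 * bw / (1 << 20));
 *
 *		// crude admission check over 4 CPUs with a 100%-per-CPU cap
 *		printf("admit: %s\n",
 *		       admitted + bw <= (4ULL << 20) ? "yes" : "no");
 *		return 0;
 *	}
 */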
2250#ifdef CONFIG_SMP
2251inline struct dl_bw *dl_bw_of(int i)
2252{
f78f5b90
PM
2253 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2254 "sched RCU must be held");
332ac17e
DF
2255 return &cpu_rq(i)->rd->dl_bw;
2256}
2257
de212f18 2258static inline int dl_bw_cpus(int i)
332ac17e 2259{
de212f18
PZ
2260 struct root_domain *rd = cpu_rq(i)->rd;
2261 int cpus = 0;
2262
f78f5b90
PM
2263 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2264 "sched RCU must be held");
de212f18
PZ
2265 for_each_cpu_and(i, rd->span, cpu_active_mask)
2266 cpus++;
2267
2268 return cpus;
332ac17e
DF
2269}
2270#else
2271inline struct dl_bw *dl_bw_of(int i)
2272{
2273 return &cpu_rq(i)->dl.dl_bw;
2274}
2275
de212f18 2276static inline int dl_bw_cpus(int i)
332ac17e
DF
2277{
2278 return 1;
2279}
2280#endif
2281
332ac17e
DF
2282/*
2283 * We must be sure that accepting a new task (or allowing changing the
2284 * parameters of an existing one) is consistent with the bandwidth
2285 * constraints. If yes, this function also accordingly updates the currently
2286 * allocated bandwidth to reflect the new situation.
2287 *
2288 * This function is called while holding p's rq->lock.
40767b0d
PZ
2289 *
2290 * XXX we should delay bw change until the task's 0-lag point, see
2291 * __setparam_dl().
332ac17e
DF
2292 */
2293static int dl_overflow(struct task_struct *p, int policy,
2294 const struct sched_attr *attr)
2295{
2296
2297 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
4df1638c 2298 u64 period = attr->sched_period ?: attr->sched_deadline;
332ac17e
DF
2299 u64 runtime = attr->sched_runtime;
2300 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
de212f18 2301 int cpus, err = -1;
332ac17e
DF
2302
2303 if (new_bw == p->dl.dl_bw)
2304 return 0;
2305
2306 /*
2307 * Whether a task enters, leaves, or stays -deadline but changes
2308 * its parameters, we may need to update the total allocated
2309 * bandwidth of the container accordingly.
2310 */
2311 raw_spin_lock(&dl_b->lock);
de212f18 2312 cpus = dl_bw_cpus(task_cpu(p));
332ac17e
DF
2313 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2314 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2315 __dl_add(dl_b, new_bw);
2316 err = 0;
2317 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2318 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2319 __dl_clear(dl_b, p->dl.dl_bw);
2320 __dl_add(dl_b, new_bw);
2321 err = 0;
2322 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2323 __dl_clear(dl_b, p->dl.dl_bw);
2324 err = 0;
2325 }
2326 raw_spin_unlock(&dl_b->lock);
2327
2328 return err;
2329}
2330
2331extern void init_dl_bw(struct dl_bw *dl_b);
2332
1da177e4
LT
2333/*
2334 * wake_up_new_task - wake up a newly created task for the first time.
2335 *
2336 * This function will do some initial scheduler statistics housekeeping
2337 * that must be done for every newly created context, then puts the task
2338 * on the runqueue and wakes it.
2339 */
3e51e3ed 2340void wake_up_new_task(struct task_struct *p)
1da177e4
LT
2341{
2342 unsigned long flags;
dd41f596 2343 struct rq *rq;
fabf318e 2344
ab2515c4 2345 raw_spin_lock_irqsave(&p->pi_lock, flags);
98d8fd81
MR
2346 /* Initialize new task's runnable average */
2347 init_entity_runnable_average(&p->se);
fabf318e
PZ
2348#ifdef CONFIG_SMP
2349 /*
2350 * Fork balancing, do it here and not earlier because:
2351 * - cpus_allowed can change in the fork path
2352 * - any previously selected cpu might disappear through hotplug
fabf318e 2353 */
ac66f547 2354 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
0017d735
PZ
2355#endif
2356
ab2515c4 2357 rq = __task_rq_lock(p);
cd29fe6f 2358 activate_task(rq, p, 0);
da0c1e65 2359 p->on_rq = TASK_ON_RQ_QUEUED;
fbd705a0 2360 trace_sched_wakeup_new(p);
a7558e01 2361 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2362#ifdef CONFIG_SMP
efbbd05a
PZ
2363 if (p->sched_class->task_woken)
2364 p->sched_class->task_woken(rq, p);
9a897c5a 2365#endif
0122ec5b 2366 task_rq_unlock(rq, p, &flags);
1da177e4
LT
2367}
2368
e107be36
AK
2369#ifdef CONFIG_PREEMPT_NOTIFIERS
2370
1cde2930
PZ
2371static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2372
2ecd9d29
PZ
2373void preempt_notifier_inc(void)
2374{
2375 static_key_slow_inc(&preempt_notifier_key);
2376}
2377EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2378
2379void preempt_notifier_dec(void)
2380{
2381 static_key_slow_dec(&preempt_notifier_key);
2382}
2383EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2384
e107be36 2385/**
80dd99b3 2386 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2387 * @notifier: notifier struct to register
e107be36
AK
2388 */
2389void preempt_notifier_register(struct preempt_notifier *notifier)
2390{
2ecd9d29
PZ
2391 if (!static_key_false(&preempt_notifier_key))
2392 WARN(1, "registering preempt_notifier while notifiers disabled\n");
2393
e107be36
AK
2394 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2395}
2396EXPORT_SYMBOL_GPL(preempt_notifier_register);
2397
2398/**
2399 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2400 * @notifier: notifier struct to unregister
e107be36 2401 *
d84525a8 2402 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
2403 */
2404void preempt_notifier_unregister(struct preempt_notifier *notifier)
2405{
2406 hlist_del(&notifier->link);
2407}
2408EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2409
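/*
 * Example (illustrative sketch): how a user of this API wires things up.
 * KVM's vcpu load/put path is the in-tree user; the demo_* names below
 * are made up and the callbacks do nothing interesting.
 *
 *	static void demo_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		// current was just scheduled back in on @cpu
 *	}
 *
 *	static void demo_sched_out(struct preempt_notifier *pn,
 *				   struct task_struct *next)
 *	{
 *		// current is being scheduled out in favour of @next
 *	}
 *
 *	static struct preempt_ops demo_preempt_ops = {
 *		.sched_in	= demo_sched_in,
 *		.sched_out	= demo_sched_out,
 *	};
 *	static struct preempt_notifier demo_notifier;
 *
 *	static void demo_attach(void)	// called from the interested task
 *	{
 *		preempt_notifier_inc();
 *		preempt_notifier_init(&demo_notifier, &demo_preempt_ops);
 *		preempt_notifier_register(&demo_notifier);
 *	}
 *
 *	static void demo_detach(void)
 *	{
 *		preempt_notifier_unregister(&demo_notifier);
 *		preempt_notifier_dec();
 *	}
 */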
1cde2930 2410static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2411{
2412 struct preempt_notifier *notifier;
e107be36 2413
b67bfe0d 2414 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2415 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2416}
2417
1cde2930
PZ
2418static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2419{
2420 if (static_key_false(&preempt_notifier_key))
2421 __fire_sched_in_preempt_notifiers(curr);
2422}
2423
e107be36 2424static void
1cde2930
PZ
2425__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2426 struct task_struct *next)
e107be36
AK
2427{
2428 struct preempt_notifier *notifier;
e107be36 2429
b67bfe0d 2430 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2431 notifier->ops->sched_out(notifier, next);
2432}
2433
1cde2930
PZ
2434static __always_inline void
2435fire_sched_out_preempt_notifiers(struct task_struct *curr,
2436 struct task_struct *next)
2437{
2438 if (static_key_false(&preempt_notifier_key))
2439 __fire_sched_out_preempt_notifiers(curr, next);
2440}
2441
6d6bc0ad 2442#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 2443
1cde2930 2444static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2445{
2446}
2447
1cde2930 2448static inline void
e107be36
AK
2449fire_sched_out_preempt_notifiers(struct task_struct *curr,
2450 struct task_struct *next)
2451{
2452}
2453
6d6bc0ad 2454#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2455
4866cde0
NP
2456/**
2457 * prepare_task_switch - prepare to switch tasks
2458 * @rq: the runqueue preparing to switch
421cee29 2459 * @prev: the current task that is being switched out
4866cde0
NP
2460 * @next: the task we are going to switch to.
2461 *
2462 * This is called with the rq lock held and interrupts off. It must
2463 * be paired with a subsequent finish_task_switch after the context
2464 * switch.
2465 *
2466 * prepare_task_switch sets up locking and calls architecture specific
2467 * hooks.
2468 */
e107be36
AK
2469static inline void
2470prepare_task_switch(struct rq *rq, struct task_struct *prev,
2471 struct task_struct *next)
4866cde0 2472{
43148951 2473 sched_info_switch(rq, prev, next);
fe4b04fa 2474 perf_event_task_sched_out(prev, next);
e107be36 2475 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2476 prepare_lock_switch(rq, next);
2477 prepare_arch_switch(next);
2478}
2479
1da177e4
LT
2480/**
2481 * finish_task_switch - clean up after a task-switch
2482 * @prev: the thread we just switched away from.
2483 *
4866cde0
NP
2484 * finish_task_switch must be called after the context switch, paired
2485 * with a prepare_task_switch call before the context switch.
2486 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2487 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2488 *
2489 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2490 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2491 * with the lock held can cause deadlocks; see schedule() for
2492 * details.)
dfa50b60
ON
2493 *
2494 * The context switch has flipped the stack from under us and restored the
2495 * local variables which were saved when this task called schedule() in the
2496 * past. prev == current is still correct but we need to recalculate this_rq
2497 * because prev may have moved to another CPU.
1da177e4 2498 */
dfa50b60 2499static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
2500 __releases(rq->lock)
2501{
dfa50b60 2502 struct rq *rq = this_rq();
1da177e4 2503 struct mm_struct *mm = rq->prev_mm;
55a101f8 2504 long prev_state;
1da177e4 2505
609ca066
PZ
2506 /*
2507 * The previous task will have left us with a preempt_count of 2
2508 * because it left us after:
2509 *
2510 * schedule()
2511 * preempt_disable(); // 1
2512 * __schedule()
2513 * raw_spin_lock_irq(&rq->lock) // 2
2514 *
2515 * Also, see FORK_PREEMPT_COUNT.
2516 */
2517
1da177e4
LT
2518 rq->prev_mm = NULL;
2519
2520 /*
2521 * A task struct has one reference for the use as "current".
c394cc9f 2522 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2523 * schedule one last time. The schedule call will never return, and
2524 * the scheduled task must drop that reference.
95913d97
PZ
2525 *
2526 * We must observe prev->state before clearing prev->on_cpu (in
2527 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2528 * running on another CPU and we could race with its RUNNING -> DEAD
2529 * transition, resulting in a double drop.
1da177e4 2530 */
55a101f8 2531 prev_state = prev->state;
bf9fae9f 2532 vtime_task_switch(prev);
a8d757ef 2533 perf_event_task_sched_in(prev, current);
4866cde0 2534 finish_lock_switch(rq, prev);
01f23e16 2535 finish_arch_post_lock_switch();
e8fa1362 2536
e107be36 2537 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2538 if (mm)
2539 mmdrop(mm);
c394cc9f 2540 if (unlikely(prev_state == TASK_DEAD)) {
e6c390f2
DF
2541 if (prev->sched_class->task_dead)
2542 prev->sched_class->task_dead(prev);
2543
c6fd91f0 2544 /*
2545 * Remove function-return probe instances associated with this
2546 * task and put them back on the free list.
9761eea8 2547 */
c6fd91f0 2548 kprobe_flush_task(prev);
1da177e4 2549 put_task_struct(prev);
c6fd91f0 2550 }
99e5ada9 2551
de734f89 2552 tick_nohz_task_switch();
dfa50b60 2553 return rq;
1da177e4
LT
2554}
2555
3f029d3c
GH
2556#ifdef CONFIG_SMP
2557
3f029d3c 2558/* rq->lock is NOT held, but preemption is disabled */
e3fca9e7 2559static void __balance_callback(struct rq *rq)
3f029d3c 2560{
e3fca9e7
PZ
2561 struct callback_head *head, *next;
2562 void (*func)(struct rq *rq);
2563 unsigned long flags;
3f029d3c 2564
e3fca9e7
PZ
2565 raw_spin_lock_irqsave(&rq->lock, flags);
2566 head = rq->balance_callback;
2567 rq->balance_callback = NULL;
2568 while (head) {
2569 func = (void (*)(struct rq *))head->func;
2570 next = head->next;
2571 head->next = NULL;
2572 head = next;
3f029d3c 2573
e3fca9e7 2574 func(rq);
3f029d3c 2575 }
e3fca9e7
PZ
2576 raw_spin_unlock_irqrestore(&rq->lock, flags);
2577}
2578
2579static inline void balance_callback(struct rq *rq)
2580{
2581 if (unlikely(rq->balance_callback))
2582 __balance_callback(rq);
3f029d3c
GH
2583}
2584
2585#else
da19ab51 2586
e3fca9e7 2587static inline void balance_callback(struct rq *rq)
3f029d3c 2588{
1da177e4
LT
2589}
2590
3f029d3c
GH
2591#endif
2592
1da177e4
LT
2593/**
2594 * schedule_tail - first thing a freshly forked thread must call.
2595 * @prev: the thread we just switched away from.
2596 */
722a9f92 2597asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
2598 __releases(rq->lock)
2599{
1a43a14a 2600 struct rq *rq;
da19ab51 2601
609ca066
PZ
2602 /*
2603 * New tasks start with FORK_PREEMPT_COUNT, see there and
2604 * finish_task_switch() for details.
2605 *
2606 * finish_task_switch() will drop rq->lock() and lower preempt_count
2607 * and the preempt_enable() will end up enabling preemption (on
2608 * PREEMPT_COUNT kernels).
2609 */
2610
dfa50b60 2611 rq = finish_task_switch(prev);
e3fca9e7 2612 balance_callback(rq);
1a43a14a 2613 preempt_enable();
70b97a7f 2614
1da177e4 2615 if (current->set_child_tid)
b488893a 2616 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2617}
2618
2619/*
dfa50b60 2620 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 2621 */
dfa50b60 2622static inline struct rq *
70b97a7f 2623context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 2624 struct task_struct *next)
1da177e4 2625{
dd41f596 2626 struct mm_struct *mm, *oldmm;
1da177e4 2627
e107be36 2628 prepare_task_switch(rq, prev, next);
fe4b04fa 2629
dd41f596
IM
2630 mm = next->mm;
2631 oldmm = prev->active_mm;
9226d125
ZA
2632 /*
2633 * For paravirt, this is coupled with an exit in switch_to to
2634 * combine the page table reload and the switch backend into
2635 * one hypercall.
2636 */
224101ed 2637 arch_start_context_switch(prev);
9226d125 2638
31915ab4 2639 if (!mm) {
1da177e4
LT
2640 next->active_mm = oldmm;
2641 atomic_inc(&oldmm->mm_count);
2642 enter_lazy_tlb(oldmm, next);
2643 } else
2644 switch_mm(oldmm, mm, next);
2645
31915ab4 2646 if (!prev->mm) {
1da177e4 2647 prev->active_mm = NULL;
1da177e4
LT
2648 rq->prev_mm = oldmm;
2649 }
3a5f5e48
IM
2650 /*
2651 * The runqueue lock will be released by the next
2652 * task (which is an invalid locking op but in the case
2653 * of the scheduler it's an obvious special-case), so we
2654 * do an early lockdep release here:
2655 */
cbce1a68 2656 lockdep_unpin_lock(&rq->lock);
8a25d5de 2657 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1da177e4
LT
2658
2659 /* Here we just switch the register state and the stack. */
2660 switch_to(prev, next, prev);
dd41f596 2661 barrier();
dfa50b60
ON
2662
2663 return finish_task_switch(prev);
1da177e4
LT
2664}
2665
2666/*
1c3e8264 2667 * nr_running and nr_context_switches:
1da177e4
LT
2668 *
2669 * externally visible scheduler statistics: current number of runnable
1c3e8264 2670 * threads, total number of context switches performed since bootup.
1da177e4
LT
2671 */
2672unsigned long nr_running(void)
2673{
2674 unsigned long i, sum = 0;
2675
2676 for_each_online_cpu(i)
2677 sum += cpu_rq(i)->nr_running;
2678
2679 return sum;
f711f609 2680}
1da177e4 2681
2ee507c4
TC
2682/*
2683 * Check if only the current task is running on the cpu.
00cc1633
DD
2684 *
2685 * Caution: this function does not check that the caller has disabled
2686 * preemption, thus the result might have a time-of-check-to-time-of-use
2687 * race. The caller is responsible for using it correctly, for example:
2688 *
2689 * - from a non-preemptible section (of course)
2690 *
2691 * - from a thread that is bound to a single CPU
2692 *
2693 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
2694 */
2695bool single_task_running(void)
2696{
00cc1633 2697 return raw_rq()->nr_running == 1;
2ee507c4
TC
2698}
2699EXPORT_SYMBOL(single_task_running);
2700
1da177e4 2701unsigned long long nr_context_switches(void)
46cb4b7c 2702{
cc94abfc
SR
2703 int i;
2704 unsigned long long sum = 0;
46cb4b7c 2705
0a945022 2706 for_each_possible_cpu(i)
1da177e4 2707 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2708
1da177e4
LT
2709 return sum;
2710}
483b4ee6 2711
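/*
 * Example (illustrative, userspace): the sum computed above is what
 * /proc/stat reports on its "ctxt" line.  A sketch that reads it:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/stat", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f)) {
 *			if (!strncmp(line, "ctxt ", 5)) {
 *				printf("context switches since boot: %s", line + 5);
 *				break;
 *			}
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */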
1da177e4
LT
2712unsigned long nr_iowait(void)
2713{
2714 unsigned long i, sum = 0;
483b4ee6 2715
0a945022 2716 for_each_possible_cpu(i)
1da177e4 2717 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2718
1da177e4
LT
2719 return sum;
2720}
483b4ee6 2721
8c215bd3 2722unsigned long nr_iowait_cpu(int cpu)
69d25870 2723{
8c215bd3 2724 struct rq *this = cpu_rq(cpu);
69d25870
AV
2725 return atomic_read(&this->nr_iowait);
2726}
46cb4b7c 2727
372ba8cb
MG
2728void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2729{
3289bdb4
PZ
2730 struct rq *rq = this_rq();
2731 *nr_waiters = atomic_read(&rq->nr_iowait);
2732 *load = rq->load.weight;
372ba8cb
MG
2733}
2734
dd41f596 2735#ifdef CONFIG_SMP
8a0be9ef 2736
46cb4b7c 2737/*
38022906
PZ
2738 * sched_exec - execve() is a valuable balancing opportunity, because at
2739 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2740 */
38022906 2741void sched_exec(void)
46cb4b7c 2742{
38022906 2743 struct task_struct *p = current;
1da177e4 2744 unsigned long flags;
0017d735 2745 int dest_cpu;
46cb4b7c 2746
8f42ced9 2747 raw_spin_lock_irqsave(&p->pi_lock, flags);
ac66f547 2748 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
0017d735
PZ
2749 if (dest_cpu == smp_processor_id())
2750 goto unlock;
38022906 2751
8f42ced9 2752 if (likely(cpu_active(dest_cpu))) {
969c7921 2753 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2754
8f42ced9
PZ
2755 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2756 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2757 return;
2758 }
0017d735 2759unlock:
8f42ced9 2760 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2761}
dd41f596 2762
1da177e4
LT
2763#endif
2764
1da177e4 2765DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2766DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2767
2768EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2769EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 2770
c5f8d995
HS
2771/*
2772 * Return accounted runtime for the task.
2773 * In case the task is currently running, return the runtime plus current's
2774 * pending runtime that has not been accounted yet.
2775 */
2776unsigned long long task_sched_runtime(struct task_struct *p)
2777{
2778 unsigned long flags;
2779 struct rq *rq;
6e998916 2780 u64 ns;
c5f8d995 2781
911b2898
PZ
2782#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2783 /*
2784 * 64-bit doesn't need locks to atomically read a 64-bit value.
2785 * So we have an optimization chance when the task's delta_exec is 0.
2786 * Reading ->on_cpu is racy, but this is OK.
2787 *
2788 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2789 * If we race with it entering cpu, unaccounted time is 0. This is
2790 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
2791 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2792 * been accounted, so we're correct here as well.
911b2898 2793 */
da0c1e65 2794 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
2795 return p->se.sum_exec_runtime;
2796#endif
2797
c5f8d995 2798 rq = task_rq_lock(p, &flags);
6e998916
SG
2799 /*
2800 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2801 * project cycles that may never be accounted to this
2802 * thread, breaking clock_gettime().
2803 */
2804 if (task_current(rq, p) && task_on_rq_queued(p)) {
2805 update_rq_clock(rq);
2806 p->sched_class->update_curr(rq);
2807 }
2808 ns = p->se.sum_exec_runtime;
0122ec5b 2809 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2810
2811 return ns;
2812}
48f24c4d 2813
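/*
 * Example (illustrative, userspace): task_sched_runtime() above is what,
 * via the posix-cpu-timers code, backs the per-thread CPU-time clock --
 * the clock_gettime() case the comment in it worries about.  A sketch
 * that reads it for the current thread:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *		volatile unsigned long i, sink = 0;
 *
 *		for (i = 0; i < 100000000UL; i++)	// burn some CPU first
 *			sink += i;
 *
 *		if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
 *			return 1;
 *		printf("thread cpu time: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */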
7835b98b
CL
2814/*
2815 * This function gets called by the timer code, with HZ frequency.
2816 * We call it with interrupts disabled.
7835b98b
CL
2817 */
2818void scheduler_tick(void)
2819{
7835b98b
CL
2820 int cpu = smp_processor_id();
2821 struct rq *rq = cpu_rq(cpu);
dd41f596 2822 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2823
2824 sched_clock_tick();
dd41f596 2825
05fa785c 2826 raw_spin_lock(&rq->lock);
3e51f33f 2827 update_rq_clock(rq);
fa85ae24 2828 curr->sched_class->task_tick(rq, curr, 0);
83dfd523 2829 update_cpu_load_active(rq);
3289bdb4 2830 calc_global_load_tick(rq);
05fa785c 2831 raw_spin_unlock(&rq->lock);
7835b98b 2832
e9d2b064 2833 perf_event_task_tick();
e220d2dc 2834
e418e1c2 2835#ifdef CONFIG_SMP
6eb57e0d 2836 rq->idle_balance = idle_cpu(cpu);
7caff66f 2837 trigger_load_balance(rq);
e418e1c2 2838#endif
265f22a9 2839 rq_last_tick_reset(rq);
1da177e4
LT
2840}
2841
265f22a9
FW
2842#ifdef CONFIG_NO_HZ_FULL
2843/**
2844 * scheduler_tick_max_deferment
2845 *
2846 * Keep at least one tick per second when a single
2847 * active task is running because the scheduler doesn't
2848 * yet completely support a full dynticks environment.
2849 *
2850 * This makes sure that uptime, CFS vruntime, load
2851 * balancing, etc... continue to move forward, even
2852 * with a very low granularity.
e69f6186
YB
2853 *
2854 * Return: Maximum deferment in nanoseconds.
265f22a9
FW
2855 */
2856u64 scheduler_tick_max_deferment(void)
2857{
2858 struct rq *rq = this_rq();
316c1608 2859 unsigned long next, now = READ_ONCE(jiffies);
265f22a9
FW
2860
2861 next = rq->last_sched_tick + HZ;
2862
2863 if (time_before_eq(next, now))
2864 return 0;
2865
8fe8ff09 2866 return jiffies_to_nsecs(next - now);
1da177e4 2867}
265f22a9 2868#endif
1da177e4 2869
132380a0 2870notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2871{
2872 if (in_lock_functions(addr)) {
2873 addr = CALLER_ADDR2;
2874 if (in_lock_functions(addr))
2875 addr = CALLER_ADDR3;
2876 }
2877 return addr;
2878}
1da177e4 2879
7e49fcce
SR
2880#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2881 defined(CONFIG_PREEMPT_TRACER))
2882
edafe3a5 2883void preempt_count_add(int val)
1da177e4 2884{
6cd8a4bb 2885#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2886 /*
2887 * Underflow?
2888 */
9a11b49a
IM
2889 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2890 return;
6cd8a4bb 2891#endif
bdb43806 2892 __preempt_count_add(val);
6cd8a4bb 2893#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2894 /*
2895 * Spinlock count overflowing soon?
2896 */
33859f7f
MOS
2897 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2898 PREEMPT_MASK - 10);
6cd8a4bb 2899#endif
8f47b187
TG
2900 if (preempt_count() == val) {
2901 unsigned long ip = get_parent_ip(CALLER_ADDR1);
2902#ifdef CONFIG_DEBUG_PREEMPT
2903 current->preempt_disable_ip = ip;
2904#endif
2905 trace_preempt_off(CALLER_ADDR0, ip);
2906 }
1da177e4 2907}
bdb43806 2908EXPORT_SYMBOL(preempt_count_add);
edafe3a5 2909NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 2910
edafe3a5 2911void preempt_count_sub(int val)
1da177e4 2912{
6cd8a4bb 2913#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2914 /*
2915 * Underflow?
2916 */
01e3eb82 2917 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 2918 return;
1da177e4
LT
2919 /*
2920 * Is the spinlock portion underflowing?
2921 */
9a11b49a
IM
2922 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2923 !(preempt_count() & PREEMPT_MASK)))
2924 return;
6cd8a4bb 2925#endif
9a11b49a 2926
6cd8a4bb
SR
2927 if (preempt_count() == val)
2928 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
bdb43806 2929 __preempt_count_sub(val);
1da177e4 2930}
bdb43806 2931EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 2932NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4
LT
2933
2934#endif
2935
2936/*
dd41f596 2937 * Print scheduling while atomic bug:
1da177e4 2938 */
dd41f596 2939static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 2940{
664dfa65
DJ
2941 if (oops_in_progress)
2942 return;
2943
3df0fc5b
PZ
2944 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2945 prev->comm, prev->pid, preempt_count());
838225b4 2946
dd41f596 2947 debug_show_held_locks(prev);
e21f5b15 2948 print_modules();
dd41f596
IM
2949 if (irqs_disabled())
2950 print_irqtrace_events(prev);
8f47b187
TG
2951#ifdef CONFIG_DEBUG_PREEMPT
2952 if (in_atomic_preempt_off()) {
2953 pr_err("Preemption disabled at:");
2954 print_ip_sym(current->preempt_disable_ip);
2955 pr_cont("\n");
2956 }
2957#endif
6135fc1e 2958 dump_stack();
373d4d09 2959 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 2960}
1da177e4 2961
dd41f596
IM
2962/*
2963 * Various schedule()-time debugging checks and statistics:
2964 */
2965static inline void schedule_debug(struct task_struct *prev)
2966{
0d9e2632
AT
2967#ifdef CONFIG_SCHED_STACK_END_CHECK
2968 BUG_ON(unlikely(task_stack_end_corrupted(prev)));
2969#endif
b99def8b
PZ
2970
2971 if (unlikely(in_atomic_preempt_off()))
dd41f596 2972 __schedule_bug(prev);
b3fbab05 2973 rcu_sleep_check();
dd41f596 2974
1da177e4
LT
2975 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2976
2d72376b 2977 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
2978}
2979
2980/*
2981 * Pick up the highest-prio task:
2982 */
2983static inline struct task_struct *
606dba2e 2984pick_next_task(struct rq *rq, struct task_struct *prev)
dd41f596 2985{
37e117c0 2986 const struct sched_class *class = &fair_sched_class;
dd41f596 2987 struct task_struct *p;
1da177e4
LT
2988
2989 /*
dd41f596
IM
2990 * Optimization: we know that if all tasks are in
2991 * the fair class we can call that function directly:
1da177e4 2992 */
37e117c0 2993 if (likely(prev->sched_class == class &&
38033c37 2994 rq->nr_running == rq->cfs.h_nr_running)) {
606dba2e 2995 p = fair_sched_class.pick_next_task(rq, prev);
6ccdc84b
PZ
2996 if (unlikely(p == RETRY_TASK))
2997 goto again;
2998
2999 /* assumes fair_sched_class->next == idle_sched_class */
3000 if (unlikely(!p))
3001 p = idle_sched_class.pick_next_task(rq, prev);
3002
3003 return p;
1da177e4
LT
3004 }
3005
37e117c0 3006again:
34f971f6 3007 for_each_class(class) {
606dba2e 3008 p = class->pick_next_task(rq, prev);
37e117c0
PZ
3009 if (p) {
3010 if (unlikely(p == RETRY_TASK))
3011 goto again;
dd41f596 3012 return p;
37e117c0 3013 }
dd41f596 3014 }
34f971f6
PZ
3015
3016 BUG(); /* the idle class will always have a runnable task */
dd41f596 3017}
1da177e4 3018
dd41f596 3019/*
c259e01a 3020 * __schedule() is the main scheduler function.
edde96ea
PE
3021 *
3022 * The main means of driving the scheduler and thus entering this function are:
3023 *
3024 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3025 *
3026 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3027 * paths. For example, see arch/x86/entry_64.S.
3028 *
3029 * To drive preemption between tasks, the scheduler sets the flag in timer
3030 * interrupt handler scheduler_tick().
3031 *
3032 * 3. Wakeups don't really cause entry into schedule(). They add a
3033 * task to the run-queue and that's it.
3034 *
3035 * Now, if the new task added to the run-queue preempts the current
3036 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3037 * called on the nearest possible occasion:
3038 *
3039 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3040 *
3041 * - in syscall or exception context, at the next outmost
3042 * preempt_enable(). (this might be as soon as the wake_up()'s
3043 * spin_unlock()!)
3044 *
3045 * - in IRQ context, return from interrupt-handler to
3046 * preemptible context
3047 *
3048 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3049 * then at the next:
3050 *
3051 * - cond_resched() call
3052 * - explicit schedule() call
3053 * - return from syscall or exception to user-space
3054 * - return from interrupt-handler to user-space
bfd9b2b5 3055 *
b30f0e3f 3056 * WARNING: must be called with preemption disabled!
dd41f596 3057 */
fc13aeba 3058static void __sched __schedule(bool preempt)
dd41f596
IM
3059{
3060 struct task_struct *prev, *next;
67ca7bde 3061 unsigned long *switch_count;
dd41f596 3062 struct rq *rq;
31656519 3063 int cpu;
dd41f596 3064
dd41f596
IM
3065 cpu = smp_processor_id();
3066 rq = cpu_rq(cpu);
38200cf2 3067 rcu_note_context_switch();
dd41f596 3068 prev = rq->curr;
dd41f596 3069
b99def8b
PZ
3070 /*
3071 * do_exit() calls schedule() with preemption disabled as an exception;
3072 * however we must fix that up, otherwise the next task will see an
3073 * inconsistent (higher) preempt count.
3074 *
3075 * It also keeps the below schedule_debug() test from complaining
3076 * about this.
3077 */
3078 if (unlikely(prev->state == TASK_DEAD))
3079 preempt_enable_no_resched_notrace();
3080
dd41f596 3081 schedule_debug(prev);
1da177e4 3082
31656519 3083 if (sched_feat(HRTICK))
f333fdc9 3084 hrtick_clear(rq);
8f4d37ec 3085
e0acd0a6
ON
3086 /*
3087 * Make sure that signal_pending_state()->signal_pending() below
3088 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3089 * done by the caller to avoid the race with signal_wake_up().
3090 */
3091 smp_mb__before_spinlock();
05fa785c 3092 raw_spin_lock_irq(&rq->lock);
cbce1a68 3093 lockdep_pin_lock(&rq->lock);
1da177e4 3094
9edfbfed
PZ
3095 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3096
246d86b5 3097 switch_count = &prev->nivcsw;
fc13aeba 3098 if (!preempt && prev->state) {
21aa9af0 3099 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3100 prev->state = TASK_RUNNING;
21aa9af0 3101 } else {
2acca55e
PZ
3102 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3103 prev->on_rq = 0;
3104
21aa9af0 3105 /*
2acca55e
PZ
3106 * If a worker went to sleep, notify and ask workqueue
3107 * whether it wants to wake up a task to maintain
3108 * concurrency.
21aa9af0
TH
3109 */
3110 if (prev->flags & PF_WQ_WORKER) {
3111 struct task_struct *to_wakeup;
3112
3113 to_wakeup = wq_worker_sleeping(prev, cpu);
3114 if (to_wakeup)
3115 try_to_wake_up_local(to_wakeup);
3116 }
21aa9af0 3117 }
dd41f596 3118 switch_count = &prev->nvcsw;
1da177e4
LT
3119 }
3120
9edfbfed 3121 if (task_on_rq_queued(prev))
606dba2e
PZ
3122 update_rq_clock(rq);
3123
3124 next = pick_next_task(rq, prev);
f26f9aff 3125 clear_tsk_need_resched(prev);
f27dde8d 3126 clear_preempt_need_resched();
9edfbfed 3127 rq->clock_skip_update = 0;
1da177e4 3128
1da177e4 3129 if (likely(prev != next)) {
1da177e4
LT
3130 rq->nr_switches++;
3131 rq->curr = next;
3132 ++*switch_count;
3133
c73464b1 3134 trace_sched_switch(preempt, prev, next);
dfa50b60
ON
3135 rq = context_switch(rq, prev, next); /* unlocks the rq */
3136 cpu = cpu_of(rq);
cbce1a68
PZ
3137 } else {
3138 lockdep_unpin_lock(&rq->lock);
05fa785c 3139 raw_spin_unlock_irq(&rq->lock);
cbce1a68 3140 }
1da177e4 3141
e3fca9e7 3142 balance_callback(rq);
1da177e4 3143}
c259e01a 3144
9c40cef2
TG
3145static inline void sched_submit_work(struct task_struct *tsk)
3146{
3c7d5184 3147 if (!tsk->state || tsk_is_pi_blocked(tsk))
9c40cef2
TG
3148 return;
3149 /*
3150 * If we are going to sleep and we have plugged IO queued,
3151 * make sure to submit it to avoid deadlocks.
3152 */
3153 if (blk_needs_flush_plug(tsk))
3154 blk_schedule_flush_plug(tsk);
3155}
3156
722a9f92 3157asmlinkage __visible void __sched schedule(void)
c259e01a 3158{
9c40cef2
TG
3159 struct task_struct *tsk = current;
3160
3161 sched_submit_work(tsk);
bfd9b2b5 3162 do {
b30f0e3f 3163 preempt_disable();
fc13aeba 3164 __schedule(false);
b30f0e3f 3165 sched_preempt_enable_no_resched();
bfd9b2b5 3166 } while (need_resched());
c259e01a 3167}
1da177e4
LT
3168EXPORT_SYMBOL(schedule);
3169
91d1aa43 3170#ifdef CONFIG_CONTEXT_TRACKING
722a9f92 3171asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
3172{
3173 /*
3174 * If we come here after a random call to set_need_resched(),
3175 * or we have been woken up remotely but the IPI has not yet arrived,
3176 * we haven't yet exited the RCU idle mode. Do it here manually until
3177 * we find a better solution.
7cc78f8f
AL
3178 *
3179 * NB: There are buggy callers of this function. Ideally we
c467ea76 3180 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 3181 * too frequently to make sense yet.
20ab65e3 3182 */
7cc78f8f 3183 enum ctx_state prev_state = exception_enter();
20ab65e3 3184 schedule();
7cc78f8f 3185 exception_exit(prev_state);
20ab65e3
FW
3186}
3187#endif
3188
c5491ea7
TG
3189/**
3190 * schedule_preempt_disabled - called with preemption disabled
3191 *
3192 * Returns with preemption disabled. Note: preempt_count must be 1
3193 */
3194void __sched schedule_preempt_disabled(void)
3195{
ba74c144 3196 sched_preempt_enable_no_resched();
c5491ea7
TG
3197 schedule();
3198 preempt_disable();
3199}
3200
06b1f808 3201static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
3202{
3203 do {
b30f0e3f 3204 preempt_active_enter();
fc13aeba 3205 __schedule(true);
b30f0e3f 3206 preempt_active_exit();
a18b5d01
FW
3207
3208 /*
3209 * Check again in case we missed a preemption opportunity
3210 * between schedule and now.
3211 */
a18b5d01
FW
3212 } while (need_resched());
3213}
3214
1da177e4
LT
3215#ifdef CONFIG_PREEMPT
3216/*
2ed6e34f 3217 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3218 * off of preempt_enable(). Kernel preemptions off the return-from-interrupt
1da177e4
LT
3219 * path occur there and call schedule() directly.
3220 */
722a9f92 3221asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 3222{
1da177e4
LT
3223 /*
3224 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3225 * we do not want to preempt the current task. Just return..
1da177e4 3226 */
fbb00b56 3227 if (likely(!preemptible()))
1da177e4
LT
3228 return;
3229
a18b5d01 3230 preempt_schedule_common();
1da177e4 3231}
376e2424 3232NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 3233EXPORT_SYMBOL(preempt_schedule);
009f60e2 3234
009f60e2 3235/**
4eaca0a8 3236 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
3237 *
3238 * The tracing infrastructure uses preempt_enable_notrace to prevent
3239 * recursion and tracing preempt enabling caused by the tracing
3240 * infrastructure itself. But as tracing can happen in areas coming
3241 * from userspace or just about to enter userspace, a preempt enable
3242 * can occur before user_exit() is called. This will cause the scheduler
3243 * to be called when the system is still in usermode.
3244 *
3245 * To prevent this, the preempt_enable_notrace will use this function
3246 * instead of preempt_schedule() to exit user context if needed before
3247 * calling the scheduler.
3248 */
4eaca0a8 3249asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
3250{
3251 enum ctx_state prev_ctx;
3252
3253 if (likely(!preemptible()))
3254 return;
3255
3256 do {
be690035
FW
3257 /*
3258 * Use raw __preempt_count() ops that don't call functions.
3259 * We can't call functions before disabling preemption, which is
3260 * what disarms preemption tracing recursion.
3261 */
3262 __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
3263 barrier();
009f60e2
ON
3264 /*
3265 * Needs preempt disabled in case user_exit() is traced
3266 * and the tracer calls preempt_enable_notrace() causing
3267 * an infinite recursion.
3268 */
3269 prev_ctx = exception_enter();
fc13aeba 3270 __schedule(true);
009f60e2
ON
3271 exception_exit(prev_ctx);
3272
009f60e2 3273 barrier();
be690035 3274 __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
009f60e2
ON
3275 } while (need_resched());
3276}
4eaca0a8 3277EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 3278
32e475d7 3279#endif /* CONFIG_PREEMPT */
1da177e4
LT
3280
3281/*
2ed6e34f 3282 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3283 * off of irq context.
3284 * Note that this is called and returns with irqs disabled. This will
3285 * protect us against recursive calling from irq.
3286 */
722a9f92 3287asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 3288{
b22366cd 3289 enum ctx_state prev_state;
6478d880 3290
2ed6e34f 3291 /* Catch callers which need to be fixed */
f27dde8d 3292 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 3293
b22366cd
FW
3294 prev_state = exception_enter();
3295
3a5c359a 3296 do {
b30f0e3f 3297 preempt_active_enter();
3a5c359a 3298 local_irq_enable();
fc13aeba 3299 __schedule(true);
3a5c359a 3300 local_irq_disable();
b30f0e3f 3301 preempt_active_exit();
5ed0cec0 3302 } while (need_resched());
b22366cd
FW
3303
3304 exception_exit(prev_state);
1da177e4
LT
3305}
3306
63859d4f 3307int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3308 void *key)
1da177e4 3309{
63859d4f 3310 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3311}
1da177e4
LT
3312EXPORT_SYMBOL(default_wake_function);
3313
b29739f9
IM
3314#ifdef CONFIG_RT_MUTEXES
3315
3316/*
3317 * rt_mutex_setprio - set the current priority of a task
3318 * @p: task
3319 * @prio: prio value (kernel-internal form)
3320 *
3321 * This function changes the 'effective' priority of a task. It does
3322 * not touch ->normal_prio like __setscheduler().
3323 *
c365c292
TG
3324 * Used by the rt_mutex code to implement priority inheritance
3325 * logic. The call site only calls this if the priority of the task changed.
b29739f9 3326 */
36c8b586 3327void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3328{
da0c1e65 3329 int oldprio, queued, running, enqueue_flag = 0;
70b97a7f 3330 struct rq *rq;
83ab0aa0 3331 const struct sched_class *prev_class;
b29739f9 3332
aab03e05 3333 BUG_ON(prio > MAX_PRIO);
b29739f9 3334
0122ec5b 3335 rq = __task_rq_lock(p);
b29739f9 3336
1c4dd99b
TG
3337 /*
3338 * Idle task boosting is a no-no in general. There is one
3339 * exception, when PREEMPT_RT and NOHZ are active:
3340 *
3341 * The idle task calls get_next_timer_interrupt() and holds
3342 * the timer wheel base->lock on the CPU and another CPU wants
3343 * to access the timer (probably to cancel it). We can safely
3344 * ignore the boosting request, as the idle CPU runs this code
3345 * with interrupts disabled and will complete the lock
3346 * protected section without being interrupted. So there is no
3347 * real need to boost.
3348 */
3349 if (unlikely(p == rq->idle)) {
3350 WARN_ON(p != rq->curr);
3351 WARN_ON(p->pi_blocked_on);
3352 goto out_unlock;
3353 }
3354
a8027073 3355 trace_sched_pi_setprio(p, prio);
d5f9f942 3356 oldprio = p->prio;
83ab0aa0 3357 prev_class = p->sched_class;
da0c1e65 3358 queued = task_on_rq_queued(p);
051a1d1a 3359 running = task_current(rq, p);
da0c1e65 3360 if (queued)
69be72c1 3361 dequeue_task(rq, p, 0);
0e1f3483 3362 if (running)
f3cd1c4e 3363 put_prev_task(rq, p);
dd41f596 3364
2d3d891d
DF
3365 /*
3366 * Boosting conditions are:
3367 * 1. -rt task is running and holds mutex A
3368 * --> -dl task blocks on mutex A
3369 *
3370 * 2. -dl task is running and holds mutex A
3371 * --> -dl task blocks on mutex A and could preempt the
3372 * running task
3373 */
3374 if (dl_prio(prio)) {
466af29b
ON
3375 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3376 if (!dl_prio(p->normal_prio) ||
3377 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
2d3d891d 3378 p->dl.dl_boosted = 1;
2d3d891d
DF
3379 enqueue_flag = ENQUEUE_REPLENISH;
3380 } else
3381 p->dl.dl_boosted = 0;
aab03e05 3382 p->sched_class = &dl_sched_class;
2d3d891d
DF
3383 } else if (rt_prio(prio)) {
3384 if (dl_prio(oldprio))
3385 p->dl.dl_boosted = 0;
3386 if (oldprio < prio)
3387 enqueue_flag = ENQUEUE_HEAD;
dd41f596 3388 p->sched_class = &rt_sched_class;
2d3d891d
DF
3389 } else {
3390 if (dl_prio(oldprio))
3391 p->dl.dl_boosted = 0;
746db944
BS
3392 if (rt_prio(oldprio))
3393 p->rt.timeout = 0;
dd41f596 3394 p->sched_class = &fair_sched_class;
2d3d891d 3395 }
dd41f596 3396
b29739f9
IM
3397 p->prio = prio;
3398
0e1f3483
HS
3399 if (running)
3400 p->sched_class->set_curr_task(rq);
da0c1e65 3401 if (queued)
2d3d891d 3402 enqueue_task(rq, p, enqueue_flag);
cb469845 3403
da7a735e 3404 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 3405out_unlock:
4c9a4bc8 3406 preempt_disable(); /* avoid rq from going away on us */
0122ec5b 3407 __task_rq_unlock(rq);
4c9a4bc8
PZ
3408
3409 balance_callback(rq);
3410 preempt_enable();
b29739f9 3411}
b29739f9 3412#endif
d50dde5a 3413
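/*
 * Example (illustrative, userspace): rt_mutex_setprio() above is reached
 * from userspace through PI futexes; the usual way to get one is a
 * pthread mutex with the PTHREAD_PRIO_INHERIT protocol.  Compile with
 * -pthread; the boosting only becomes observable with real-time
 * priorities and a contending waiter.
 *
 *	#include <pthread.h>
 *
 *	int main(void)
 *	{
 *		pthread_mutexattr_t attr;
 *		pthread_mutex_t lock;
 *
 *		pthread_mutexattr_init(&attr);
 *		// owner inherits the priority of the highest-priority waiter
 *		pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
 *		pthread_mutex_init(&lock, &attr);
 *
 *		pthread_mutex_lock(&lock);
 *		// ... critical section: a blocked -rt waiter would boost us here ...
 *		pthread_mutex_unlock(&lock);
 *
 *		pthread_mutex_destroy(&lock);
 *		pthread_mutexattr_destroy(&attr);
 *		return 0;
 *	}
 */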
36c8b586 3414void set_user_nice(struct task_struct *p, long nice)
1da177e4 3415{
da0c1e65 3416 int old_prio, delta, queued;
1da177e4 3417 unsigned long flags;
70b97a7f 3418 struct rq *rq;
1da177e4 3419
75e45d51 3420 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
3421 return;
3422 /*
3423 * We have to be careful, if called from sys_setpriority(),
3424 * the task might be in the middle of scheduling on another CPU.
3425 */
3426 rq = task_rq_lock(p, &flags);
3427 /*
3428 * The RT priorities are set via sched_setscheduler(), but we still
3429 * allow the 'normal' nice value to be set - but as expected
3430 * it won't have any effect on scheduling while the task is
aab03e05 3431 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 3432 */
aab03e05 3433 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
3434 p->static_prio = NICE_TO_PRIO(nice);
3435 goto out_unlock;
3436 }
da0c1e65
KT
3437 queued = task_on_rq_queued(p);
3438 if (queued)
69be72c1 3439 dequeue_task(rq, p, 0);
1da177e4 3440
1da177e4 3441 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3442 set_load_weight(p);
b29739f9
IM
3443 old_prio = p->prio;
3444 p->prio = effective_prio(p);
3445 delta = p->prio - old_prio;
1da177e4 3446
da0c1e65 3447 if (queued) {
371fd7e7 3448 enqueue_task(rq, p, 0);
1da177e4 3449 /*
d5f9f942
AM
3450 * If the task increased its priority or is running and
3451 * lowered its priority, then reschedule its CPU:
1da177e4 3452 */
d5f9f942 3453 if (delta < 0 || (delta > 0 && task_running(rq, p)))
8875125e 3454 resched_curr(rq);
1da177e4
LT
3455 }
3456out_unlock:
0122ec5b 3457 task_rq_unlock(rq, p, &flags);
1da177e4 3458}
1da177e4
LT
3459EXPORT_SYMBOL(set_user_nice);
3460
e43379f1
MM
3461/*
3462 * can_nice - check if a task can reduce its nice value
3463 * @p: task
3464 * @nice: nice value
3465 */
36c8b586 3466int can_nice(const struct task_struct *p, const int nice)
e43379f1 3467{
024f4747 3468 /* convert nice value [19,-20] to rlimit style value [1,40] */
7aa2c016 3469 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 3470
78d7d407 3471 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3472 capable(CAP_SYS_NICE));
3473}
3474
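/*
 * Example (illustrative, userspace): the "rlimit style" encoding above
 * maps nice [19,-20] to [1,40], i.e. 20 - nice, so without CAP_SYS_NICE a
 * task may only lower its nice value down to 20 - RLIMIT_NICE.  A sketch:
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *		int cur;
 *
 *		if (getrlimit(RLIMIT_NICE, &rl))
 *			return 1;
 *		errno = 0;
 *		cur = getpriority(PRIO_PROCESS, 0);	// current nice value
 *		if (cur == -1 && errno)
 *			return 1;
 *		printf("nice = %d, RLIMIT_NICE = %llu -> floor = %lld\n",
 *		       cur, (unsigned long long)rl.rlim_cur,
 *		       20LL - (long long)rl.rlim_cur);
 *		return 0;
 *	}
 */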
1da177e4
LT
3475#ifdef __ARCH_WANT_SYS_NICE
3476
3477/*
3478 * sys_nice - change the priority of the current process.
3479 * @increment: priority increment
3480 *
3481 * sys_setpriority is a more generic, but much slower function that
3482 * does similar things.
3483 */
5add95d4 3484SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3485{
48f24c4d 3486 long nice, retval;
1da177e4
LT
3487
3488 /*
3489 * Setpriority might change our priority at the same moment.
3490 * We don't have to worry. Conceptually one call occurs first
3491 * and we have a single winner.
3492 */
a9467fa3 3493 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 3494 nice = task_nice(current) + increment;
1da177e4 3495
a9467fa3 3496 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
3497 if (increment < 0 && !can_nice(current, nice))
3498 return -EPERM;
3499
1da177e4
LT
3500 retval = security_task_setnice(current, nice);
3501 if (retval)
3502 return retval;
3503
3504 set_user_nice(current, nice);
3505 return 0;
3506}
3507
3508#endif
3509
3510/**
3511 * task_prio - return the priority value of a given task.
3512 * @p: the task in question.
3513 *
e69f6186 3514 * Return: The priority value as seen by users in /proc.
1da177e4
LT
3515 * RT tasks are offset by -200. Normal tasks are centered
3516 * around 0, value goes from -16 to +15.
3517 */
36c8b586 3518int task_prio(const struct task_struct *p)
1da177e4
LT
3519{
3520 return p->prio - MAX_RT_PRIO;
3521}
3522
1da177e4
LT
3523/**
3524 * idle_cpu - is a given cpu idle currently?
3525 * @cpu: the processor in question.
e69f6186
YB
3526 *
3527 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
3528 */
3529int idle_cpu(int cpu)
3530{
908a3283
TG
3531 struct rq *rq = cpu_rq(cpu);
3532
3533 if (rq->curr != rq->idle)
3534 return 0;
3535
3536 if (rq->nr_running)
3537 return 0;
3538
3539#ifdef CONFIG_SMP
3540 if (!llist_empty(&rq->wake_list))
3541 return 0;
3542#endif
3543
3544 return 1;
1da177e4
LT
3545}
3546
1da177e4
LT
3547/**
3548 * idle_task - return the idle task for a given cpu.
3549 * @cpu: the processor in question.
e69f6186
YB
3550 *
3551 * Return: The idle task for the cpu @cpu.
1da177e4 3552 */
36c8b586 3553struct task_struct *idle_task(int cpu)
1da177e4
LT
3554{
3555 return cpu_rq(cpu)->idle;
3556}
3557
3558/**
3559 * find_process_by_pid - find a process with a matching PID value.
3560 * @pid: the pid in question.
e69f6186
YB
3561 *
3562 * The task of @pid, if found. %NULL otherwise.
1da177e4 3563 */
a9957449 3564static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3565{
228ebcbe 3566 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3567}
3568
aab03e05
DF
3569/*
3570 * This function initializes the sched_dl_entity of a task that is
3571 * becoming SCHED_DEADLINE.
3572 *
3573 * Only the static values are considered here, the actual runtime and the
3574 * absolute deadline will be properly calculated when the task is enqueued
3575 * for the first time with its new policy.
3576 */
3577static void
3578__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3579{
3580 struct sched_dl_entity *dl_se = &p->dl;
3581
aab03e05
DF
3582 dl_se->dl_runtime = attr->sched_runtime;
3583 dl_se->dl_deadline = attr->sched_deadline;
755378a4 3584 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
aab03e05 3585 dl_se->flags = attr->sched_flags;
332ac17e 3586 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
40767b0d
PZ
3587
3588 /*
3589 * Changing the parameters of a task is 'tricky' and we're not doing
3590 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3591 *
3592 * What we SHOULD do is delay the bandwidth release until the 0-lag
3593 * point. This would include retaining the task_struct until that time
3594 * and change dl_overflow() to not immediately decrement the current
3595 * amount.
3596 *
3597 * Instead we retain the current runtime/deadline and let the new
3598 * parameters take effect after the current reservation period lapses.
3599 * This is safe (albeit pessimistic) because the 0-lag point is always
3600 * before the current scheduling deadline.
3601 *
3602 * We can still have temporary overloads because we do not delay the
3603 * change in bandwidth until that time; so admission control is
3604 * not on the safe side. It does however guarantee tasks will never
3605 * consume more than promised.
3606 */
aab03e05
DF
3607}
3608
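/*
 * Illustrative numbers for the conversion above (editorial example, not
 * taken from this file): a task asking for 10ms of runtime every 100ms,
 * with no explicit period, ends up with
 *
 *   dl_runtime  =  10 * NSEC_PER_MSEC
 *   dl_deadline = 100 * NSEC_PER_MSEC
 *   dl_period   = 100 * NSEC_PER_MSEC	(defaulted from dl_deadline)
 *   dl_bw       = to_ratio(dl_period, dl_runtime), i.e. ~10% of one CPU
 */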
c13db6b1
SR
3609/*
3610 * sched_setparam() passes in -1 for its policy, to let the functions
3611 * it calls know not to change it.
3612 */
3613#define SETPARAM_POLICY -1
3614
c365c292
TG
3615static void __setscheduler_params(struct task_struct *p,
3616 const struct sched_attr *attr)
1da177e4 3617{
d50dde5a
DF
3618 int policy = attr->sched_policy;
3619
c13db6b1 3620 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
3621 policy = p->policy;
3622
1da177e4 3623 p->policy = policy;
d50dde5a 3624
aab03e05
DF
3625 if (dl_policy(policy))
3626 __setparam_dl(p, attr);
39fd8fd2 3627 else if (fair_policy(policy))
d50dde5a
DF
3628 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3629
39fd8fd2
PZ
3630 /*
3631 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3632 * !rt_policy. Always setting this ensures that things like
3633 * getparam()/getattr() don't report silly values for !rt tasks.
3634 */
3635 p->rt_priority = attr->sched_priority;
383afd09 3636 p->normal_prio = normal_prio(p);
c365c292
TG
3637 set_load_weight(p);
3638}
39fd8fd2 3639
c365c292
TG
3640/* Actually do priority change: must hold pi & rq lock. */
3641static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 3642 const struct sched_attr *attr, bool keep_boost)
c365c292
TG
3643{
3644 __setscheduler_params(p, attr);
d50dde5a 3645
383afd09 3646 /*
0782e63b
TG
3647 * Keep a potential priority boosting if called from
3648 * sched_setscheduler().
383afd09 3649 */
0782e63b
TG
3650 if (keep_boost)
3651 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3652 else
3653 p->prio = normal_prio(p);
383afd09 3654
aab03e05
DF
3655 if (dl_prio(p->prio))
3656 p->sched_class = &dl_sched_class;
3657 else if (rt_prio(p->prio))
ffd44db5
PZ
3658 p->sched_class = &rt_sched_class;
3659 else
3660 p->sched_class = &fair_sched_class;
1da177e4 3661}
aab03e05
DF
3662
3663static void
3664__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3665{
3666 struct sched_dl_entity *dl_se = &p->dl;
3667
3668 attr->sched_priority = p->rt_priority;
3669 attr->sched_runtime = dl_se->dl_runtime;
3670 attr->sched_deadline = dl_se->dl_deadline;
755378a4 3671 attr->sched_period = dl_se->dl_period;
aab03e05
DF
3672 attr->sched_flags = dl_se->flags;
3673}
3674
3675/*
3676 * This function validates the new parameters of a -deadline task.
3677 * We require the deadline to be non-zero and greater than or equal
755378a4 3678 * to the runtime, and the period to be either zero or greater than
332ac17e 3679 * or equal to the deadline. Furthermore, we have to be sure that
b0827819
JL
3680 * user parameters are above the internal resolution of 1us (we
3681 * check sched_runtime only since it is always the smaller one) and
3682 * below 2^63 ns (we have to check both sched_deadline and
3683 * sched_period, as the latter can be zero).
aab03e05
DF
3684 */
3685static bool
3686__checkparam_dl(const struct sched_attr *attr)
3687{
b0827819
JL
3688 /* deadline != 0 */
3689 if (attr->sched_deadline == 0)
3690 return false;
3691
3692 /*
3693 * Since we truncate DL_SCALE bits, make sure we're at least
3694 * that big.
3695 */
3696 if (attr->sched_runtime < (1ULL << DL_SCALE))
3697 return false;
3698
3699 /*
3700 * Since we use the MSB for wrap-around and sign issues, make
3701 * sure it's not set (mind that period can be equal to zero).
3702 */
3703 if (attr->sched_deadline & (1ULL << 63) ||
3704 attr->sched_period & (1ULL << 63))
3705 return false;
3706
3707 /* runtime <= deadline <= period (if period != 0) */
3708 if ((attr->sched_period != 0 &&
3709 attr->sched_period < attr->sched_deadline) ||
3710 attr->sched_deadline < attr->sched_runtime)
3711 return false;
3712
3713 return true;
aab03e05
DF
3714}
3715
c69e8d9c
DH
3716/*
3717 * check that the target process has a UID matching the current process's
3718 */
3719static bool check_same_owner(struct task_struct *p)
3720{
3721 const struct cred *cred = current_cred(), *pcred;
3722 bool match;
3723
3724 rcu_read_lock();
3725 pcred = __task_cred(p);
9c806aa0
EB
3726 match = (uid_eq(cred->euid, pcred->euid) ||
3727 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
3728 rcu_read_unlock();
3729 return match;
3730}
3731
75381608
WL
3732static bool dl_param_changed(struct task_struct *p,
3733 const struct sched_attr *attr)
3734{
3735 struct sched_dl_entity *dl_se = &p->dl;
3736
3737 if (dl_se->dl_runtime != attr->sched_runtime ||
3738 dl_se->dl_deadline != attr->sched_deadline ||
3739 dl_se->dl_period != attr->sched_period ||
3740 dl_se->flags != attr->sched_flags)
3741 return true;
3742
3743 return false;
3744}
3745
d50dde5a
DF
3746static int __sched_setscheduler(struct task_struct *p,
3747 const struct sched_attr *attr,
dbc7f069 3748 bool user, bool pi)
1da177e4 3749{
383afd09
SR
3750 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3751 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 3752 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 3753 int new_effective_prio, policy = attr->sched_policy;
1da177e4 3754 unsigned long flags;
83ab0aa0 3755 const struct sched_class *prev_class;
70b97a7f 3756 struct rq *rq;
ca94c442 3757 int reset_on_fork;
1da177e4 3758
66e5393a
SR
3759 /* may grab non-irq protected spin_locks */
3760 BUG_ON(in_interrupt());
1da177e4
LT
3761recheck:
3762 /* double check policy once rq lock held */
ca94c442
LP
3763 if (policy < 0) {
3764 reset_on_fork = p->sched_reset_on_fork;
1da177e4 3765 policy = oldpolicy = p->policy;
ca94c442 3766 } else {
7479f3c9 3767 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 3768
20f9cd2a 3769 if (!valid_policy(policy))
ca94c442
LP
3770 return -EINVAL;
3771 }
3772
7479f3c9
PZ
3773 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3774 return -EINVAL;
3775
1da177e4
LT
3776 /*
3777 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
3778 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3779 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 3780 */
0bb040a4 3781 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
d50dde5a 3782 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
1da177e4 3783 return -EINVAL;
aab03e05
DF
3784 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3785 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
3786 return -EINVAL;
3787
37e4ab3f
OC
3788 /*
3789 * Allow unprivileged RT tasks to decrease priority:
3790 */
961ccddd 3791 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 3792 if (fair_policy(policy)) {
d0ea0268 3793 if (attr->sched_nice < task_nice(p) &&
eaad4513 3794 !can_nice(p, attr->sched_nice))
d50dde5a
DF
3795 return -EPERM;
3796 }
3797
e05606d3 3798 if (rt_policy(policy)) {
a44702e8
ON
3799 unsigned long rlim_rtprio =
3800 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
3801
3802 /* can't set/change the rt policy */
3803 if (policy != p->policy && !rlim_rtprio)
3804 return -EPERM;
3805
3806 /* can't increase priority */
d50dde5a
DF
3807 if (attr->sched_priority > p->rt_priority &&
3808 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
3809 return -EPERM;
3810 }
c02aa73b 3811
d44753b8
JL
3812 /*
3813 * Can't set/change SCHED_DEADLINE policy at all for now
3814 * (safest behavior); in the future we would like to allow
3815 * unprivileged DL tasks to increase their relative deadline
3816 * or reduce their runtime (both ways reducing utilization)
3817 */
3818 if (dl_policy(policy))
3819 return -EPERM;
3820
dd41f596 3821 /*
c02aa73b
DH
3822 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3823 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 3824 */
20f9cd2a 3825 if (idle_policy(p->policy) && !idle_policy(policy)) {
d0ea0268 3826 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
3827 return -EPERM;
3828 }
5fe1d75f 3829
37e4ab3f 3830 /* can't change other user's priorities */
c69e8d9c 3831 if (!check_same_owner(p))
37e4ab3f 3832 return -EPERM;
ca94c442
LP
3833
3834 /* Normal users shall not reset the sched_reset_on_fork flag */
3835 if (p->sched_reset_on_fork && !reset_on_fork)
3836 return -EPERM;
37e4ab3f 3837 }
1da177e4 3838
725aad24 3839 if (user) {
b0ae1981 3840 retval = security_task_setscheduler(p);
725aad24
JF
3841 if (retval)
3842 return retval;
3843 }
3844
b29739f9
IM
3845 /*
3846 * make sure no PI-waiters arrive (or leave) while we are
3847 * changing the priority of the task:
0122ec5b 3848 *
25985edc 3849 * To be able to change p->policy safely, the appropriate
1da177e4
LT
3850 * runqueue lock must be held.
3851 */
0122ec5b 3852 rq = task_rq_lock(p, &flags);
dc61b1d6 3853
34f971f6
PZ
3854 /*
3855	 * Changing the policy of the stop thread is a very bad idea
3856 */
3857 if (p == rq->stop) {
0122ec5b 3858 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
3859 return -EINVAL;
3860 }
3861
a51e9198 3862 /*
d6b1e911
TG
3863 * If not changing anything there's no need to proceed further,
3864 * but store a possible modification of reset_on_fork.
a51e9198 3865 */
d50dde5a 3866 if (unlikely(policy == p->policy)) {
d0ea0268 3867 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
3868 goto change;
3869 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3870 goto change;
75381608 3871 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 3872 goto change;
d50dde5a 3873
d6b1e911 3874 p->sched_reset_on_fork = reset_on_fork;
45afb173 3875 task_rq_unlock(rq, p, &flags);
a51e9198
DF
3876 return 0;
3877 }
d50dde5a 3878change:
a51e9198 3879
dc61b1d6 3880 if (user) {
332ac17e 3881#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
3882 /*
3883 * Do not allow realtime tasks into groups that have no runtime
3884 * assigned.
3885 */
3886 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
3887 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3888 !task_group_is_autogroup(task_group(p))) {
0122ec5b 3889 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
3890 return -EPERM;
3891 }
dc61b1d6 3892#endif
332ac17e
DF
3893#ifdef CONFIG_SMP
3894 if (dl_bandwidth_enabled() && dl_policy(policy)) {
3895 cpumask_t *span = rq->rd->span;
332ac17e
DF
3896
3897 /*
3898 * Don't allow tasks with an affinity mask smaller than
3899 * the entire root_domain to become SCHED_DEADLINE. We
3900 * will also fail if there's no bandwidth available.
3901 */
e4099a5e
PZ
3902 if (!cpumask_subset(span, &p->cpus_allowed) ||
3903 rq->rd->dl_bw.bw == 0) {
332ac17e
DF
3904 task_rq_unlock(rq, p, &flags);
3905 return -EPERM;
3906 }
3907 }
3908#endif
3909 }
dc61b1d6 3910
1da177e4
LT
3911 /* recheck policy now with rq lock held */
3912 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3913 policy = oldpolicy = -1;
0122ec5b 3914 task_rq_unlock(rq, p, &flags);
1da177e4
LT
3915 goto recheck;
3916 }
332ac17e
DF
3917
3918 /*
3919 * If setscheduling to SCHED_DEADLINE (or changing the parameters
3920 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
3921 * is available.
3922 */
e4099a5e 3923 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
332ac17e
DF
3924 task_rq_unlock(rq, p, &flags);
3925 return -EBUSY;
3926 }
3927
c365c292
TG
3928 p->sched_reset_on_fork = reset_on_fork;
3929 oldprio = p->prio;
3930
dbc7f069
PZ
3931 if (pi) {
3932 /*
3933 * Take priority boosted tasks into account. If the new
3934 * effective priority is unchanged, we just store the new
3935 * normal parameters and do not touch the scheduler class and
3936		 * the runqueue. This will be done when the task deboosts
3937		 * itself.
3938 */
3939 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
3940 if (new_effective_prio == oldprio) {
3941 __setscheduler_params(p, attr);
3942 task_rq_unlock(rq, p, &flags);
3943 return 0;
3944 }
c365c292
TG
3945 }
3946
da0c1e65 3947 queued = task_on_rq_queued(p);
051a1d1a 3948 running = task_current(rq, p);
da0c1e65 3949 if (queued)
4ca9b72b 3950 dequeue_task(rq, p, 0);
0e1f3483 3951 if (running)
f3cd1c4e 3952 put_prev_task(rq, p);
f6b53205 3953
83ab0aa0 3954 prev_class = p->sched_class;
dbc7f069 3955 __setscheduler(rq, p, attr, pi);
f6b53205 3956
0e1f3483
HS
3957 if (running)
3958 p->sched_class->set_curr_task(rq);
da0c1e65 3959 if (queued) {
81a44c54
TG
3960 /*
3961 * We enqueue to tail when the priority of a task is
3962 * increased (user space view).
3963 */
3964 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
3965 }
cb469845 3966
da7a735e 3967 check_class_changed(rq, p, prev_class, oldprio);
4c9a4bc8 3968 preempt_disable(); /* avoid rq from going away on us */
0122ec5b 3969 task_rq_unlock(rq, p, &flags);
b29739f9 3970
dbc7f069
PZ
3971 if (pi)
3972 rt_mutex_adjust_pi(p);
95e02ca9 3973
4c9a4bc8
PZ
3974 /*
3975 * Run balance callbacks after we've adjusted the PI chain.
3976 */
3977 balance_callback(rq);
3978 preempt_enable();
95e02ca9 3979
1da177e4
LT
3980 return 0;
3981}
961ccddd 3982
7479f3c9
PZ
3983static int _sched_setscheduler(struct task_struct *p, int policy,
3984 const struct sched_param *param, bool check)
3985{
3986 struct sched_attr attr = {
3987 .sched_policy = policy,
3988 .sched_priority = param->sched_priority,
3989 .sched_nice = PRIO_TO_NICE(p->static_prio),
3990 };
3991
c13db6b1
SR
3992 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
3993 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
3994 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
3995 policy &= ~SCHED_RESET_ON_FORK;
3996 attr.sched_policy = policy;
3997 }
3998
dbc7f069 3999 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 4000}
961ccddd
RR
4001/**
4002 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4003 * @p: the task in question.
4004 * @policy: new policy.
4005 * @param: structure containing the new RT priority.
4006 *
e69f6186
YB
4007 * Return: 0 on success. An error code otherwise.
4008 *
961ccddd
RR
4009 * NOTE that the task may be already dead.
4010 */
4011int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4012 const struct sched_param *param)
961ccddd 4013{
7479f3c9 4014 return _sched_setscheduler(p, policy, param, true);
961ccddd 4015}
1da177e4
LT
4016EXPORT_SYMBOL_GPL(sched_setscheduler);
4017
d50dde5a
DF
4018int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4019{
dbc7f069 4020 return __sched_setscheduler(p, attr, true, true);
d50dde5a
DF
4021}
4022EXPORT_SYMBOL_GPL(sched_setattr);
4023
961ccddd
RR
4024/**
4025 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4026 * @p: the task in question.
4027 * @policy: new policy.
4028 * @param: structure containing the new RT priority.
4029 *
4030 * Just like sched_setscheduler, only don't bother checking if the
4031 * current context has permission. For example, this is needed in
4032 * stop_machine(): we create temporary high priority worker threads,
4033 * but our caller might not have that capability.
e69f6186
YB
4034 *
4035 * Return: 0 on success. An error code otherwise.
961ccddd
RR
4036 */
4037int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4038 const struct sched_param *param)
961ccddd 4039{
7479f3c9 4040 return _sched_setscheduler(p, policy, param, false);
961ccddd
RR
4041}
4042
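/*
 * Editorial sketch of the in-kernel use case described above: giving a
 * kernel-created thread an RT policy without a capability check, much
 * like stop_machine()-style users do.  The helper name and the priority
 * value 50 are illustrative assumptions.
 */
#include <linux/sched.h>

static void example_make_fifo(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 50 };

	/* No permission check: the caller vouches for this thread. */
	sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
}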
95cdf3b7
IM
4043static int
4044do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4045{
1da177e4
LT
4046 struct sched_param lparam;
4047 struct task_struct *p;
36c8b586 4048 int retval;
1da177e4
LT
4049
4050 if (!param || pid < 0)
4051 return -EINVAL;
4052 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4053 return -EFAULT;
5fe1d75f
ON
4054
4055 rcu_read_lock();
4056 retval = -ESRCH;
1da177e4 4057 p = find_process_by_pid(pid);
5fe1d75f
ON
4058 if (p != NULL)
4059 retval = sched_setscheduler(p, policy, &lparam);
4060 rcu_read_unlock();
36c8b586 4061
1da177e4
LT
4062 return retval;
4063}
4064
d50dde5a
DF
4065/*
4066 * Mimics kernel/events/core.c perf_copy_attr().
4067 */
4068static int sched_copy_attr(struct sched_attr __user *uattr,
4069 struct sched_attr *attr)
4070{
4071 u32 size;
4072 int ret;
4073
4074 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4075 return -EFAULT;
4076
4077 /*
4078 * zero the full structure, so that a short copy will be nice.
4079 */
4080 memset(attr, 0, sizeof(*attr));
4081
4082 ret = get_user(size, &uattr->size);
4083 if (ret)
4084 return ret;
4085
4086 if (size > PAGE_SIZE) /* silly large */
4087 goto err_size;
4088
4089 if (!size) /* abi compat */
4090 size = SCHED_ATTR_SIZE_VER0;
4091
4092 if (size < SCHED_ATTR_SIZE_VER0)
4093 goto err_size;
4094
4095 /*
4096 * If we're handed a bigger struct than we know of,
4097 * ensure all the unknown bits are 0 - i.e. new
4098 * user-space does not rely on any kernel feature
4099	 * extensions we don't know about yet.
4100 */
4101 if (size > sizeof(*attr)) {
4102 unsigned char __user *addr;
4103 unsigned char __user *end;
4104 unsigned char val;
4105
4106 addr = (void __user *)uattr + sizeof(*attr);
4107 end = (void __user *)uattr + size;
4108
4109 for (; addr < end; addr++) {
4110 ret = get_user(val, addr);
4111 if (ret)
4112 return ret;
4113 if (val)
4114 goto err_size;
4115 }
4116 size = sizeof(*attr);
4117 }
4118
4119 ret = copy_from_user(attr, uattr, size);
4120 if (ret)
4121 return -EFAULT;
4122
4123 /*
4124 * XXX: do we want to be lenient like existing syscalls; or do we want
4125 * to be strict and return an error on out-of-bounds values?
4126 */
75e45d51 4127 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 4128
e78c7bca 4129 return 0;
d50dde5a
DF
4130
4131err_size:
4132 put_user(sizeof(*attr), &uattr->size);
e78c7bca 4133 return -E2BIG;
d50dde5a
DF
4134}
4135
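/*
 * Editorial userspace sketch matching the size handshake implemented by
 * sched_copy_attr() above: the caller fills in .size so older and newer
 * kernels and applications can interoperate.  struct sched_attr is
 * declared by hand because C libraries of this era do not ship it; the
 * 10ms-every-100ms numbers and the availability of SYS_sched_setattr in
 * <sys/syscall.h> are assumptions of the sketch.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>	/* SCHED_DEADLINE */

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = SCHED_DEADLINE,
		.sched_runtime  =  10 * 1000 * 1000,	/*  10 ms */
		.sched_deadline = 100 * 1000 * 1000,	/* 100 ms */
		.sched_period   = 100 * 1000 * 1000,	/* 100 ms */
	};

	/* pid 0 means the calling thread; flags must currently be 0. */
	return syscall(SYS_sched_setattr, 0, &attr, 0) ? 1 : 0;
}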
1da177e4
LT
4136/**
4137 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4138 * @pid: the pid in question.
4139 * @policy: new policy.
4140 * @param: structure containing the new RT priority.
e69f6186
YB
4141 *
4142 * Return: 0 on success. An error code otherwise.
1da177e4 4143 */
5add95d4
HC
4144SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4145 struct sched_param __user *, param)
1da177e4 4146{
c21761f1
JB
4147 /* negative values for policy are not valid */
4148 if (policy < 0)
4149 return -EINVAL;
4150
1da177e4
LT
4151 return do_sched_setscheduler(pid, policy, param);
4152}
4153
4154/**
4155 * sys_sched_setparam - set/change the RT priority of a thread
4156 * @pid: the pid in question.
4157 * @param: structure containing the new RT priority.
e69f6186
YB
4158 *
4159 * Return: 0 on success. An error code otherwise.
1da177e4 4160 */
5add95d4 4161SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4162{
c13db6b1 4163 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
4164}
4165
d50dde5a
DF
4166/**
4167 * sys_sched_setattr - same as above, but with extended sched_attr
4168 * @pid: the pid in question.
5778fccf 4169 * @uattr: structure containing the extended parameters.
db66d756 4170 * @flags: for future extension.
d50dde5a 4171 */
6d35ab48
PZ
4172SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4173 unsigned int, flags)
d50dde5a
DF
4174{
4175 struct sched_attr attr;
4176 struct task_struct *p;
4177 int retval;
4178
6d35ab48 4179 if (!uattr || pid < 0 || flags)
d50dde5a
DF
4180 return -EINVAL;
4181
143cf23d
MK
4182 retval = sched_copy_attr(uattr, &attr);
4183 if (retval)
4184 return retval;
d50dde5a 4185
b14ed2c2 4186 if ((int)attr.sched_policy < 0)
dbdb2275 4187 return -EINVAL;
d50dde5a
DF
4188
4189 rcu_read_lock();
4190 retval = -ESRCH;
4191 p = find_process_by_pid(pid);
4192 if (p != NULL)
4193 retval = sched_setattr(p, &attr);
4194 rcu_read_unlock();
4195
4196 return retval;
4197}
4198
1da177e4
LT
4199/**
4200 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4201 * @pid: the pid in question.
e69f6186
YB
4202 *
4203 * Return: On success, the policy of the thread. Otherwise, a negative error
4204 * code.
1da177e4 4205 */
5add95d4 4206SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4207{
36c8b586 4208 struct task_struct *p;
3a5c359a 4209 int retval;
1da177e4
LT
4210
4211 if (pid < 0)
3a5c359a 4212 return -EINVAL;
1da177e4
LT
4213
4214 retval = -ESRCH;
5fe85be0 4215 rcu_read_lock();
1da177e4
LT
4216 p = find_process_by_pid(pid);
4217 if (p) {
4218 retval = security_task_getscheduler(p);
4219 if (!retval)
ca94c442
LP
4220 retval = p->policy
4221 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4222 }
5fe85be0 4223 rcu_read_unlock();
1da177e4
LT
4224 return retval;
4225}
4226
4227/**
ca94c442 4228 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4229 * @pid: the pid in question.
4230 * @param: structure containing the RT priority.
e69f6186
YB
4231 *
4232 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4233 * code.
1da177e4 4234 */
5add95d4 4235SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4236{
ce5f7f82 4237 struct sched_param lp = { .sched_priority = 0 };
36c8b586 4238 struct task_struct *p;
3a5c359a 4239 int retval;
1da177e4
LT
4240
4241 if (!param || pid < 0)
3a5c359a 4242 return -EINVAL;
1da177e4 4243
5fe85be0 4244 rcu_read_lock();
1da177e4
LT
4245 p = find_process_by_pid(pid);
4246 retval = -ESRCH;
4247 if (!p)
4248 goto out_unlock;
4249
4250 retval = security_task_getscheduler(p);
4251 if (retval)
4252 goto out_unlock;
4253
ce5f7f82
PZ
4254 if (task_has_rt_policy(p))
4255 lp.sched_priority = p->rt_priority;
5fe85be0 4256 rcu_read_unlock();
1da177e4
LT
4257
4258 /*
4259 * This one might sleep, we cannot do it with a spinlock held ...
4260 */
4261 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4262
1da177e4
LT
4263 return retval;
4264
4265out_unlock:
5fe85be0 4266 rcu_read_unlock();
1da177e4
LT
4267 return retval;
4268}
4269
d50dde5a
DF
4270static int sched_read_attr(struct sched_attr __user *uattr,
4271 struct sched_attr *attr,
4272 unsigned int usize)
4273{
4274 int ret;
4275
4276 if (!access_ok(VERIFY_WRITE, uattr, usize))
4277 return -EFAULT;
4278
4279 /*
4280 * If we're handed a smaller struct than we know of,
4281 * ensure all the unknown bits are 0 - i.e. old
4282	 * user-space does not get incomplete information.
4283 */
4284 if (usize < sizeof(*attr)) {
4285 unsigned char *addr;
4286 unsigned char *end;
4287
4288 addr = (void *)attr + usize;
4289 end = (void *)attr + sizeof(*attr);
4290
4291 for (; addr < end; addr++) {
4292 if (*addr)
22400674 4293 return -EFBIG;
d50dde5a
DF
4294 }
4295
4296 attr->size = usize;
4297 }
4298
4efbc454 4299 ret = copy_to_user(uattr, attr, attr->size);
d50dde5a
DF
4300 if (ret)
4301 return -EFAULT;
4302
22400674 4303 return 0;
d50dde5a
DF
4304}
4305
4306/**
aab03e05 4307 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 4308 * @pid: the pid in question.
5778fccf 4309 * @uattr: structure containing the extended parameters.
d50dde5a 4310 * @size: sizeof(attr) for fwd/bwd comp.
db66d756 4311 * @flags: for future extension.
d50dde5a 4312 */
6d35ab48
PZ
4313SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4314 unsigned int, size, unsigned int, flags)
d50dde5a
DF
4315{
4316 struct sched_attr attr = {
4317 .size = sizeof(struct sched_attr),
4318 };
4319 struct task_struct *p;
4320 int retval;
4321
4322 if (!uattr || pid < 0 || size > PAGE_SIZE ||
6d35ab48 4323 size < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
4324 return -EINVAL;
4325
4326 rcu_read_lock();
4327 p = find_process_by_pid(pid);
4328 retval = -ESRCH;
4329 if (!p)
4330 goto out_unlock;
4331
4332 retval = security_task_getscheduler(p);
4333 if (retval)
4334 goto out_unlock;
4335
4336 attr.sched_policy = p->policy;
7479f3c9
PZ
4337 if (p->sched_reset_on_fork)
4338 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05
DF
4339 if (task_has_dl_policy(p))
4340 __getparam_dl(p, &attr);
4341 else if (task_has_rt_policy(p))
d50dde5a
DF
4342 attr.sched_priority = p->rt_priority;
4343 else
d0ea0268 4344 attr.sched_nice = task_nice(p);
d50dde5a
DF
4345
4346 rcu_read_unlock();
4347
4348 retval = sched_read_attr(uattr, &attr, size);
4349 return retval;
4350
4351out_unlock:
4352 rcu_read_unlock();
4353 return retval;
4354}
4355
96f874e2 4356long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4357{
5a16f3d3 4358 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4359 struct task_struct *p;
4360 int retval;
1da177e4 4361
23f5d142 4362 rcu_read_lock();
1da177e4
LT
4363
4364 p = find_process_by_pid(pid);
4365 if (!p) {
23f5d142 4366 rcu_read_unlock();
1da177e4
LT
4367 return -ESRCH;
4368 }
4369
23f5d142 4370 /* Prevent p going away */
1da177e4 4371 get_task_struct(p);
23f5d142 4372 rcu_read_unlock();
1da177e4 4373
14a40ffc
TH
4374 if (p->flags & PF_NO_SETAFFINITY) {
4375 retval = -EINVAL;
4376 goto out_put_task;
4377 }
5a16f3d3
RR
4378 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4379 retval = -ENOMEM;
4380 goto out_put_task;
4381 }
4382 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4383 retval = -ENOMEM;
4384 goto out_free_cpus_allowed;
4385 }
1da177e4 4386 retval = -EPERM;
4c44aaaf
EB
4387 if (!check_same_owner(p)) {
4388 rcu_read_lock();
4389 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4390 rcu_read_unlock();
16303ab2 4391 goto out_free_new_mask;
4c44aaaf
EB
4392 }
4393 rcu_read_unlock();
4394 }
1da177e4 4395
b0ae1981 4396 retval = security_task_setscheduler(p);
e7834f8f 4397 if (retval)
16303ab2 4398 goto out_free_new_mask;
e7834f8f 4399
e4099a5e
PZ
4400
4401 cpuset_cpus_allowed(p, cpus_allowed);
4402 cpumask_and(new_mask, in_mask, cpus_allowed);
4403
332ac17e
DF
4404 /*
4405 * Since bandwidth control happens on root_domain basis,
4406 * if admission test is enabled, we only admit -deadline
4407 * tasks allowed to run on all the CPUs in the task's
4408 * root_domain.
4409 */
4410#ifdef CONFIG_SMP
f1e3a093
KT
4411 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4412 rcu_read_lock();
4413 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 4414 retval = -EBUSY;
f1e3a093 4415 rcu_read_unlock();
16303ab2 4416 goto out_free_new_mask;
332ac17e 4417 }
f1e3a093 4418 rcu_read_unlock();
332ac17e
DF
4419 }
4420#endif
49246274 4421again:
25834c73 4422 retval = __set_cpus_allowed_ptr(p, new_mask, true);
1da177e4 4423
8707d8b8 4424 if (!retval) {
5a16f3d3
RR
4425 cpuset_cpus_allowed(p, cpus_allowed);
4426 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4427 /*
4428 * We must have raced with a concurrent cpuset
4429 * update. Just reset the cpus_allowed to the
4430 * cpuset's cpus_allowed
4431 */
5a16f3d3 4432 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4433 goto again;
4434 }
4435 }
16303ab2 4436out_free_new_mask:
5a16f3d3
RR
4437 free_cpumask_var(new_mask);
4438out_free_cpus_allowed:
4439 free_cpumask_var(cpus_allowed);
4440out_put_task:
1da177e4 4441 put_task_struct(p);
1da177e4
LT
4442 return retval;
4443}
4444
4445static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4446 struct cpumask *new_mask)
1da177e4 4447{
96f874e2
RR
4448 if (len < cpumask_size())
4449 cpumask_clear(new_mask);
4450 else if (len > cpumask_size())
4451 len = cpumask_size();
4452
1da177e4
LT
4453 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4454}
4455
4456/**
4457 * sys_sched_setaffinity - set the cpu affinity of a process
4458 * @pid: pid of the process
4459 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4460 * @user_mask_ptr: user-space pointer to the new cpu mask
e69f6186
YB
4461 *
4462 * Return: 0 on success. An error code otherwise.
1da177e4 4463 */
5add95d4
HC
4464SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4465 unsigned long __user *, user_mask_ptr)
1da177e4 4466{
5a16f3d3 4467 cpumask_var_t new_mask;
1da177e4
LT
4468 int retval;
4469
5a16f3d3
RR
4470 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4471 return -ENOMEM;
1da177e4 4472
5a16f3d3
RR
4473 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4474 if (retval == 0)
4475 retval = sched_setaffinity(pid, new_mask);
4476 free_cpumask_var(new_mask);
4477 return retval;
1da177e4
LT
4478}
4479
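/*
 * Editorial userspace sketch: pinning the calling thread to CPU 0
 * through the libc sched_setaffinity() wrapper of the syscall above.
 * A pid of 0 means "the calling thread".
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);

	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}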
96f874e2 4480long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4481{
36c8b586 4482 struct task_struct *p;
31605683 4483 unsigned long flags;
1da177e4 4484 int retval;
1da177e4 4485
23f5d142 4486 rcu_read_lock();
1da177e4
LT
4487
4488 retval = -ESRCH;
4489 p = find_process_by_pid(pid);
4490 if (!p)
4491 goto out_unlock;
4492
e7834f8f
DQ
4493 retval = security_task_getscheduler(p);
4494 if (retval)
4495 goto out_unlock;
4496
013fdb80 4497 raw_spin_lock_irqsave(&p->pi_lock, flags);
6acce3ef 4498 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
013fdb80 4499 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4500
4501out_unlock:
23f5d142 4502 rcu_read_unlock();
1da177e4 4503
9531b62f 4504 return retval;
1da177e4
LT
4505}
4506
4507/**
4508 * sys_sched_getaffinity - get the cpu affinity of a process
4509 * @pid: pid of the process
4510 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4511 * @user_mask_ptr: user-space pointer to hold the current cpu mask
e69f6186
YB
4512 *
4513 * Return: 0 on success. An error code otherwise.
1da177e4 4514 */
5add95d4
HC
4515SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4516 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4517{
4518 int ret;
f17c8607 4519 cpumask_var_t mask;
1da177e4 4520
84fba5ec 4521 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4522 return -EINVAL;
4523 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4524 return -EINVAL;
4525
f17c8607
RR
4526 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4527 return -ENOMEM;
1da177e4 4528
f17c8607
RR
4529 ret = sched_getaffinity(pid, mask);
4530 if (ret == 0) {
8bc037fb 4531 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4532
4533 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4534 ret = -EFAULT;
4535 else
cd3d8031 4536 ret = retlen;
f17c8607
RR
4537 }
4538 free_cpumask_var(mask);
1da177e4 4539
f17c8607 4540 return ret;
1da177e4
LT
4541}
4542
4543/**
4544 * sys_sched_yield - yield the current processor to other threads.
4545 *
dd41f596
IM
4546 * This function yields the current CPU to other tasks. If there are no
4547 * other threads running on this CPU then this function will return.
e69f6186
YB
4548 *
4549 * Return: 0.
1da177e4 4550 */
5add95d4 4551SYSCALL_DEFINE0(sched_yield)
1da177e4 4552{
70b97a7f 4553 struct rq *rq = this_rq_lock();
1da177e4 4554
2d72376b 4555 schedstat_inc(rq, yld_count);
4530d7ab 4556 current->sched_class->yield_task(rq);
1da177e4
LT
4557
4558 /*
4559 * Since we are going to call schedule() anyway, there's
4560 * no need to preempt or enable interrupts:
4561 */
4562 __release(rq->lock);
8a25d5de 4563 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4564 do_raw_spin_unlock(&rq->lock);
ba74c144 4565 sched_preempt_enable_no_resched();
1da177e4
LT
4566
4567 schedule();
4568
4569 return 0;
4570}
4571
02b67cc3 4572int __sched _cond_resched(void)
1da177e4 4573{
fe32d3cd 4574 if (should_resched(0)) {
a18b5d01 4575 preempt_schedule_common();
1da177e4
LT
4576 return 1;
4577 }
4578 return 0;
4579}
02b67cc3 4580EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
4581
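/*
 * Editorial sketch: the usual reason _cond_resched() exists is loops
 * like the one below, which would otherwise hog the CPU on
 * CONFIG_PREEMPT=n kernels.  The helper and the 1024-item stride are
 * illustrative assumptions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>

static u64 example_sum(const u64 *vals, size_t nr)
{
	u64 sum = 0;
	size_t i;

	for (i = 0; i < nr; i++) {
		sum += vals[i];
		if ((i % 1024) == 0)
			cond_resched();	/* voluntary preemption point */
	}
	return sum;
}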
4582/*
613afbf8 4583 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4584 * call schedule, and on return reacquire the lock.
4585 *
41a2d6cf 4586 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4587 * operations here to prevent schedule() from being called twice (once via
4588 * spin_unlock(), once by hand).
4589 */
613afbf8 4590int __cond_resched_lock(spinlock_t *lock)
1da177e4 4591{
fe32d3cd 4592 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
4593 int ret = 0;
4594
f607c668
PZ
4595 lockdep_assert_held(lock);
4596
4a81e832 4597 if (spin_needbreak(lock) || resched) {
1da177e4 4598 spin_unlock(lock);
d86ee480 4599 if (resched)
a18b5d01 4600 preempt_schedule_common();
95c354fe
NP
4601 else
4602 cpu_relax();
6df3cecb 4603 ret = 1;
1da177e4 4604 spin_lock(lock);
1da177e4 4605 }
6df3cecb 4606 return ret;
1da177e4 4607}
613afbf8 4608EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4609
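/*
 * Editorial sketch for __cond_resched_lock() above: a long walk under a
 * spinlock that periodically offers to drop the lock.  The object type
 * is an illustrative assumption, and the sketch assumes no other
 * context removes entries while it runs; otherwise the iteration would
 * have to be restarted after the lock was dropped.
 */
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct example_obj {
	struct list_head node;
	int busy;
};

static void example_clear_busy(struct list_head *head, spinlock_t *lock)
{
	struct example_obj *obj;

	spin_lock(lock);
	list_for_each_entry(obj, head, node) {
		obj->busy = 0;
		cond_resched_lock(lock);	/* may drop and retake *lock */
	}
	spin_unlock(lock);
}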
613afbf8 4610int __sched __cond_resched_softirq(void)
1da177e4
LT
4611{
4612 BUG_ON(!in_softirq());
4613
fe32d3cd 4614 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
98d82567 4615 local_bh_enable();
a18b5d01 4616 preempt_schedule_common();
1da177e4
LT
4617 local_bh_disable();
4618 return 1;
4619 }
4620 return 0;
4621}
613afbf8 4622EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4623
1da177e4
LT
4624/**
4625 * yield - yield the current processor to other threads.
4626 *
8e3fabfd
PZ
4627 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4628 *
4629 * The scheduler is at all times free to pick the calling task as the most
4630 * eligible task to run; if removing the yield() call from your code breaks
4631 * it, it's already broken.
4632 *
4633 * Typical broken usage is:
4634 *
4635 * while (!event)
4636 * yield();
4637 *
4638 * where one assumes that yield() will let 'the other' process run that will
4639 * make event true. If the current task is a SCHED_FIFO task that will never
4640 * happen. Never use yield() as a progress guarantee!!
4641 *
4642 * If you want to use yield() to wait for something, use wait_event().
4643 * If you want to use yield() to be 'nice' for others, use cond_resched().
4644 * If you still want to use yield(), do not!
1da177e4
LT
4645 */
4646void __sched yield(void)
4647{
4648 set_current_state(TASK_RUNNING);
4649 sys_sched_yield();
4650}
1da177e4
LT
4651EXPORT_SYMBOL(yield);
4652
d95f4122
MG
4653/**
4654 * yield_to - yield the current processor to another thread in
4655 * your thread group, or accelerate that thread toward the
4656 * processor it's on.
16addf95
RD
4657 * @p: target task
4658 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4659 *
4660 * It's the caller's job to ensure that the target task struct
4661 * can't go away on us before we can do any checks.
4662 *
e69f6186 4663 * Return:
7b270f60
PZ
4664 * true (>0) if we indeed boosted the target task.
4665 * false (0) if we failed to boost the target.
4666 * -ESRCH if there's no task to yield to.
d95f4122 4667 */
fa93384f 4668int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
4669{
4670 struct task_struct *curr = current;
4671 struct rq *rq, *p_rq;
4672 unsigned long flags;
c3c18640 4673 int yielded = 0;
d95f4122
MG
4674
4675 local_irq_save(flags);
4676 rq = this_rq();
4677
4678again:
4679 p_rq = task_rq(p);
7b270f60
PZ
4680 /*
4681 * If we're the only runnable task on the rq and target rq also
4682 * has only one task, there's absolutely no point in yielding.
4683 */
4684 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4685 yielded = -ESRCH;
4686 goto out_irq;
4687 }
4688
d95f4122 4689 double_rq_lock(rq, p_rq);
39e24d8f 4690 if (task_rq(p) != p_rq) {
d95f4122
MG
4691 double_rq_unlock(rq, p_rq);
4692 goto again;
4693 }
4694
4695 if (!curr->sched_class->yield_to_task)
7b270f60 4696 goto out_unlock;
d95f4122
MG
4697
4698 if (curr->sched_class != p->sched_class)
7b270f60 4699 goto out_unlock;
d95f4122
MG
4700
4701 if (task_running(p_rq, p) || p->state)
7b270f60 4702 goto out_unlock;
d95f4122
MG
4703
4704 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4705 if (yielded) {
d95f4122 4706 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4707 /*
4708 * Make p's CPU reschedule; pick_next_entity takes care of
4709 * fairness.
4710 */
4711 if (preempt && rq != p_rq)
8875125e 4712 resched_curr(p_rq);
6d1cafd8 4713 }
d95f4122 4714
7b270f60 4715out_unlock:
d95f4122 4716 double_rq_unlock(rq, p_rq);
7b270f60 4717out_irq:
d95f4122
MG
4718 local_irq_restore(flags);
4719
7b270f60 4720 if (yielded > 0)
d95f4122
MG
4721 schedule();
4722
4723 return yielded;
4724}
4725EXPORT_SYMBOL_GPL(yield_to);
4726
1da177e4 4727/*
41a2d6cf 4728 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4729 * that process accounting knows that this is a task in IO wait state.
1da177e4 4730 */
1da177e4
LT
4731long __sched io_schedule_timeout(long timeout)
4732{
9cff8ade
N
4733 int old_iowait = current->in_iowait;
4734 struct rq *rq;
1da177e4
LT
4735 long ret;
4736
9cff8ade 4737 current->in_iowait = 1;
10d784ea 4738 blk_schedule_flush_plug(current);
9cff8ade 4739
0ff92245 4740 delayacct_blkio_start();
9cff8ade 4741 rq = raw_rq();
1da177e4
LT
4742 atomic_inc(&rq->nr_iowait);
4743 ret = schedule_timeout(timeout);
9cff8ade 4744 current->in_iowait = old_iowait;
1da177e4 4745 atomic_dec(&rq->nr_iowait);
0ff92245 4746 delayacct_blkio_end();
9cff8ade 4747
1da177e4
LT
4748 return ret;
4749}
9cff8ade 4750EXPORT_SYMBOL(io_schedule_timeout);
1da177e4
LT
4751
4752/**
4753 * sys_sched_get_priority_max - return maximum RT priority.
4754 * @policy: scheduling class.
4755 *
e69f6186
YB
4756 * Return: On success, this syscall returns the maximum
4757 * rt_priority that can be used by a given scheduling class.
4758 * On failure, a negative error code is returned.
1da177e4 4759 */
5add95d4 4760SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4761{
4762 int ret = -EINVAL;
4763
4764 switch (policy) {
4765 case SCHED_FIFO:
4766 case SCHED_RR:
4767 ret = MAX_USER_RT_PRIO-1;
4768 break;
aab03e05 4769 case SCHED_DEADLINE:
1da177e4 4770 case SCHED_NORMAL:
b0a9499c 4771 case SCHED_BATCH:
dd41f596 4772 case SCHED_IDLE:
1da177e4
LT
4773 ret = 0;
4774 break;
4775 }
4776 return ret;
4777}
4778
4779/**
4780 * sys_sched_get_priority_min - return minimum RT priority.
4781 * @policy: scheduling class.
4782 *
e69f6186
YB
4783 * Return: On success, this syscall returns the minimum
4784 * rt_priority that can be used by a given scheduling class.
4785 * On failure, a negative error code is returned.
1da177e4 4786 */
5add95d4 4787SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4788{
4789 int ret = -EINVAL;
4790
4791 switch (policy) {
4792 case SCHED_FIFO:
4793 case SCHED_RR:
4794 ret = 1;
4795 break;
aab03e05 4796 case SCHED_DEADLINE:
1da177e4 4797 case SCHED_NORMAL:
b0a9499c 4798 case SCHED_BATCH:
dd41f596 4799 case SCHED_IDLE:
1da177e4
LT
4800 ret = 0;
4801 }
4802 return ret;
4803}
4804
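/*
 * Editorial userspace sketch: querying the valid SCHED_FIFO priority
 * range through the two syscalls above instead of hard-coding 1..99.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int lo = sched_get_priority_min(SCHED_FIFO);
	int hi = sched_get_priority_max(SCHED_FIFO);

	if (lo == -1 || hi == -1) {
		perror("sched_get_priority_{min,max}");
		return 1;
	}
	printf("SCHED_FIFO priorities: %d..%d\n", lo, hi);
	return 0;
}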
4805/**
4806 * sys_sched_rr_get_interval - return the default timeslice of a process.
4807 * @pid: pid of the process.
4808 * @interval: userspace pointer to the timeslice value.
4809 *
4810 * this syscall writes the default timeslice value of a given process
4811 * into the user-space timespec buffer. A value of '0' means infinity.
e69f6186
YB
4812 *
4813 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4814 * an error code.
1da177e4 4815 */
17da2bd9 4816SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4817 struct timespec __user *, interval)
1da177e4 4818{
36c8b586 4819 struct task_struct *p;
a4ec24b4 4820 unsigned int time_slice;
dba091b9
TG
4821 unsigned long flags;
4822 struct rq *rq;
3a5c359a 4823 int retval;
1da177e4 4824 struct timespec t;
1da177e4
LT
4825
4826 if (pid < 0)
3a5c359a 4827 return -EINVAL;
1da177e4
LT
4828
4829 retval = -ESRCH;
1a551ae7 4830 rcu_read_lock();
1da177e4
LT
4831 p = find_process_by_pid(pid);
4832 if (!p)
4833 goto out_unlock;
4834
4835 retval = security_task_getscheduler(p);
4836 if (retval)
4837 goto out_unlock;
4838
dba091b9 4839 rq = task_rq_lock(p, &flags);
a57beec5
PZ
4840 time_slice = 0;
4841 if (p->sched_class->get_rr_interval)
4842 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 4843 task_rq_unlock(rq, p, &flags);
a4ec24b4 4844
1a551ae7 4845 rcu_read_unlock();
a4ec24b4 4846 jiffies_to_timespec(time_slice, &t);
1da177e4 4847 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 4848 return retval;
3a5c359a 4849
1da177e4 4850out_unlock:
1a551ae7 4851 rcu_read_unlock();
1da177e4
LT
4852 return retval;
4853}
4854
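/*
 * Editorial userspace sketch: reading the calling thread's round-robin
 * timeslice via the syscall above.  A result of 0 seconds means
 * "infinity", e.g. for SCHED_FIFO tasks.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts)) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}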
7c731e0a 4855static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 4856
82a1fcb9 4857void sched_show_task(struct task_struct *p)
1da177e4 4858{
1da177e4 4859 unsigned long free = 0;
4e79752c 4860 int ppid;
1f8a7633 4861 unsigned long state = p->state;
1da177e4 4862
1f8a7633
TH
4863 if (state)
4864 state = __ffs(state) + 1;
28d0686c 4865 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 4866 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 4867#if BITS_PER_LONG == 32
1da177e4 4868 if (state == TASK_RUNNING)
3df0fc5b 4869 printk(KERN_CONT " running ");
1da177e4 4870 else
3df0fc5b 4871 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
4872#else
4873 if (state == TASK_RUNNING)
3df0fc5b 4874 printk(KERN_CONT " running task ");
1da177e4 4875 else
3df0fc5b 4876 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
4877#endif
4878#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 4879 free = stack_not_used(p);
1da177e4 4880#endif
a90e984c 4881 ppid = 0;
4e79752c 4882 rcu_read_lock();
a90e984c
ON
4883 if (pid_alive(p))
4884 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 4885 rcu_read_unlock();
3df0fc5b 4886 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4e79752c 4887 task_pid_nr(p), ppid,
aa47b7e0 4888 (unsigned long)task_thread_info(p)->flags);
1da177e4 4889
3d1cb205 4890 print_worker_info(KERN_INFO, p);
5fb5e6de 4891 show_stack(p, NULL);
1da177e4
LT
4892}
4893
e59e2ae2 4894void show_state_filter(unsigned long state_filter)
1da177e4 4895{
36c8b586 4896 struct task_struct *g, *p;
1da177e4 4897
4bd77321 4898#if BITS_PER_LONG == 32
3df0fc5b
PZ
4899 printk(KERN_INFO
4900 " task PC stack pid father\n");
1da177e4 4901#else
3df0fc5b
PZ
4902 printk(KERN_INFO
4903 " task PC stack pid father\n");
1da177e4 4904#endif
510f5acc 4905 rcu_read_lock();
5d07f420 4906 for_each_process_thread(g, p) {
1da177e4
LT
4907 /*
4908 * reset the NMI-timeout, listing all files on a slow
25985edc 4909 * console might take a lot of time:
1da177e4
LT
4910 */
4911 touch_nmi_watchdog();
39bc89fd 4912 if (!state_filter || (p->state & state_filter))
82a1fcb9 4913 sched_show_task(p);
5d07f420 4914 }
1da177e4 4915
04c9167f
JF
4916 touch_all_softlockup_watchdogs();
4917
dd41f596
IM
4918#ifdef CONFIG_SCHED_DEBUG
4919 sysrq_sched_debug_show();
4920#endif
510f5acc 4921 rcu_read_unlock();
e59e2ae2
IM
4922 /*
4923 * Only show locks if all tasks are dumped:
4924 */
93335a21 4925 if (!state_filter)
e59e2ae2 4926 debug_show_all_locks();
1da177e4
LT
4927}
4928
0db0628d 4929void init_idle_bootup_task(struct task_struct *idle)
1df21055 4930{
dd41f596 4931 idle->sched_class = &idle_sched_class;
1df21055
IM
4932}
4933
f340c0d1
IM
4934/**
4935 * init_idle - set up an idle thread for a given CPU
4936 * @idle: task in question
4937 * @cpu: cpu the idle task belongs to
4938 *
4939 * NOTE: this function does not set the idle thread's NEED_RESCHED
4940 * flag, to make booting more robust.
4941 */
0db0628d 4942void init_idle(struct task_struct *idle, int cpu)
1da177e4 4943{
70b97a7f 4944 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
4945 unsigned long flags;
4946
25834c73
PZ
4947 raw_spin_lock_irqsave(&idle->pi_lock, flags);
4948 raw_spin_lock(&rq->lock);
5cbd54ef 4949
5e1576ed 4950 __sched_fork(0, idle);
06b83b5f 4951 idle->state = TASK_RUNNING;
dd41f596
IM
4952 idle->se.exec_start = sched_clock();
4953
de9b8f5d
PZ
4954#ifdef CONFIG_SMP
4955 /*
4956	 * It's possible that init_idle() gets called multiple times on a task;
4957	 * in that case do_set_cpus_allowed() will not do the right thing.
4958 *
4959 * And since this is boot we can forgo the serialization.
4960 */
4961 set_cpus_allowed_common(idle, cpumask_of(cpu));
4962#endif
6506cf6c
PZ
4963 /*
4964 * We're having a chicken and egg problem, even though we are
4965 * holding rq->lock, the cpu isn't yet set to this cpu so the
4966 * lockdep check in task_group() will fail.
4967 *
4968 * Similar case to sched_fork(). / Alternatively we could
4969 * use task_rq_lock() here and obtain the other rq->lock.
4970 *
4971 * Silence PROVE_RCU
4972 */
4973 rcu_read_lock();
dd41f596 4974 __set_task_cpu(idle, cpu);
6506cf6c 4975 rcu_read_unlock();
1da177e4 4976
1da177e4 4977 rq->curr = rq->idle = idle;
da0c1e65 4978 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 4979#ifdef CONFIG_SMP
3ca7a440 4980 idle->on_cpu = 1;
4866cde0 4981#endif
25834c73
PZ
4982 raw_spin_unlock(&rq->lock);
4983 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
4984
4985 /* Set the preempt count _outside_ the spinlocks! */
01028747 4986 init_idle_preempt_count(idle, cpu);
55cd5340 4987
dd41f596
IM
4988 /*
4989 * The idle tasks have their own, simple scheduling class:
4990 */
4991 idle->sched_class = &idle_sched_class;
868baf07 4992 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 4993 vtime_init_idle(idle, cpu);
de9b8f5d 4994#ifdef CONFIG_SMP
f1c6f1a7
CE
4995 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4996#endif
19978ca6
IM
4997}
4998
f82f8042
JL
4999int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5000 const struct cpumask *trial)
5001{
5002 int ret = 1, trial_cpus;
5003 struct dl_bw *cur_dl_b;
5004 unsigned long flags;
5005
bb2bc55a
MG
5006 if (!cpumask_weight(cur))
5007 return ret;
5008
75e23e49 5009 rcu_read_lock_sched();
f82f8042
JL
5010 cur_dl_b = dl_bw_of(cpumask_any(cur));
5011 trial_cpus = cpumask_weight(trial);
5012
5013 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5014 if (cur_dl_b->bw != -1 &&
5015 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5016 ret = 0;
5017 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
75e23e49 5018 rcu_read_unlock_sched();
f82f8042
JL
5019
5020 return ret;
5021}
5022
7f51412a
JL
5023int task_can_attach(struct task_struct *p,
5024 const struct cpumask *cs_cpus_allowed)
5025{
5026 int ret = 0;
5027
5028 /*
5029 * Kthreads which disallow setaffinity shouldn't be moved
5030 * to a new cpuset; we don't want to change their cpu
5031 * affinity and isolating such threads by their set of
5032 * allowed nodes is unnecessary. Thus, cpusets are not
5033 * applicable for such threads. This prevents checking for
5034 * success of set_cpus_allowed_ptr() on all attached tasks
5035 * before cpus_allowed may be changed.
5036 */
5037 if (p->flags & PF_NO_SETAFFINITY) {
5038 ret = -EINVAL;
5039 goto out;
5040 }
5041
5042#ifdef CONFIG_SMP
5043 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5044 cs_cpus_allowed)) {
5045 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5046 cs_cpus_allowed);
75e23e49 5047 struct dl_bw *dl_b;
7f51412a
JL
5048 bool overflow;
5049 int cpus;
5050 unsigned long flags;
5051
75e23e49
JL
5052 rcu_read_lock_sched();
5053 dl_b = dl_bw_of(dest_cpu);
7f51412a
JL
5054 raw_spin_lock_irqsave(&dl_b->lock, flags);
5055 cpus = dl_bw_cpus(dest_cpu);
5056 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5057 if (overflow)
5058 ret = -EBUSY;
5059 else {
5060 /*
5061 * We reserve space for this task in the destination
5062 * root_domain, as we can't fail after this point.
5063 * We will free resources in the source root_domain
5064 * later on (see set_cpus_allowed_dl()).
5065 */
5066 __dl_add(dl_b, p->dl.dl_bw);
5067 }
5068 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
75e23e49 5069 rcu_read_unlock_sched();
7f51412a
JL
5070
5071 }
5072#endif
5073out:
5074 return ret;
5075}
5076
1da177e4 5077#ifdef CONFIG_SMP
1da177e4 5078
e6628d5b
MG
5079#ifdef CONFIG_NUMA_BALANCING
5080/* Migrate current task p to target_cpu */
5081int migrate_task_to(struct task_struct *p, int target_cpu)
5082{
5083 struct migration_arg arg = { p, target_cpu };
5084 int curr_cpu = task_cpu(p);
5085
5086 if (curr_cpu == target_cpu)
5087 return 0;
5088
5089 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5090 return -EINVAL;
5091
5092 /* TODO: This is not properly updating schedstats */
5093
286549dc 5094 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
5095 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5096}
0ec8aa00
PZ
5097
5098/*
5099 * Requeue a task on a given node and accurately track the number of NUMA
5100 * tasks on the runqueues
5101 */
5102void sched_setnuma(struct task_struct *p, int nid)
5103{
5104 struct rq *rq;
5105 unsigned long flags;
da0c1e65 5106 bool queued, running;
0ec8aa00
PZ
5107
5108 rq = task_rq_lock(p, &flags);
da0c1e65 5109 queued = task_on_rq_queued(p);
0ec8aa00
PZ
5110 running = task_current(rq, p);
5111
da0c1e65 5112 if (queued)
0ec8aa00
PZ
5113 dequeue_task(rq, p, 0);
5114 if (running)
f3cd1c4e 5115 put_prev_task(rq, p);
0ec8aa00
PZ
5116
5117 p->numa_preferred_nid = nid;
0ec8aa00
PZ
5118
5119 if (running)
5120 p->sched_class->set_curr_task(rq);
da0c1e65 5121 if (queued)
0ec8aa00
PZ
5122 enqueue_task(rq, p, 0);
5123 task_rq_unlock(rq, p, &flags);
5124}
5cc389bc 5125#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 5126
1da177e4 5127#ifdef CONFIG_HOTPLUG_CPU
054b9108 5128/*
48c5ccae
PZ
5129 * Ensures that the idle task is using init_mm right before its cpu goes
5130 * offline.
054b9108 5131 */
48c5ccae 5132void idle_task_exit(void)
1da177e4 5133{
48c5ccae 5134 struct mm_struct *mm = current->active_mm;
e76bd8d9 5135
48c5ccae 5136 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 5137
a53efe5f 5138 if (mm != &init_mm) {
48c5ccae 5139 switch_mm(mm, &init_mm, current);
a53efe5f
MS
5140 finish_arch_post_lock_switch();
5141 }
48c5ccae 5142 mmdrop(mm);
1da177e4
LT
5143}
5144
5145/*
5d180232
PZ
5146 * Since this CPU is going 'away' for a while, fold any nr_active delta
5147 * we might have. Assumes we're called after migrate_tasks() so that the
5148 * nr_active count is stable.
5149 *
5150 * Also see the comment "Global load-average calculations".
1da177e4 5151 */
5d180232 5152static void calc_load_migrate(struct rq *rq)
1da177e4 5153{
5d180232
PZ
5154 long delta = calc_load_fold_active(rq);
5155 if (delta)
5156 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
5157}
5158
3f1d2a31
PZ
5159static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5160{
5161}
5162
5163static const struct sched_class fake_sched_class = {
5164 .put_prev_task = put_prev_task_fake,
5165};
5166
5167static struct task_struct fake_task = {
5168 /*
5169 * Avoid pull_{rt,dl}_task()
5170 */
5171 .prio = MAX_PRIO + 1,
5172 .sched_class = &fake_sched_class,
5173};
5174
48f24c4d 5175/*
48c5ccae
PZ
5176 * Migrate all tasks from the rq; sleeping tasks will be migrated by
5177 * try_to_wake_up()->select_task_rq().
5178 *
5179 * Called with rq->lock held even though we're in stop_machine() and
5180 * there's no concurrency possible, we hold the required locks anyway
5181 * because of lock validation efforts.
1da177e4 5182 */
5e16bbc2 5183static void migrate_tasks(struct rq *dead_rq)
1da177e4 5184{
5e16bbc2 5185 struct rq *rq = dead_rq;
48c5ccae
PZ
5186 struct task_struct *next, *stop = rq->stop;
5187 int dest_cpu;
1da177e4
LT
5188
5189 /*
48c5ccae
PZ
5190 * Fudge the rq selection such that the below task selection loop
5191 * doesn't get stuck on the currently eligible stop task.
5192 *
5193 * We're currently inside stop_machine() and the rq is either stuck
5194	 * in the stop_machine_cpu_stop() loop, or we're executing this code;
5195 * either way we should never end up calling schedule() until we're
5196 * done here.
1da177e4 5197 */
48c5ccae 5198 rq->stop = NULL;
48f24c4d 5199
77bd3970
FW
5200 /*
5201 * put_prev_task() and pick_next_task() sched
5202	 * class methods both need to have an up-to-date
5203 * value of rq->clock[_task]
5204 */
5205 update_rq_clock(rq);
5206
5e16bbc2 5207 for (;;) {
48c5ccae
PZ
5208 /*
5209 * There's this thread running, bail when that's the only
5210 * remaining thread.
5211 */
5212 if (rq->nr_running == 1)
dd41f596 5213 break;
48c5ccae 5214
cbce1a68 5215 /*
5473e0cc 5216 * pick_next_task assumes pinned rq->lock.
cbce1a68
PZ
5217 */
5218 lockdep_pin_lock(&rq->lock);
3f1d2a31 5219 next = pick_next_task(rq, &fake_task);
48c5ccae 5220 BUG_ON(!next);
79c53799 5221 next->sched_class->put_prev_task(rq, next);
e692ab53 5222
5473e0cc
WL
5223 /*
5224 * Rules for changing task_struct::cpus_allowed are holding
5225 * both pi_lock and rq->lock, such that holding either
5226 * stabilizes the mask.
5227 *
5228		 * Dropping rq->lock is not quite as disastrous as it usually is
5229 * because !cpu_active at this point, which means load-balance
5230 * will not interfere. Also, stop-machine.
5231 */
5232 lockdep_unpin_lock(&rq->lock);
5233 raw_spin_unlock(&rq->lock);
5234 raw_spin_lock(&next->pi_lock);
5235 raw_spin_lock(&rq->lock);
5236
5237 /*
5238 * Since we're inside stop-machine, _nothing_ should have
5239 * changed the task, WARN if weird stuff happened, because in
5240 * that case the above rq->lock drop is a fail too.
5241 */
5242 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5243 raw_spin_unlock(&next->pi_lock);
5244 continue;
5245 }
5246
48c5ccae 5247 /* Find suitable destination for @next, with force if needed. */
5e16bbc2 5248 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
48c5ccae 5249
5e16bbc2
PZ
5250 rq = __migrate_task(rq, next, dest_cpu);
5251 if (rq != dead_rq) {
5252 raw_spin_unlock(&rq->lock);
5253 rq = dead_rq;
5254 raw_spin_lock(&rq->lock);
5255 }
5473e0cc 5256 raw_spin_unlock(&next->pi_lock);
1da177e4 5257 }
dce48a84 5258
48c5ccae 5259 rq->stop = stop;
dce48a84 5260}
1da177e4
LT
5261#endif /* CONFIG_HOTPLUG_CPU */
5262
e692ab53
NP
5263#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5264
5265static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5266 {
5267 .procname = "sched_domain",
c57baf1e 5268 .mode = 0555,
e0361851 5269 },
56992309 5270 {}
e692ab53
NP
5271};
5272
5273static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5274 {
5275 .procname = "kernel",
c57baf1e 5276 .mode = 0555,
e0361851
AD
5277 .child = sd_ctl_dir,
5278 },
56992309 5279 {}
e692ab53
NP
5280};
5281
5282static struct ctl_table *sd_alloc_ctl_entry(int n)
5283{
5284 struct ctl_table *entry =
5cf9f062 5285 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5286
e692ab53
NP
5287 return entry;
5288}
5289
6382bc90
MM
5290static void sd_free_ctl_entry(struct ctl_table **tablep)
5291{
cd790076 5292 struct ctl_table *entry;
6382bc90 5293
cd790076
MM
5294 /*
5295 * In the intermediate directories, both the child directory and
5296 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5297 * will always be set. In the lowest directory the names are
cd790076
MM
5298 * static strings and all have proc handlers.
5299 */
5300 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5301 if (entry->child)
5302 sd_free_ctl_entry(&entry->child);
cd790076
MM
5303 if (entry->proc_handler == NULL)
5304 kfree(entry->procname);
5305 }
6382bc90
MM
5306
5307 kfree(*tablep);
5308 *tablep = NULL;
5309}
5310
201c373e 5311static int min_load_idx = 0;
fd9b86d3 5312static int max_load_idx = CPU_LOAD_IDX_MAX-1;
201c373e 5313
e692ab53 5314static void
e0361851 5315set_table_entry(struct ctl_table *entry,
e692ab53 5316 const char *procname, void *data, int maxlen,
201c373e
NK
5317 umode_t mode, proc_handler *proc_handler,
5318 bool load_idx)
e692ab53 5319{
e692ab53
NP
5320 entry->procname = procname;
5321 entry->data = data;
5322 entry->maxlen = maxlen;
5323 entry->mode = mode;
5324 entry->proc_handler = proc_handler;
201c373e
NK
5325
5326 if (load_idx) {
5327 entry->extra1 = &min_load_idx;
5328 entry->extra2 = &max_load_idx;
5329 }
e692ab53
NP
5330}
5331
5332static struct ctl_table *
5333sd_alloc_ctl_domain_table(struct sched_domain *sd)
5334{
37e6bae8 5335 struct ctl_table *table = sd_alloc_ctl_entry(14);
e692ab53 5336
ad1cdc1d
MM
5337 if (table == NULL)
5338 return NULL;
5339
e0361851 5340 set_table_entry(&table[0], "min_interval", &sd->min_interval,
201c373e 5341 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5342 set_table_entry(&table[1], "max_interval", &sd->max_interval,
201c373e 5343 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5344 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
201c373e 5345 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5346 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
201c373e 5347 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5348 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
201c373e 5349 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5350 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
201c373e 5351 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5352 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
201c373e 5353 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5354 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
201c373e 5355 sizeof(int), 0644, proc_dointvec_minmax, false);
e0361851 5356 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
201c373e 5357 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5358 set_table_entry(&table[9], "cache_nice_tries",
e692ab53 5359 &sd->cache_nice_tries,
201c373e 5360 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5361 set_table_entry(&table[10], "flags", &sd->flags,
201c373e 5362 sizeof(int), 0644, proc_dointvec_minmax, false);
37e6bae8
AS
5363 set_table_entry(&table[11], "max_newidle_lb_cost",
5364 &sd->max_newidle_lb_cost,
5365 sizeof(long), 0644, proc_doulongvec_minmax, false);
5366 set_table_entry(&table[12], "name", sd->name,
201c373e 5367 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
37e6bae8 5368 /* &table[13] is terminator */
e692ab53
NP
5369
5370 return table;
5371}
5372
be7002e6 5373static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5374{
5375 struct ctl_table *entry, *table;
5376 struct sched_domain *sd;
5377 int domain_num = 0, i;
5378 char buf[32];
5379
5380 for_each_domain(cpu, sd)
5381 domain_num++;
5382 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5383 if (table == NULL)
5384 return NULL;
e692ab53
NP
5385
5386 i = 0;
5387 for_each_domain(cpu, sd) {
5388 snprintf(buf, 32, "domain%d", i);
e692ab53 5389 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5390 entry->mode = 0555;
e692ab53
NP
5391 entry->child = sd_alloc_ctl_domain_table(sd);
5392 entry++;
5393 i++;
5394 }
5395 return table;
5396}
5397
5398static struct ctl_table_header *sd_sysctl_header;
6382bc90 5399static void register_sched_domain_sysctl(void)
e692ab53 5400{
6ad4c188 5401 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5402 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5403 char buf[32];
5404
7378547f
MM
5405 WARN_ON(sd_ctl_dir[0].child);
5406 sd_ctl_dir[0].child = entry;
5407
ad1cdc1d
MM
5408 if (entry == NULL)
5409 return;
5410
6ad4c188 5411 for_each_possible_cpu(i) {
e692ab53 5412 snprintf(buf, 32, "cpu%d", i);
e692ab53 5413 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5414 entry->mode = 0555;
e692ab53 5415 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5416 entry++;
e692ab53 5417 }
7378547f
MM
5418
5419 WARN_ON(sd_sysctl_header);
e692ab53
NP
5420 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5421}
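/*
 * Illustrative note, not code from this file: assuming the tables built
 * above, the registered tree is exposed under /proc/sys/kernel/sched_domain/,
 * one directory per CPU and per domain level, e.g.:
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/name
 */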
6382bc90 5422
7378547f 5423/* may be called multiple times per register */
6382bc90
MM
5424static void unregister_sched_domain_sysctl(void)
5425{
781b0203 5426 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5427 sd_sysctl_header = NULL;
7378547f
MM
5428 if (sd_ctl_dir[0].child)
5429 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5430}
e692ab53 5431#else
6382bc90
MM
5432static void register_sched_domain_sysctl(void)
5433{
5434}
5435static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5436{
5437}
5cc389bc 5438#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
e692ab53 5439
1f11eb6a
GH
5440static void set_rq_online(struct rq *rq)
5441{
5442 if (!rq->online) {
5443 const struct sched_class *class;
5444
c6c4927b 5445 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5446 rq->online = 1;
5447
5448 for_each_class(class) {
5449 if (class->rq_online)
5450 class->rq_online(rq);
5451 }
5452 }
5453}
5454
5455static void set_rq_offline(struct rq *rq)
5456{
5457 if (rq->online) {
5458 const struct sched_class *class;
5459
5460 for_each_class(class) {
5461 if (class->rq_offline)
5462 class->rq_offline(rq);
5463 }
5464
c6c4927b 5465 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5466 rq->online = 0;
5467 }
5468}
5469
1da177e4
LT
5470/*
5471 * migration_call - callback that gets triggered when a CPU is added.
5472 * Here we can start up the necessary migration thread for the new CPU.
5473 */
0db0628d 5474static int
48f24c4d 5475migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5476{
48f24c4d 5477 int cpu = (long)hcpu;
1da177e4 5478 unsigned long flags;
969c7921 5479 struct rq *rq = cpu_rq(cpu);
1da177e4 5480
48c5ccae 5481 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5482
1da177e4 5483 case CPU_UP_PREPARE:
a468d389 5484 rq->calc_load_update = calc_load_update;
1da177e4 5485 break;
48f24c4d 5486
1da177e4 5487 case CPU_ONLINE:
1f94ef59 5488 /* Update our root-domain */
05fa785c 5489 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5490 if (rq->rd) {
c6c4927b 5491 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5492
5493 set_rq_online(rq);
1f94ef59 5494 }
05fa785c 5495 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5496 break;
48f24c4d 5497
1da177e4 5498#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5499 case CPU_DYING:
317f3941 5500 sched_ttwu_pending();
57d885fe 5501 /* Update our root-domain */
05fa785c 5502 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5503 if (rq->rd) {
c6c4927b 5504 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5505 set_rq_offline(rq);
57d885fe 5506 }
5e16bbc2 5507 migrate_tasks(rq);
48c5ccae 5508 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5509 raw_spin_unlock_irqrestore(&rq->lock, flags);
5d180232 5510 break;
48c5ccae 5511
5d180232 5512 case CPU_DEAD:
f319da0c 5513 calc_load_migrate(rq);
57d885fe 5514 break;
1da177e4
LT
5515#endif
5516 }
49c022e6
PZ
5517
5518 update_max_interval();
5519
1da177e4
LT
5520 return NOTIFY_OK;
5521}
5522
f38b0820
PM
5523/*
5524 * Register at high priority so that task migration (migrate_all_tasks)
5525 * happens before everything else. This has to be lower priority than
cdd6c482 5526 * the notifier in the perf_event subsystem, though.
1da177e4 5527 */
0db0628d 5528static struct notifier_block migration_notifier = {
1da177e4 5529 .notifier_call = migration_call,
50a323b7 5530 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5531};
5532
6a82b60d 5533static void set_cpu_rq_start_time(void)
a803f026
CM
5534{
5535 int cpu = smp_processor_id();
5536 struct rq *rq = cpu_rq(cpu);
5537 rq->age_stamp = sched_clock_cpu(cpu);
5538}
5539
0db0628d 5540static int sched_cpu_active(struct notifier_block *nfb,
3a101d05
TH
5541 unsigned long action, void *hcpu)
5542{
5543 switch (action & ~CPU_TASKS_FROZEN) {
a803f026
CM
5544 case CPU_STARTING:
5545 set_cpu_rq_start_time();
5546 return NOTIFY_OK;
dd9d3843
JS
5547 case CPU_ONLINE:
5548 /*
5549 * At this point a starting CPU has marked itself as online via
5550 * set_cpu_online(). But it might not yet have marked itself
5551 * as active, which is essential from here on.
5552 *
5553	 * Thus, fall through and help the starting CPU along.
5554 */
3a101d05
TH
5555 case CPU_DOWN_FAILED:
5556 set_cpu_active((long)hcpu, true);
5557 return NOTIFY_OK;
5558 default:
5559 return NOTIFY_DONE;
5560 }
5561}
5562
0db0628d 5563static int sched_cpu_inactive(struct notifier_block *nfb,
3a101d05
TH
5564 unsigned long action, void *hcpu)
5565{
5566 switch (action & ~CPU_TASKS_FROZEN) {
5567 case CPU_DOWN_PREPARE:
3c18d447 5568 set_cpu_active((long)hcpu, false);
3a101d05 5569 return NOTIFY_OK;
3c18d447
JL
5570 default:
5571 return NOTIFY_DONE;
3a101d05
TH
5572 }
5573}
5574
7babe8db 5575static int __init migration_init(void)
1da177e4
LT
5576{
5577 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5578 int err;
48f24c4d 5579
3a101d05 5580 /* Initialize migration for the boot CPU */
07dccf33
AM
5581 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5582 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5583 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5584 register_cpu_notifier(&migration_notifier);
7babe8db 5585
3a101d05
TH
5586 /* Register cpu active notifiers */
5587 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5588 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5589
a004cd42 5590 return 0;
1da177e4 5591}
7babe8db 5592early_initcall(migration_init);
476f3534 5593
4cb98839
PZ
5594static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5595
3e9830dc 5596#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5597
d039ac60 5598static __read_mostly int sched_debug_enabled;
f6630114 5599
d039ac60 5600static int __init sched_debug_setup(char *str)
f6630114 5601{
d039ac60 5602 sched_debug_enabled = 1;
f6630114
MT
5603
5604 return 0;
5605}
d039ac60
PZ
5606early_param("sched_debug", sched_debug_setup);
5607
5608static inline bool sched_debug(void)
5609{
5610 return sched_debug_enabled;
5611}
f6630114 5612
7c16ec58 5613static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5614 struct cpumask *groupmask)
1da177e4 5615{
4dcf6aff 5616 struct sched_group *group = sd->groups;
1da177e4 5617
96f874e2 5618 cpumask_clear(groupmask);
4dcf6aff
IM
5619
5620 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5621
5622 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5623 printk("does not load-balance\n");
4dcf6aff 5624 if (sd->parent)
3df0fc5b
PZ
5625 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5626 " has parent");
4dcf6aff 5627 return -1;
41c7ce9a
NP
5628 }
5629
333470ee
TH
5630 printk(KERN_CONT "span %*pbl level %s\n",
5631 cpumask_pr_args(sched_domain_span(sd)), sd->name);
4dcf6aff 5632
758b2cdc 5633 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5634 printk(KERN_ERR "ERROR: domain->span does not contain "
5635 "CPU%d\n", cpu);
4dcf6aff 5636 }
758b2cdc 5637 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5638 printk(KERN_ERR "ERROR: domain->groups does not contain"
5639 " CPU%d\n", cpu);
4dcf6aff 5640 }
1da177e4 5641
4dcf6aff 5642 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5643 do {
4dcf6aff 5644 if (!group) {
3df0fc5b
PZ
5645 printk("\n");
5646 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5647 break;
5648 }
5649
758b2cdc 5650 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5651 printk(KERN_CONT "\n");
5652 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5653 break;
5654 }
1da177e4 5655
cb83b629
PZ
5656 if (!(sd->flags & SD_OVERLAP) &&
5657 cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5658 printk(KERN_CONT "\n");
5659 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5660 break;
5661 }
1da177e4 5662
758b2cdc 5663 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5664
333470ee
TH
5665 printk(KERN_CONT " %*pbl",
5666 cpumask_pr_args(sched_group_cpus(group)));
ca8ce3d0 5667 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
63b2ca30
NP
5668 printk(KERN_CONT " (cpu_capacity = %d)",
5669 group->sgc->capacity);
381512cf 5670 }
1da177e4 5671
4dcf6aff
IM
5672 group = group->next;
5673 } while (group != sd->groups);
3df0fc5b 5674 printk(KERN_CONT "\n");
1da177e4 5675
758b2cdc 5676 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5677 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5678
758b2cdc
RR
5679 if (sd->parent &&
5680 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5681 printk(KERN_ERR "ERROR: parent span is not a superset "
5682 "of domain->span\n");
4dcf6aff
IM
5683 return 0;
5684}
1da177e4 5685
4dcf6aff
IM
5686static void sched_domain_debug(struct sched_domain *sd, int cpu)
5687{
5688 int level = 0;
1da177e4 5689
d039ac60 5690 if (!sched_debug_enabled)
f6630114
MT
5691 return;
5692
4dcf6aff
IM
5693 if (!sd) {
5694 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5695 return;
5696 }
1da177e4 5697
4dcf6aff
IM
5698 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5699
5700 for (;;) {
4cb98839 5701 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5702 break;
1da177e4
LT
5703 level++;
5704 sd = sd->parent;
33859f7f 5705 if (!sd)
4dcf6aff
IM
5706 break;
5707 }
1da177e4 5708}
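/*
 * Illustrative example (assumed output, not part of this file): with
 * "sched_debug" on the command line, the printks above produce a dump
 * roughly like the following for a 4-CPU, two-threads-per-core box:
 *
 *	CPU0 attaching sched-domain:
 *	 domain 0: span 0-1 level SMT
 *	  groups: 0 1
 *	  domain 1: span 0-3 level MC
 *	   groups: 0-1 2-3
 */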
6d6bc0ad 5709#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5710# define sched_domain_debug(sd, cpu) do { } while (0)
d039ac60
PZ
5711static inline bool sched_debug(void)
5712{
5713 return false;
5714}
6d6bc0ad 5715#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5716
1a20ff27 5717static int sd_degenerate(struct sched_domain *sd)
245af2c7 5718{
758b2cdc 5719 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5720 return 1;
5721
5722 /* Following flags need at least 2 groups */
5723 if (sd->flags & (SD_LOAD_BALANCE |
5724 SD_BALANCE_NEWIDLE |
5725 SD_BALANCE_FORK |
89c4710e 5726 SD_BALANCE_EXEC |
5d4dfddd 5727 SD_SHARE_CPUCAPACITY |
d77b3ed5
VG
5728 SD_SHARE_PKG_RESOURCES |
5729 SD_SHARE_POWERDOMAIN)) {
245af2c7
SS
5730 if (sd->groups != sd->groups->next)
5731 return 0;
5732 }
5733
5734 /* Following flags don't use groups */
c88d5910 5735 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5736 return 0;
5737
5738 return 1;
5739}
5740
48f24c4d
IM
5741static int
5742sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5743{
5744 unsigned long cflags = sd->flags, pflags = parent->flags;
5745
5746 if (sd_degenerate(parent))
5747 return 1;
5748
758b2cdc 5749 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5750 return 0;
5751
245af2c7
SS
5752 /* Flags needing groups don't count if only 1 group in parent */
5753 if (parent->groups == parent->groups->next) {
5754 pflags &= ~(SD_LOAD_BALANCE |
5755 SD_BALANCE_NEWIDLE |
5756 SD_BALANCE_FORK |
89c4710e 5757 SD_BALANCE_EXEC |
5d4dfddd 5758 SD_SHARE_CPUCAPACITY |
10866e62 5759 SD_SHARE_PKG_RESOURCES |
d77b3ed5
VG
5760 SD_PREFER_SIBLING |
5761 SD_SHARE_POWERDOMAIN);
5436499e
KC
5762 if (nr_node_ids == 1)
5763 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5764 }
5765 if (~cflags & pflags)
5766 return 0;
5767
5768 return 1;
5769}
5770
dce840a0 5771static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5772{
dce840a0 5773 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5774
68e74568 5775 cpupri_cleanup(&rd->cpupri);
6bfd6d72 5776 cpudl_cleanup(&rd->cpudl);
1baca4ce 5777 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5778 free_cpumask_var(rd->rto_mask);
5779 free_cpumask_var(rd->online);
5780 free_cpumask_var(rd->span);
5781 kfree(rd);
5782}
5783
57d885fe
GH
5784static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5785{
a0490fa3 5786 struct root_domain *old_rd = NULL;
57d885fe 5787 unsigned long flags;
57d885fe 5788
05fa785c 5789 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5790
5791 if (rq->rd) {
a0490fa3 5792 old_rd = rq->rd;
57d885fe 5793
c6c4927b 5794 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5795 set_rq_offline(rq);
57d885fe 5796
c6c4927b 5797 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5798
a0490fa3 5799 /*
0515973f 5800	 * If we don't want to free the old_rd yet then
a0490fa3
IM
5801 * set old_rd to NULL to skip the freeing later
5802 * in this function:
5803 */
5804 if (!atomic_dec_and_test(&old_rd->refcount))
5805 old_rd = NULL;
57d885fe
GH
5806 }
5807
5808 atomic_inc(&rd->refcount);
5809 rq->rd = rd;
5810
c6c4927b 5811 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5812 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5813 set_rq_online(rq);
57d885fe 5814
05fa785c 5815 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5816
5817 if (old_rd)
dce840a0 5818 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5819}
5820
68c38fc3 5821static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5822{
5823 memset(rd, 0, sizeof(*rd));
5824
68c38fc3 5825 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5826 goto out;
68c38fc3 5827 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5828 goto free_span;
1baca4ce 5829 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
c6c4927b 5830 goto free_online;
1baca4ce
JL
5831 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5832 goto free_dlo_mask;
6e0534f2 5833
332ac17e 5834 init_dl_bw(&rd->dl_bw);
6bfd6d72
JL
5835 if (cpudl_init(&rd->cpudl) != 0)
5836 goto free_dlo_mask;
332ac17e 5837
68c38fc3 5838 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5839 goto free_rto_mask;
c6c4927b 5840 return 0;
6e0534f2 5841
68e74568
RR
5842free_rto_mask:
5843 free_cpumask_var(rd->rto_mask);
1baca4ce
JL
5844free_dlo_mask:
5845 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5846free_online:
5847 free_cpumask_var(rd->online);
5848free_span:
5849 free_cpumask_var(rd->span);
0c910d28 5850out:
c6c4927b 5851 return -ENOMEM;
57d885fe
GH
5852}
5853
029632fb
PZ
5854/*
5855 * By default the system creates a single root-domain with all cpus as
5856 * members (mimicking the global state we have today).
5857 */
5858struct root_domain def_root_domain;
5859
57d885fe
GH
5860static void init_defrootdomain(void)
5861{
68c38fc3 5862 init_rootdomain(&def_root_domain);
c6c4927b 5863
57d885fe
GH
5864 atomic_set(&def_root_domain.refcount, 1);
5865}
5866
dc938520 5867static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5868{
5869 struct root_domain *rd;
5870
5871 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5872 if (!rd)
5873 return NULL;
5874
68c38fc3 5875 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5876 kfree(rd);
5877 return NULL;
5878 }
57d885fe
GH
5879
5880 return rd;
5881}
5882
63b2ca30 5883static void free_sched_groups(struct sched_group *sg, int free_sgc)
e3589f6c
PZ
5884{
5885 struct sched_group *tmp, *first;
5886
5887 if (!sg)
5888 return;
5889
5890 first = sg;
5891 do {
5892 tmp = sg->next;
5893
63b2ca30
NP
5894 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5895 kfree(sg->sgc);
e3589f6c
PZ
5896
5897 kfree(sg);
5898 sg = tmp;
5899 } while (sg != first);
5900}
5901
dce840a0
PZ
5902static void free_sched_domain(struct rcu_head *rcu)
5903{
5904 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5905
5906 /*
5907 * If its an overlapping domain it has private groups, iterate and
5908 * nuke them all.
5909 */
5910 if (sd->flags & SD_OVERLAP) {
5911 free_sched_groups(sd->groups, 1);
5912 } else if (atomic_dec_and_test(&sd->groups->ref)) {
63b2ca30 5913 kfree(sd->groups->sgc);
dce840a0 5914 kfree(sd->groups);
9c3f75cb 5915 }
dce840a0
PZ
5916 kfree(sd);
5917}
5918
5919static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5920{
5921 call_rcu(&sd->rcu, free_sched_domain);
5922}
5923
5924static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5925{
5926 for (; sd; sd = sd->parent)
5927 destroy_sched_domain(sd, cpu);
5928}
5929
518cd623
PZ
5930/*
5931 * Keep a special pointer to the highest sched_domain that has
5932	 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
5933	 * allows us to avoid some pointer chasing in select_idle_sibling().
5934 *
5935 * Also keep a unique ID per domain (we use the first cpu number in
5936 * the cpumask of the domain), this allows us to quickly tell if
39be3501 5937 * two cpus are in the same cache domain, see cpus_share_cache().
518cd623
PZ
5938 */
5939DEFINE_PER_CPU(struct sched_domain *, sd_llc);
7d9ffa89 5940DEFINE_PER_CPU(int, sd_llc_size);
518cd623 5941DEFINE_PER_CPU(int, sd_llc_id);
fb13c7ee 5942DEFINE_PER_CPU(struct sched_domain *, sd_numa);
37dc6b50
PM
5943DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5944DEFINE_PER_CPU(struct sched_domain *, sd_asym);
518cd623
PZ
5945
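/*
 * Illustrative sketch (assumption, mirroring how cpus_share_cache() is
 * expected to use the per-cpu data kept up to date below): with sd_llc_id
 * cached per CPU, the "same last-level cache?" question reduces to a
 * comparison of two per-cpu values instead of a domain-tree walk:
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *	}
 */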
5946static void update_top_cache_domain(int cpu)
5947{
5948 struct sched_domain *sd;
5d4cf996 5949 struct sched_domain *busy_sd = NULL;
518cd623 5950 int id = cpu;
7d9ffa89 5951 int size = 1;
518cd623
PZ
5952
5953 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
7d9ffa89 5954 if (sd) {
518cd623 5955 id = cpumask_first(sched_domain_span(sd));
7d9ffa89 5956 size = cpumask_weight(sched_domain_span(sd));
5d4cf996 5957 busy_sd = sd->parent; /* sd_busy */
7d9ffa89 5958 }
5d4cf996 5959 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
518cd623
PZ
5960
5961 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
7d9ffa89 5962 per_cpu(sd_llc_size, cpu) = size;
518cd623 5963 per_cpu(sd_llc_id, cpu) = id;
fb13c7ee
MG
5964
5965 sd = lowest_flag_domain(cpu, SD_NUMA);
5966 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
37dc6b50
PM
5967
5968 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5969 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
518cd623
PZ
5970}
5971
1da177e4 5972/*
0eab9146 5973 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
5974 * hold the hotplug lock.
5975 */
0eab9146
IM
5976static void
5977cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 5978{
70b97a7f 5979 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
5980 struct sched_domain *tmp;
5981
5982 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 5983 for (tmp = sd; tmp; ) {
245af2c7
SS
5984 struct sched_domain *parent = tmp->parent;
5985 if (!parent)
5986 break;
f29c9b1c 5987
1a848870 5988 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 5989 tmp->parent = parent->parent;
1a848870
SS
5990 if (parent->parent)
5991 parent->parent->child = tmp;
10866e62
PZ
5992 /*
5993 * Transfer SD_PREFER_SIBLING down in case of a
5994 * degenerate parent; the spans match for this
5995 * so the property transfers.
5996 */
5997 if (parent->flags & SD_PREFER_SIBLING)
5998 tmp->flags |= SD_PREFER_SIBLING;
dce840a0 5999 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
6000 } else
6001 tmp = tmp->parent;
245af2c7
SS
6002 }
6003
1a848870 6004 if (sd && sd_degenerate(sd)) {
dce840a0 6005 tmp = sd;
245af2c7 6006 sd = sd->parent;
dce840a0 6007 destroy_sched_domain(tmp, cpu);
1a848870
SS
6008 if (sd)
6009 sd->child = NULL;
6010 }
1da177e4 6011
4cb98839 6012 sched_domain_debug(sd, cpu);
1da177e4 6013
57d885fe 6014 rq_attach_root(rq, rd);
dce840a0 6015 tmp = rq->sd;
674311d5 6016 rcu_assign_pointer(rq->sd, sd);
dce840a0 6017 destroy_sched_domains(tmp, cpu);
518cd623
PZ
6018
6019 update_top_cache_domain(cpu);
1da177e4
LT
6020}
6021
1da177e4
LT
6022/* Setup the mask of cpus configured for isolated domains */
6023static int __init isolated_cpu_setup(char *str)
6024{
bdddd296 6025 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 6026 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
6027 return 1;
6028}
6029
8927f494 6030__setup("isolcpus=", isolated_cpu_setup);
1da177e4 6031
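/*
 * Illustrative usage note (not code from this file): booting with e.g.
 * "isolcpus=2,3" fills cpu_isolated_map via the parser above; those CPUs
 * are then left out of the sched domains built below, so only explicit
 * affinity (sched_setaffinity()/cpusets) places tasks on them.
 */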
49a02c51 6032struct s_data {
21d42ccf 6033 struct sched_domain ** __percpu sd;
49a02c51
AH
6034 struct root_domain *rd;
6035};
6036
2109b99e 6037enum s_alloc {
2109b99e 6038 sa_rootdomain,
21d42ccf 6039 sa_sd,
dce840a0 6040 sa_sd_storage,
2109b99e
AH
6041 sa_none,
6042};
6043
c1174876
PZ
6044/*
6045 * Build an iteration mask that can exclude certain CPUs from the upwards
6046 * domain traversal.
6047 *
6048 * Asymmetric node setups can result in situations where the domain tree is of
6049	 * unequal depth; make sure to skip domains that already cover the entire
6050 * range.
6051 *
6052 * In that case build_sched_domains() will have terminated the iteration early
6053 * and our sibling sd spans will be empty. Domains should always include the
6054 * cpu they're built on, so check that.
6055 *
6056 */
6057static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6058{
6059 const struct cpumask *span = sched_domain_span(sd);
6060 struct sd_data *sdd = sd->private;
6061 struct sched_domain *sibling;
6062 int i;
6063
6064 for_each_cpu(i, span) {
6065 sibling = *per_cpu_ptr(sdd->sd, i);
6066 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6067 continue;
6068
6069 cpumask_set_cpu(i, sched_group_mask(sg));
6070 }
6071}
6072
6073/*
6074 * Return the canonical balance cpu for this group, this is the first cpu
6075 * of this group that's also in the iteration mask.
6076 */
6077int group_balance_cpu(struct sched_group *sg)
6078{
6079 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6080}
6081
e3589f6c
PZ
6082static int
6083build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6084{
6085 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6086 const struct cpumask *span = sched_domain_span(sd);
6087 struct cpumask *covered = sched_domains_tmpmask;
6088 struct sd_data *sdd = sd->private;
aaecac4a 6089 struct sched_domain *sibling;
e3589f6c
PZ
6090 int i;
6091
6092 cpumask_clear(covered);
6093
6094 for_each_cpu(i, span) {
6095 struct cpumask *sg_span;
6096
6097 if (cpumask_test_cpu(i, covered))
6098 continue;
6099
aaecac4a 6100 sibling = *per_cpu_ptr(sdd->sd, i);
c1174876
PZ
6101
6102 /* See the comment near build_group_mask(). */
aaecac4a 6103 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
c1174876
PZ
6104 continue;
6105
e3589f6c 6106 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 6107 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
6108
6109 if (!sg)
6110 goto fail;
6111
6112 sg_span = sched_group_cpus(sg);
aaecac4a
ZZ
6113 if (sibling->child)
6114 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6115 else
e3589f6c
PZ
6116 cpumask_set_cpu(i, sg_span);
6117
6118 cpumask_or(covered, covered, sg_span);
6119
63b2ca30
NP
6120 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6121 if (atomic_inc_return(&sg->sgc->ref) == 1)
c1174876
PZ
6122 build_group_mask(sd, sg);
6123
c3decf0d 6124 /*
63b2ca30 6125 * Initialize sgc->capacity such that even if we mess up the
c3decf0d
PZ
6126 * domains and no possible iteration will get us here, we won't
6127 * die on a /0 trap.
6128 */
ca8ce3d0 6129 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
e3589f6c 6130
c1174876
PZ
6131 /*
6132 * Make sure the first group of this domain contains the
6133 * canonical balance cpu. Otherwise the sched_domain iteration
6134 * breaks. See update_sg_lb_stats().
6135 */
74a5ce20 6136 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
c1174876 6137 group_balance_cpu(sg) == cpu)
e3589f6c
PZ
6138 groups = sg;
6139
6140 if (!first)
6141 first = sg;
6142 if (last)
6143 last->next = sg;
6144 last = sg;
6145 last->next = first;
6146 }
6147 sd->groups = groups;
6148
6149 return 0;
6150
6151fail:
6152 free_sched_groups(first, 0);
6153
6154 return -ENOMEM;
6155}
6156
dce840a0 6157static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 6158{
dce840a0
PZ
6159 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6160 struct sched_domain *child = sd->child;
1da177e4 6161
dce840a0
PZ
6162 if (child)
6163 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 6164
9c3f75cb 6165 if (sg) {
dce840a0 6166 *sg = *per_cpu_ptr(sdd->sg, cpu);
63b2ca30
NP
6167 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6168 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
9c3f75cb 6169 }
dce840a0
PZ
6170
6171 return cpu;
1e9f28fa 6172}
1e9f28fa 6173
01a08546 6174/*
dce840a0
PZ
6175 * build_sched_groups will build a circular linked list of the groups
6176 * covered by the given span, and will set each group's ->cpumask correctly,
ced549fa 6177 * and ->cpu_capacity to 0.
e3589f6c
PZ
6178 *
6179 * Assumes the sched_domain tree is fully constructed
01a08546 6180 */
e3589f6c
PZ
6181static int
6182build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 6183{
dce840a0
PZ
6184 struct sched_group *first = NULL, *last = NULL;
6185 struct sd_data *sdd = sd->private;
6186 const struct cpumask *span = sched_domain_span(sd);
f96225fd 6187 struct cpumask *covered;
dce840a0 6188 int i;
9c1cfda2 6189
e3589f6c
PZ
6190 get_group(cpu, sdd, &sd->groups);
6191 atomic_inc(&sd->groups->ref);
6192
0936629f 6193 if (cpu != cpumask_first(span))
e3589f6c
PZ
6194 return 0;
6195
f96225fd
PZ
6196 lockdep_assert_held(&sched_domains_mutex);
6197 covered = sched_domains_tmpmask;
6198
dce840a0 6199 cpumask_clear(covered);
6711cab4 6200
dce840a0
PZ
6201 for_each_cpu(i, span) {
6202 struct sched_group *sg;
cd08e923 6203 int group, j;
6711cab4 6204
dce840a0
PZ
6205 if (cpumask_test_cpu(i, covered))
6206 continue;
6711cab4 6207
cd08e923 6208 group = get_group(i, sdd, &sg);
c1174876 6209 cpumask_setall(sched_group_mask(sg));
0601a88d 6210
dce840a0
PZ
6211 for_each_cpu(j, span) {
6212 if (get_group(j, sdd, NULL) != group)
6213 continue;
0601a88d 6214
dce840a0
PZ
6215 cpumask_set_cpu(j, covered);
6216 cpumask_set_cpu(j, sched_group_cpus(sg));
6217 }
0601a88d 6218
dce840a0
PZ
6219 if (!first)
6220 first = sg;
6221 if (last)
6222 last->next = sg;
6223 last = sg;
6224 }
6225 last->next = first;
e3589f6c
PZ
6226
6227 return 0;
0601a88d 6228}
51888ca2 6229
89c4710e 6230/*
63b2ca30 6231 * Initialize sched groups cpu_capacity.
89c4710e 6232 *
63b2ca30 6233 * cpu_capacity indicates the capacity of sched group, which is used while
89c4710e 6234 * distributing the load between different sched groups in a sched domain.
63b2ca30
NP
6235 * Typically cpu_capacity for all the groups in a sched domain will be same
6236 * unless there are asymmetries in the topology. If there are asymmetries,
6237 * group having more cpu_capacity will pickup more load compared to the
6238 * group having less cpu_capacity.
89c4710e 6239 */
63b2ca30 6240static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
89c4710e 6241{
e3589f6c 6242 struct sched_group *sg = sd->groups;
89c4710e 6243
94c95ba6 6244 WARN_ON(!sg);
e3589f6c
PZ
6245
6246 do {
6247 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6248 sg = sg->next;
6249 } while (sg != sd->groups);
89c4710e 6250
c1174876 6251 if (cpu != group_balance_cpu(sg))
e3589f6c 6252 return;
aae6d3dd 6253
63b2ca30
NP
6254 update_group_capacity(sd, cpu);
6255 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
89c4710e
SS
6256}
6257
7c16ec58
MT
6258/*
6259 * Initializers for schedule domains
6260 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6261 */
6262
1d3504fc 6263static int default_relax_domain_level = -1;
60495e77 6264int sched_domain_level_max;
1d3504fc
HS
6265
6266static int __init setup_relax_domain_level(char *str)
6267{
a841f8ce
DS
6268 if (kstrtoint(str, 0, &default_relax_domain_level))
6269 pr_warn("Unable to set relax_domain_level\n");
30e0e178 6270
1d3504fc
HS
6271 return 1;
6272}
6273__setup("relax_domain_level=", setup_relax_domain_level);
6274
6275static void set_domain_attribute(struct sched_domain *sd,
6276 struct sched_domain_attr *attr)
6277{
6278 int request;
6279
6280 if (!attr || attr->relax_domain_level < 0) {
6281 if (default_relax_domain_level < 0)
6282 return;
6283 else
6284 request = default_relax_domain_level;
6285 } else
6286 request = attr->relax_domain_level;
6287 if (request < sd->level) {
6288 /* turn off idle balance on this domain */
c88d5910 6289 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6290 } else {
6291 /* turn on idle balance on this domain */
c88d5910 6292 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6293 }
6294}
6295
54ab4ff4
PZ
6296static void __sdt_free(const struct cpumask *cpu_map);
6297static int __sdt_alloc(const struct cpumask *cpu_map);
6298
2109b99e
AH
6299static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6300 const struct cpumask *cpu_map)
6301{
6302 switch (what) {
2109b99e 6303 case sa_rootdomain:
822ff793
PZ
6304 if (!atomic_read(&d->rd->refcount))
6305 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6306 case sa_sd:
6307 free_percpu(d->sd); /* fall through */
dce840a0 6308 case sa_sd_storage:
54ab4ff4 6309 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6310 case sa_none:
6311 break;
6312 }
6313}
3404c8d9 6314
2109b99e
AH
6315static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6316 const struct cpumask *cpu_map)
6317{
dce840a0
PZ
6318 memset(d, 0, sizeof(*d));
6319
54ab4ff4
PZ
6320 if (__sdt_alloc(cpu_map))
6321 return sa_sd_storage;
dce840a0
PZ
6322 d->sd = alloc_percpu(struct sched_domain *);
6323 if (!d->sd)
6324 return sa_sd_storage;
2109b99e 6325 d->rd = alloc_rootdomain();
dce840a0 6326 if (!d->rd)
21d42ccf 6327 return sa_sd;
2109b99e
AH
6328 return sa_rootdomain;
6329}
57d885fe 6330
dce840a0
PZ
6331/*
6332 * NULL the sd_data elements we've used to build the sched_domain and
6333 * sched_group structure so that the subsequent __free_domain_allocs()
6334 * will not free the data we're using.
6335 */
6336static void claim_allocations(int cpu, struct sched_domain *sd)
6337{
6338 struct sd_data *sdd = sd->private;
dce840a0
PZ
6339
6340 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6341 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6342
e3589f6c 6343 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6344 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c 6345
63b2ca30
NP
6346 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6347 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
dce840a0
PZ
6348}
6349
cb83b629 6350#ifdef CONFIG_NUMA
cb83b629 6351static int sched_domains_numa_levels;
e3fe70b1 6352enum numa_topology_type sched_numa_topology_type;
cb83b629 6353static int *sched_domains_numa_distance;
9942f79b 6354int sched_max_numa_distance;
cb83b629
PZ
6355static struct cpumask ***sched_domains_numa_masks;
6356static int sched_domains_curr_level;
143e1e28 6357#endif
cb83b629 6358
143e1e28
VG
6359/*
6360 * SD_flags allowed in topology descriptions.
6361 *
5d4dfddd 6362 * SD_SHARE_CPUCAPACITY - describes SMT topologies
143e1e28
VG
6363 * SD_SHARE_PKG_RESOURCES - describes shared caches
6364 * SD_NUMA - describes NUMA topologies
d77b3ed5 6365 * SD_SHARE_POWERDOMAIN - describes shared power domain
143e1e28
VG
6366 *
6367 * Odd one out:
6368 * SD_ASYM_PACKING - describes SMT quirks
6369 */
6370#define TOPOLOGY_SD_FLAGS \
5d4dfddd 6371 (SD_SHARE_CPUCAPACITY | \
143e1e28
VG
6372 SD_SHARE_PKG_RESOURCES | \
6373 SD_NUMA | \
d77b3ed5
VG
6374 SD_ASYM_PACKING | \
6375 SD_SHARE_POWERDOMAIN)
cb83b629
PZ
6376
6377static struct sched_domain *
143e1e28 6378sd_init(struct sched_domain_topology_level *tl, int cpu)
cb83b629
PZ
6379{
6380 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
143e1e28
VG
6381 int sd_weight, sd_flags = 0;
6382
6383#ifdef CONFIG_NUMA
6384 /*
6385 * Ugly hack to pass state to sd_numa_mask()...
6386 */
6387 sched_domains_curr_level = tl->numa_level;
6388#endif
6389
6390 sd_weight = cpumask_weight(tl->mask(cpu));
6391
6392 if (tl->sd_flags)
6393 sd_flags = (*tl->sd_flags)();
6394 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6395 "wrong sd_flags in topology description\n"))
6396 sd_flags &= ~TOPOLOGY_SD_FLAGS;
cb83b629
PZ
6397
6398 *sd = (struct sched_domain){
6399 .min_interval = sd_weight,
6400 .max_interval = 2*sd_weight,
6401 .busy_factor = 32,
870a0bb5 6402 .imbalance_pct = 125,
143e1e28
VG
6403
6404 .cache_nice_tries = 0,
6405 .busy_idx = 0,
6406 .idle_idx = 0,
cb83b629
PZ
6407 .newidle_idx = 0,
6408 .wake_idx = 0,
6409 .forkexec_idx = 0,
6410
6411 .flags = 1*SD_LOAD_BALANCE
6412 | 1*SD_BALANCE_NEWIDLE
143e1e28
VG
6413 | 1*SD_BALANCE_EXEC
6414 | 1*SD_BALANCE_FORK
cb83b629 6415 | 0*SD_BALANCE_WAKE
143e1e28 6416 | 1*SD_WAKE_AFFINE
5d4dfddd 6417 | 0*SD_SHARE_CPUCAPACITY
cb83b629 6418 | 0*SD_SHARE_PKG_RESOURCES
143e1e28 6419 | 0*SD_SERIALIZE
cb83b629 6420 | 0*SD_PREFER_SIBLING
143e1e28
VG
6421 | 0*SD_NUMA
6422 | sd_flags
cb83b629 6423 ,
143e1e28 6424
cb83b629
PZ
6425 .last_balance = jiffies,
6426 .balance_interval = sd_weight,
143e1e28 6427 .smt_gain = 0,
2b4cfe64
JL
6428 .max_newidle_lb_cost = 0,
6429 .next_decay_max_lb_cost = jiffies,
143e1e28
VG
6430#ifdef CONFIG_SCHED_DEBUG
6431 .name = tl->name,
6432#endif
cb83b629 6433 };
cb83b629
PZ
6434
6435 /*
143e1e28 6436 * Convert topological properties into behaviour.
cb83b629 6437 */
143e1e28 6438
5d4dfddd 6439 if (sd->flags & SD_SHARE_CPUCAPACITY) {
caff37ef 6440 sd->flags |= SD_PREFER_SIBLING;
143e1e28
VG
6441 sd->imbalance_pct = 110;
6442 sd->smt_gain = 1178; /* ~15% */
143e1e28
VG
6443
6444 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6445 sd->imbalance_pct = 117;
6446 sd->cache_nice_tries = 1;
6447 sd->busy_idx = 2;
6448
6449#ifdef CONFIG_NUMA
6450 } else if (sd->flags & SD_NUMA) {
6451 sd->cache_nice_tries = 2;
6452 sd->busy_idx = 3;
6453 sd->idle_idx = 2;
6454
6455 sd->flags |= SD_SERIALIZE;
6456 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6457 sd->flags &= ~(SD_BALANCE_EXEC |
6458 SD_BALANCE_FORK |
6459 SD_WAKE_AFFINE);
6460 }
6461
6462#endif
6463 } else {
6464 sd->flags |= SD_PREFER_SIBLING;
6465 sd->cache_nice_tries = 1;
6466 sd->busy_idx = 2;
6467 sd->idle_idx = 1;
6468 }
6469
6470 sd->private = &tl->data;
cb83b629
PZ
6471
6472 return sd;
6473}
6474
143e1e28
VG
6475/*
6476 * Topology list, bottom-up.
6477 */
6478static struct sched_domain_topology_level default_topology[] = {
6479#ifdef CONFIG_SCHED_SMT
6480 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6481#endif
6482#ifdef CONFIG_SCHED_MC
6483 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
143e1e28
VG
6484#endif
6485 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6486 { NULL, },
6487};
6488
c6e1e7b5
JG
6489static struct sched_domain_topology_level *sched_domain_topology =
6490 default_topology;
143e1e28
VG
6491
6492#define for_each_sd_topology(tl) \
6493 for (tl = sched_domain_topology; tl->mask; tl++)
6494
6495void set_sched_topology(struct sched_domain_topology_level *tl)
6496{
6497 sched_domain_topology = tl;
6498}
6499
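/*
 * Illustrative sketch (hypothetical arch code, not part of this file):
 * an architecture can replace the default table above early in boot by
 * passing its own bottom-up list to set_sched_topology(), reusing the
 * same mask/flags helpers, e.g.:
 *
 *	static struct sched_domain_topology_level my_arch_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_arch_topology);
 */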
6500#ifdef CONFIG_NUMA
6501
cb83b629
PZ
6502static const struct cpumask *sd_numa_mask(int cpu)
6503{
6504 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6505}
6506
d039ac60
PZ
6507static void sched_numa_warn(const char *str)
6508{
6509 static int done = false;
6510 int i,j;
6511
6512 if (done)
6513 return;
6514
6515 done = true;
6516
6517 printk(KERN_WARNING "ERROR: %s\n\n", str);
6518
6519 for (i = 0; i < nr_node_ids; i++) {
6520 printk(KERN_WARNING " ");
6521 for (j = 0; j < nr_node_ids; j++)
6522 printk(KERN_CONT "%02d ", node_distance(i,j));
6523 printk(KERN_CONT "\n");
6524 }
6525 printk(KERN_WARNING "\n");
6526}
6527
9942f79b 6528bool find_numa_distance(int distance)
d039ac60
PZ
6529{
6530 int i;
6531
6532 if (distance == node_distance(0, 0))
6533 return true;
6534
6535 for (i = 0; i < sched_domains_numa_levels; i++) {
6536 if (sched_domains_numa_distance[i] == distance)
6537 return true;
6538 }
6539
6540 return false;
6541}
6542
e3fe70b1
RR
6543/*
6544 * A system can have three types of NUMA topology:
6545 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6546 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6547 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6548 *
6549 * The difference between a glueless mesh topology and a backplane
6550 * topology lies in whether communication between not directly
6551 * connected nodes goes through intermediary nodes (where programs
6552 * could run), or through backplane controllers. This affects
6553 * placement of programs.
6554 *
6555 * The type of topology can be discerned with the following tests:
6556 * - If the maximum distance between any nodes is 1 hop, the system
6557 * is directly connected.
6558 * - If for two nodes A and B, located N > 1 hops away from each other,
6559 * there is an intermediary node C, which is < N hops away from both
6560 * nodes A and B, the system is a glueless mesh.
6561 */
6562static void init_numa_topology_type(void)
6563{
6564 int a, b, c, n;
6565
6566 n = sched_max_numa_distance;
6567
e237882b 6568 if (sched_domains_numa_levels <= 1) {
e3fe70b1 6569 sched_numa_topology_type = NUMA_DIRECT;
e237882b
AG
6570 return;
6571 }
e3fe70b1
RR
6572
6573 for_each_online_node(a) {
6574 for_each_online_node(b) {
6575 /* Find two nodes furthest removed from each other. */
6576 if (node_distance(a, b) < n)
6577 continue;
6578
6579 /* Is there an intermediary node between a and b? */
6580 for_each_online_node(c) {
6581 if (node_distance(a, c) < n &&
6582 node_distance(b, c) < n) {
6583 sched_numa_topology_type =
6584 NUMA_GLUELESS_MESH;
6585 return;
6586 }
6587 }
6588
6589 sched_numa_topology_type = NUMA_BACKPLANE;
6590 return;
6591 }
6592 }
6593}
6594
cb83b629
PZ
6595static void sched_init_numa(void)
6596{
6597 int next_distance, curr_distance = node_distance(0, 0);
6598 struct sched_domain_topology_level *tl;
6599 int level = 0;
6600 int i, j, k;
6601
cb83b629
PZ
6602 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6603 if (!sched_domains_numa_distance)
6604 return;
6605
6606 /*
6607 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6608 * unique distances in the node_distance() table.
6609 *
6610 * Assumes node_distance(0,j) includes all distances in
6611 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
6612 */
6613 next_distance = curr_distance;
6614 for (i = 0; i < nr_node_ids; i++) {
6615 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
6616 for (k = 0; k < nr_node_ids; k++) {
6617 int distance = node_distance(i, k);
6618
6619 if (distance > curr_distance &&
6620 (distance < next_distance ||
6621 next_distance == curr_distance))
6622 next_distance = distance;
6623
6624 /*
6625 * While not a strong assumption it would be nice to know
6626 * about cases where if node A is connected to B, B is not
6627 * equally connected to A.
6628 */
6629 if (sched_debug() && node_distance(k, i) != distance)
6630 sched_numa_warn("Node-distance not symmetric");
6631
6632 if (sched_debug() && i && !find_numa_distance(distance))
6633 sched_numa_warn("Node-0 not representative");
6634 }
6635 if (next_distance != curr_distance) {
6636 sched_domains_numa_distance[level++] = next_distance;
6637 sched_domains_numa_levels = level;
6638 curr_distance = next_distance;
6639 } else break;
cb83b629 6640 }
d039ac60
PZ
6641
6642 /*
6643 * In case of sched_debug() we verify the above assumption.
6644 */
6645 if (!sched_debug())
6646 break;
cb83b629 6647 }
c123588b
AR
6648
6649 if (!level)
6650 return;
6651
cb83b629
PZ
6652 /*
6653 * 'level' contains the number of unique distances, excluding the
6654 * identity distance node_distance(i,i).
6655 *
28b4a521 6656 * The sched_domains_numa_distance[] array includes the actual distance
cb83b629
PZ
6657 * numbers.
6658 */
6659
5f7865f3
TC
6660 /*
6661 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6662 * If it fails to allocate memory for array sched_domains_numa_masks[][],
6663	 * the array will contain fewer than 'level' members. This could be
6664 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
6665 * in other functions.
6666 *
6667 * We reset it to 'level' at the end of this function.
6668 */
6669 sched_domains_numa_levels = 0;
6670
cb83b629
PZ
6671 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6672 if (!sched_domains_numa_masks)
6673 return;
6674
6675 /*
6676 * Now for each level, construct a mask per node which contains all
6677 * cpus of nodes that are that many hops away from us.
6678 */
6679 for (i = 0; i < level; i++) {
6680 sched_domains_numa_masks[i] =
6681 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6682 if (!sched_domains_numa_masks[i])
6683 return;
6684
6685 for (j = 0; j < nr_node_ids; j++) {
2ea45800 6686 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
6687 if (!mask)
6688 return;
6689
6690 sched_domains_numa_masks[i][j] = mask;
6691
6692 for (k = 0; k < nr_node_ids; k++) {
dd7d8634 6693 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
6694 continue;
6695
6696 cpumask_or(mask, mask, cpumask_of_node(k));
6697 }
6698 }
6699 }
6700
143e1e28
VG
6701 /* Compute default topology size */
6702 for (i = 0; sched_domain_topology[i].mask; i++);
6703
c515db8c 6704 tl = kzalloc((i + level + 1) *
cb83b629
PZ
6705 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6706 if (!tl)
6707 return;
6708
6709 /*
6710 * Copy the default topology bits..
6711 */
143e1e28
VG
6712 for (i = 0; sched_domain_topology[i].mask; i++)
6713 tl[i] = sched_domain_topology[i];
cb83b629
PZ
6714
6715 /*
6716 * .. and append 'j' levels of NUMA goodness.
6717 */
6718 for (j = 0; j < level; i++, j++) {
6719 tl[i] = (struct sched_domain_topology_level){
cb83b629 6720 .mask = sd_numa_mask,
143e1e28 6721 .sd_flags = cpu_numa_flags,
cb83b629
PZ
6722 .flags = SDTL_OVERLAP,
6723 .numa_level = j,
143e1e28 6724 SD_INIT_NAME(NUMA)
cb83b629
PZ
6725 };
6726 }
6727
6728 sched_domain_topology = tl;
5f7865f3
TC
6729
6730 sched_domains_numa_levels = level;
9942f79b 6731 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
e3fe70b1
RR
6732
6733 init_numa_topology_type();
cb83b629 6734}
301a5cba
TC
6735
6736static void sched_domains_numa_masks_set(int cpu)
6737{
6738 int i, j;
6739 int node = cpu_to_node(cpu);
6740
6741 for (i = 0; i < sched_domains_numa_levels; i++) {
6742 for (j = 0; j < nr_node_ids; j++) {
6743 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6744 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6745 }
6746 }
6747}
6748
6749static void sched_domains_numa_masks_clear(int cpu)
6750{
6751 int i, j;
6752 for (i = 0; i < sched_domains_numa_levels; i++) {
6753 for (j = 0; j < nr_node_ids; j++)
6754 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6755 }
6756}
6757
6758/*
6759 * Update sched_domains_numa_masks[level][node] array when new cpus
6760 * are onlined.
6761 */
6762static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6763 unsigned long action,
6764 void *hcpu)
6765{
6766 int cpu = (long)hcpu;
6767
6768 switch (action & ~CPU_TASKS_FROZEN) {
6769 case CPU_ONLINE:
6770 sched_domains_numa_masks_set(cpu);
6771 break;
6772
6773 case CPU_DEAD:
6774 sched_domains_numa_masks_clear(cpu);
6775 break;
6776
6777 default:
6778 return NOTIFY_DONE;
6779 }
6780
6781 return NOTIFY_OK;
cb83b629
PZ
6782}
6783#else
6784static inline void sched_init_numa(void)
6785{
6786}
301a5cba
TC
6787
6788static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6789 unsigned long action,
6790 void *hcpu)
6791{
6792 return 0;
6793}
cb83b629
PZ
6794#endif /* CONFIG_NUMA */
6795
54ab4ff4
PZ
6796static int __sdt_alloc(const struct cpumask *cpu_map)
6797{
6798 struct sched_domain_topology_level *tl;
6799 int j;
6800
27723a68 6801 for_each_sd_topology(tl) {
54ab4ff4
PZ
6802 struct sd_data *sdd = &tl->data;
6803
6804 sdd->sd = alloc_percpu(struct sched_domain *);
6805 if (!sdd->sd)
6806 return -ENOMEM;
6807
6808 sdd->sg = alloc_percpu(struct sched_group *);
6809 if (!sdd->sg)
6810 return -ENOMEM;
6811
63b2ca30
NP
6812 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6813 if (!sdd->sgc)
9c3f75cb
PZ
6814 return -ENOMEM;
6815
54ab4ff4
PZ
6816 for_each_cpu(j, cpu_map) {
6817 struct sched_domain *sd;
6818 struct sched_group *sg;
63b2ca30 6819 struct sched_group_capacity *sgc;
54ab4ff4 6820
5cc389bc 6821 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
54ab4ff4
PZ
6822 GFP_KERNEL, cpu_to_node(j));
6823 if (!sd)
6824 return -ENOMEM;
6825
6826 *per_cpu_ptr(sdd->sd, j) = sd;
6827
6828 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6829 GFP_KERNEL, cpu_to_node(j));
6830 if (!sg)
6831 return -ENOMEM;
6832
30b4e9eb
IM
6833 sg->next = sg;
6834
54ab4ff4 6835 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 6836
63b2ca30 6837 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
9c3f75cb 6838 GFP_KERNEL, cpu_to_node(j));
63b2ca30 6839 if (!sgc)
9c3f75cb
PZ
6840 return -ENOMEM;
6841
63b2ca30 6842 *per_cpu_ptr(sdd->sgc, j) = sgc;
54ab4ff4
PZ
6843 }
6844 }
6845
6846 return 0;
6847}
6848
6849static void __sdt_free(const struct cpumask *cpu_map)
6850{
6851 struct sched_domain_topology_level *tl;
6852 int j;
6853
27723a68 6854 for_each_sd_topology(tl) {
54ab4ff4
PZ
6855 struct sd_data *sdd = &tl->data;
6856
6857 for_each_cpu(j, cpu_map) {
fb2cf2c6 6858 struct sched_domain *sd;
6859
6860 if (sdd->sd) {
6861 sd = *per_cpu_ptr(sdd->sd, j);
6862 if (sd && (sd->flags & SD_OVERLAP))
6863 free_sched_groups(sd->groups, 0);
6864 kfree(*per_cpu_ptr(sdd->sd, j));
6865 }
6866
6867 if (sdd->sg)
6868 kfree(*per_cpu_ptr(sdd->sg, j));
63b2ca30
NP
6869 if (sdd->sgc)
6870 kfree(*per_cpu_ptr(sdd->sgc, j));
54ab4ff4
PZ
6871 }
6872 free_percpu(sdd->sd);
fb2cf2c6 6873 sdd->sd = NULL;
54ab4ff4 6874 free_percpu(sdd->sg);
fb2cf2c6 6875 sdd->sg = NULL;
63b2ca30
NP
6876 free_percpu(sdd->sgc);
6877 sdd->sgc = NULL;
54ab4ff4
PZ
6878 }
6879}
6880
2c402dc3 6881struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
4a850cbe
VK
6882 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6883 struct sched_domain *child, int cpu)
2c402dc3 6884{
143e1e28 6885 struct sched_domain *sd = sd_init(tl, cpu);
2c402dc3 6886 if (!sd)
d069b916 6887 return child;
2c402dc3 6888
2c402dc3 6889 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6890 if (child) {
6891 sd->level = child->level + 1;
6892 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6893 child->parent = sd;
c75e0128 6894 sd->child = child;
6ae72dff
PZ
6895
6896 if (!cpumask_subset(sched_domain_span(child),
6897 sched_domain_span(sd))) {
6898 pr_err("BUG: arch topology borken\n");
6899#ifdef CONFIG_SCHED_DEBUG
6900 pr_err(" the %s domain not a subset of the %s domain\n",
6901 child->name, sd->name);
6902#endif
6903 /* Fixup, ensure @sd has at least @child cpus. */
6904 cpumask_or(sched_domain_span(sd),
6905 sched_domain_span(sd),
6906 sched_domain_span(child));
6907 }
6908
60495e77 6909 }
a841f8ce 6910 set_domain_attribute(sd, attr);
2c402dc3
PZ
6911
6912 return sd;
6913}
6914
2109b99e
AH
6915/*
6916 * Build sched domains for a given set of cpus and attach the sched domains
6917 * to the individual cpus
6918 */
dce840a0
PZ
6919static int build_sched_domains(const struct cpumask *cpu_map,
6920 struct sched_domain_attr *attr)
2109b99e 6921{
1c632169 6922 enum s_alloc alloc_state;
dce840a0 6923 struct sched_domain *sd;
2109b99e 6924 struct s_data d;
822ff793 6925 int i, ret = -ENOMEM;
9c1cfda2 6926
2109b99e
AH
6927 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6928 if (alloc_state != sa_rootdomain)
6929 goto error;
9c1cfda2 6930
dce840a0 6931 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6932 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6933 struct sched_domain_topology_level *tl;
6934
3bd65a80 6935 sd = NULL;
27723a68 6936 for_each_sd_topology(tl) {
4a850cbe 6937 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
22da9569
VK
6938 if (tl == sched_domain_topology)
6939 *per_cpu_ptr(d.sd, i) = sd;
e3589f6c
PZ
6940 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6941 sd->flags |= SD_OVERLAP;
d110235d
PZ
6942 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6943 break;
e3589f6c 6944 }
dce840a0
PZ
6945 }
6946
6947 /* Build the groups for the domains */
6948 for_each_cpu(i, cpu_map) {
6949 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6950 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6951 if (sd->flags & SD_OVERLAP) {
6952 if (build_overlap_sched_groups(sd, i))
6953 goto error;
6954 } else {
6955 if (build_sched_groups(sd, i))
6956 goto error;
6957 }
1cf51902 6958 }
a06dadbe 6959 }
9c1cfda2 6960
ced549fa 6961 /* Calculate CPU capacity for physical packages and nodes */
a9c9a9b6
PZ
6962 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6963 if (!cpumask_test_cpu(i, cpu_map))
6964 continue;
9c1cfda2 6965
dce840a0
PZ
6966 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6967 claim_allocations(i, sd);
63b2ca30 6968 init_sched_groups_capacity(i, sd);
dce840a0 6969 }
f712c0c7 6970 }
9c1cfda2 6971
1da177e4 6972 /* Attach the domains */
dce840a0 6973 rcu_read_lock();
abcd083a 6974 for_each_cpu(i, cpu_map) {
21d42ccf 6975 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6976 cpu_attach_domain(sd, d.rd, i);
1da177e4 6977 }
dce840a0 6978 rcu_read_unlock();
51888ca2 6979
822ff793 6980 ret = 0;
51888ca2 6981error:
2109b99e 6982 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6983 return ret;
1da177e4 6984}
029190c5 6985
acc3f5d7 6986static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6987static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6988static struct sched_domain_attr *dattr_cur;
6989	 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
6990
6991/*
6992 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
6993 * cpumask) fails, then fallback to a single sched domain,
6994 * as determined by the single cpumask fallback_doms.
029190c5 6995 */
4212823f 6996static cpumask_var_t fallback_doms;
029190c5 6997
ee79d1bd
HC
6998/*
6999 * arch_update_cpu_topology lets virtualized architectures update the
7000 * cpu core maps. It is supposed to return 1 if the topology changed
7001 * or 0 if it stayed the same.
7002 */
52f5684c 7003int __weak arch_update_cpu_topology(void)
22e52b07 7004{
ee79d1bd 7005 return 0;
22e52b07
HC
7006}
7007
acc3f5d7
RR
7008cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7009{
7010 int i;
7011 cpumask_var_t *doms;
7012
7013 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7014 if (!doms)
7015 return NULL;
7016 for (i = 0; i < ndoms; i++) {
7017 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7018 free_sched_domains(doms, i);
7019 return NULL;
7020 }
7021 }
7022 return doms;
7023}
7024
7025void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7026{
7027 unsigned int i;
7028 for (i = 0; i < ndoms; i++)
7029 free_cpumask_var(doms[i]);
7030 kfree(doms);
7031}
7032
1a20ff27 7033/*
41a2d6cf 7034 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
7035 * For now this just excludes isolated cpus, but could be used to
7036 * exclude other special cases in the future.
1a20ff27 7037 */
c4a8849a 7038static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 7039{
7378547f
MM
7040 int err;
7041
22e52b07 7042 arch_update_cpu_topology();
029190c5 7043 ndoms_cur = 1;
acc3f5d7 7044 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 7045 if (!doms_cur)
acc3f5d7
RR
7046 doms_cur = &fallback_doms;
7047 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 7048 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 7049 register_sched_domain_sysctl();
7378547f
MM
7050
7051 return err;
1a20ff27
DG
7052}
7053
1a20ff27
DG
7054/*
7055 * Detach sched domains from a group of cpus specified in cpu_map
7056 * These cpus will now be attached to the NULL domain
7057 */
96f874e2 7058static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
7059{
7060 int i;
7061
dce840a0 7062 rcu_read_lock();
abcd083a 7063 for_each_cpu(i, cpu_map)
57d885fe 7064 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 7065 rcu_read_unlock();
1a20ff27
DG
7066}
7067
1d3504fc
HS
7068/* handle null as "default" */
7069static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7070 struct sched_domain_attr *new, int idx_new)
7071{
7072 struct sched_domain_attr tmp;
7073
7074 /* fast path */
7075 if (!new && !cur)
7076 return 1;
7077
7078 tmp = SD_ATTR_INIT;
7079 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7080 new ? (new + idx_new) : &tmp,
7081 sizeof(struct sched_domain_attr));
7082}
7083
029190c5
PJ
7084/*
7085 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7086 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
7087 * doms_new[] to the current sched domain partitioning, doms_cur[].
7088 * It destroys each deleted domain and builds each new domain.
7089 *
acc3f5d7 7090 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
7091 * The masks don't intersect (don't overlap.) We should setup one
7092 * sched domain for each mask. CPUs not in any of the cpumasks will
7093 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7094 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7095 * it as it is.
7096 *
acc3f5d7
RR
7097 * The passed in 'doms_new' should be allocated using
7098 * alloc_sched_domains. This routine takes ownership of it and will
7099 * free_sched_domains it when done with it. If the caller failed the
7100 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7101 * and partition_sched_domains() will fall back to the single partition
7102 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 7103 *
96f874e2 7104 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
7105 * ndoms_new == 0 is a special case for destroying existing domains,
7106 * and it will not create the default domain.
dfb512ec 7107 *
029190c5
PJ
7108 * Call with hotplug lock held
7109 */
acc3f5d7 7110void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7111 struct sched_domain_attr *dattr_new)
029190c5 7112{
dfb512ec 7113 int i, j, n;
d65bd5ec 7114 int new_topology;
029190c5 7115
712555ee 7116 mutex_lock(&sched_domains_mutex);
a1835615 7117
7378547f
MM
7118 /* always unregister in case we don't destroy any domains */
7119 unregister_sched_domain_sysctl();
7120
d65bd5ec
HC
7121 /* Let architecture update cpu core mappings. */
7122 new_topology = arch_update_cpu_topology();
7123
dfb512ec 7124 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7125
7126 /* Destroy deleted domains */
7127 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7128 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7129 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7130 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7131 goto match1;
7132 }
7133 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7134 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7135match1:
7136 ;
7137 }
7138
c8d2d47a 7139 n = ndoms_cur;
e761b772 7140 if (doms_new == NULL) {
c8d2d47a 7141 n = 0;
acc3f5d7 7142 doms_new = &fallback_doms;
6ad4c188 7143 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7144 WARN_ON_ONCE(dattr_new);
e761b772
MK
7145 }
7146
029190c5
PJ
7147 /* Build new domains */
7148 for (i = 0; i < ndoms_new; i++) {
c8d2d47a 7149 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7150 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7151 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7152 goto match2;
7153 }
7154 /* no match - add a new doms_new */
dce840a0 7155 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7156match2:
7157 ;
7158 }
7159
7160 /* Remember the new sched domains */
acc3f5d7
RR
7161 if (doms_cur != &fallback_doms)
7162 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7163 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7164 doms_cur = doms_new;
1d3504fc 7165 dattr_cur = dattr_new;
029190c5 7166 ndoms_cur = ndoms_new;
7378547f
MM
7167
7168 register_sched_domain_sysctl();
a1835615 7169
712555ee 7170 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7171}
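/*
 * Illustrative sketch, not part of the original file: a caller splitting the
 * CPUs into two exclusive partitions and handing ownership of the array to
 * partition_sched_domains(), per the comment above. The helper name and the
 * half_a/half_b masks are hypothetical.
 */
static void example_repartition(const struct cpumask *half_a,
				const struct cpumask *half_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* documented fallback: rebuild a single default partition */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], half_a);
	cpumask_copy(doms[1], half_b);

	/* takes ownership of 'doms'; the caller must not free it afterwards */
	partition_sched_domains(2, doms, NULL);
}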
7172
d35be8ba
SB
7173static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7174
1da177e4 7175/*
3a101d05
TH
7176 * Update cpusets according to cpu_active mask. If cpusets are
7177 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7178 * around partition_sched_domains().
d35be8ba
SB
7179 *
7180 * If we come here as part of a suspend/resume, don't touch cpusets because we
7181 * want to restore it back to its original state upon resume anyway.
1da177e4 7182 */
0b2e918a
TH
7183static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7184 void *hcpu)
e761b772 7185{
d35be8ba
SB
7186 switch (action) {
7187 case CPU_ONLINE_FROZEN:
7188 case CPU_DOWN_FAILED_FROZEN:
7189
7190 /*
7191 * num_cpus_frozen tracks how many CPUs are involved in the
7192 * suspend/resume sequence. As long as this is not the last online
7193 * operation in the resume sequence, just build a single sched
7194 * domain, ignoring cpusets.
7195 */
7196 num_cpus_frozen--;
7197 if (likely(num_cpus_frozen)) {
7198 partition_sched_domains(1, NULL, NULL);
7199 break;
7200 }
7201
7202 /*
7203 * This is the last CPU online operation. So fall through and
7204 * restore the original sched domains by considering the
7205 * cpuset configurations.
7206 */
7207
e761b772 7208 case CPU_ONLINE:
7ddf96b0 7209 cpuset_update_active_cpus(true);
d35be8ba 7210 break;
3a101d05
TH
7211 default:
7212 return NOTIFY_DONE;
7213 }
d35be8ba 7214 return NOTIFY_OK;
3a101d05 7215}
e761b772 7216
0b2e918a
TH
7217static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7218 void *hcpu)
3a101d05 7219{
3c18d447
JL
7220 unsigned long flags;
7221 long cpu = (long)hcpu;
7222 struct dl_bw *dl_b;
533445c6
OS
7223 bool overflow;
7224 int cpus;
3c18d447 7225
533445c6 7226 switch (action) {
3a101d05 7227 case CPU_DOWN_PREPARE:
533445c6
OS
7228 rcu_read_lock_sched();
7229 dl_b = dl_bw_of(cpu);
3c18d447 7230
533445c6
OS
7231 raw_spin_lock_irqsave(&dl_b->lock, flags);
7232 cpus = dl_bw_cpus(cpu);
7233 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7234 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3c18d447 7235
533445c6 7236 rcu_read_unlock_sched();
3c18d447 7237
533445c6
OS
7238 if (overflow)
7239 return notifier_from_errno(-EBUSY);
7ddf96b0 7240 cpuset_update_active_cpus(false);
d35be8ba
SB
7241 break;
7242 case CPU_DOWN_PREPARE_FROZEN:
7243 num_cpus_frozen++;
7244 partition_sched_domains(1, NULL, NULL);
7245 break;
e761b772
MK
7246 default:
7247 return NOTIFY_DONE;
7248 }
d35be8ba 7249 return NOTIFY_OK;
e761b772 7250}
e761b772 7251
1da177e4
LT
7252void __init sched_init_smp(void)
7253{
dcc30a35
RR
7254 cpumask_var_t non_isolated_cpus;
7255
7256 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7257 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7258
8cb9764f
CM
7259 /* nohz_full won't take effect without isolating the cpus. */
7260 tick_nohz_full_add_cpus_to(cpu_isolated_map);
7261
cb83b629
PZ
7262 sched_init_numa();
7263
6acce3ef
PZ
7264 /*
7265 * There's no userspace yet to cause hotplug operations; hence all the
7266 * cpu masks are stable and all blatant races in the below code cannot
7267 * happen.
7268 */
712555ee 7269 mutex_lock(&sched_domains_mutex);
c4a8849a 7270 init_sched_domains(cpu_active_mask);
dcc30a35
RR
7271 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7272 if (cpumask_empty(non_isolated_cpus))
7273 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7274 mutex_unlock(&sched_domains_mutex);
e761b772 7275
301a5cba 7276 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
3a101d05
TH
7277 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7278 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772 7279
b328ca18 7280 init_hrtick();
5c1e1767
NP
7281
7282 /* Move init over to a non-isolated CPU */
dcc30a35 7283 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7284 BUG();
19978ca6 7285 sched_init_granularity();
dcc30a35 7286 free_cpumask_var(non_isolated_cpus);
4212823f 7287
0e3900e6 7288 init_sched_rt_class();
1baca4ce 7289 init_sched_dl_class();
1da177e4
LT
7290}
7291#else
7292void __init sched_init_smp(void)
7293{
19978ca6 7294 sched_init_granularity();
1da177e4
LT
7295}
7296#endif /* CONFIG_SMP */
7297
7298int in_sched_functions(unsigned long addr)
7299{
1da177e4
LT
7300 return in_lock_functions(addr) ||
7301 (addr >= (unsigned long)__sched_text_start
7302 && addr < (unsigned long)__sched_text_end);
7303}
7304
029632fb 7305#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
7306/*
7307 * Default task group.
7308 * Every task in system belongs to this group at bootup.
7309 */
029632fb 7310struct task_group root_task_group;
35cf4e50 7311LIST_HEAD(task_groups);
052f1dc7 7312#endif
6f505b16 7313
e6252c3e 7314DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6f505b16 7315
1da177e4
LT
7316void __init sched_init(void)
7317{
dd41f596 7318 int i, j;
434d53b0
MT
7319 unsigned long alloc_size = 0, ptr;
7320
7321#ifdef CONFIG_FAIR_GROUP_SCHED
7322 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7323#endif
7324#ifdef CONFIG_RT_GROUP_SCHED
7325 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7326#endif
434d53b0 7327 if (alloc_size) {
36b7b6d4 7328 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7329
7330#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7331 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7332 ptr += nr_cpu_ids * sizeof(void **);
7333
07e06b01 7334 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7335 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7336
6d6bc0ad 7337#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7338#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7339 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7340 ptr += nr_cpu_ids * sizeof(void **);
7341
07e06b01 7342 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7343 ptr += nr_cpu_ids * sizeof(void **);
7344
6d6bc0ad 7345#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 7346 }
df7c8e84 7347#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
7348 for_each_possible_cpu(i) {
7349 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7350 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 7351 }
b74e6278 7352#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 7353
332ac17e
DF
7354 init_rt_bandwidth(&def_rt_bandwidth,
7355 global_rt_period(), global_rt_runtime());
7356 init_dl_bandwidth(&def_dl_bandwidth,
1724813d 7357 global_rt_period(), global_rt_runtime());
332ac17e 7358
57d885fe
GH
7359#ifdef CONFIG_SMP
7360 init_defrootdomain();
7361#endif
7362
d0b27fa7 7363#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7364 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7365 global_rt_period(), global_rt_runtime());
6d6bc0ad 7366#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7367
7c941438 7368#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
7369 list_add(&root_task_group.list, &task_groups);
7370 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 7371 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 7372 autogroup_init(&init_task);
54c707e9 7373
7c941438 7374#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7375
0a945022 7376 for_each_possible_cpu(i) {
70b97a7f 7377 struct rq *rq;
1da177e4
LT
7378
7379 rq = cpu_rq(i);
05fa785c 7380 raw_spin_lock_init(&rq->lock);
7897986b 7381 rq->nr_running = 0;
dce48a84
TG
7382 rq->calc_load_active = 0;
7383 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 7384 init_cfs_rq(&rq->cfs);
07c54f7a
AV
7385 init_rt_rq(&rq->rt);
7386 init_dl_rq(&rq->dl);
dd41f596 7387#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 7388 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 7389 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 7390 /*
07e06b01 7391 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
7392 *
7393 * In case of task-groups formed through the cgroup filesystem, it
7394 * gets 100% of the cpu resources in the system. This overall
7395 * system cpu resource is divided among the tasks of
07e06b01 7396 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7397 * based on each entity's (task or task-group's) weight
7398 * (se->load.weight).
7399 *
07e06b01 7400 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
7401 * 1024 and two child groups A0 and A1 (of weight 1024 each),
7402 * then A0's share of the cpu resource is:
7403 *
0d905bca 7404 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7405 *
07e06b01
YZ
7406 * We achieve this by letting root_task_group's tasks sit
7407 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 7408 */
ab84d31e 7409 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 7410 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7411#endif /* CONFIG_FAIR_GROUP_SCHED */
7412
7413 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7414#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7415 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7416#endif
1da177e4 7417
dd41f596
IM
7418 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7419 rq->cpu_load[j] = 0;
fdf3e95d
VP
7420
7421 rq->last_load_update_tick = jiffies;
7422
1da177e4 7423#ifdef CONFIG_SMP
41c7ce9a 7424 rq->sd = NULL;
57d885fe 7425 rq->rd = NULL;
ca6d75e6 7426 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 7427 rq->balance_callback = NULL;
1da177e4 7428 rq->active_balance = 0;
dd41f596 7429 rq->next_balance = jiffies;
1da177e4 7430 rq->push_cpu = 0;
0a2966b4 7431 rq->cpu = i;
1f11eb6a 7432 rq->online = 0;
eae0c9df
MG
7433 rq->idle_stamp = 0;
7434 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 7435 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
7436
7437 INIT_LIST_HEAD(&rq->cfs_tasks);
7438
dc938520 7439 rq_attach_root(rq, &def_root_domain);
3451d024 7440#ifdef CONFIG_NO_HZ_COMMON
1c792db7 7441 rq->nohz_flags = 0;
83cd4fe2 7442#endif
265f22a9
FW
7443#ifdef CONFIG_NO_HZ_FULL
7444 rq->last_sched_tick = 0;
7445#endif
1da177e4 7446#endif
8f4d37ec 7447 init_rq_hrtick(rq);
1da177e4 7448 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7449 }
7450
2dd73a4f 7451 set_load_weight(&init_task);
b50f60ce 7452
e107be36
AK
7453#ifdef CONFIG_PREEMPT_NOTIFIERS
7454 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7455#endif
7456
1da177e4
LT
7457 /*
7458 * The boot idle thread does lazy MMU switching as well:
7459 */
7460 atomic_inc(&init_mm.mm_count);
7461 enter_lazy_tlb(&init_mm, current);
7462
1b537c7d
YD
7463 /*
7464 * During early bootup we pretend to be a normal task:
7465 */
7466 current->sched_class = &fair_sched_class;
7467
1da177e4
LT
7468 /*
7469 * Make us the idle thread. Technically, schedule() should not be
7470 * called from this thread, however somewhere below it might be,
7471 * but because we are the idle thread, we just pick up running again
7472 * when this runqueue becomes "idle".
7473 */
7474 init_idle(current, smp_processor_id());
dce48a84
TG
7475
7476 calc_load_update = jiffies + LOAD_FREQ;
7477
bf4d83f6 7478#ifdef CONFIG_SMP
4cb98839 7479 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
7480 /* May be allocated at isolcpus cmdline parse time */
7481 if (cpu_isolated_map == NULL)
7482 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 7483 idle_thread_set_boot_cpu();
a803f026 7484 set_cpu_rq_start_time();
029632fb
PZ
7485#endif
7486 init_sched_fair_class();
6a7b3dc3 7487
6892b75e 7488 scheduler_running = 1;
1da177e4
LT
7489}
7490
d902db1e 7491#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
7492static inline int preempt_count_equals(int preempt_offset)
7493{
234da7bc 7494 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 7495
4ba8216c 7496 return (nested == preempt_offset);
e4aafea2
FW
7497}
7498
d894837f 7499void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7500{
8eb23b9f
PZ
7501 /*
7502 * Blocking primitives will set (and therefore destroy) current->state,
7503 * since we will exit with TASK_RUNNING, make sure we enter with it;
7504 * otherwise we will destroy state.
7505 */
00845eb9 7506 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
7507 "do not call blocking ops when !TASK_RUNNING; "
7508 "state=%lx set at [<%p>] %pS\n",
7509 current->state,
7510 (void *)current->task_state_change,
00845eb9 7511 (void *)current->task_state_change);
8eb23b9f 7512
3427445a
PZ
7513 ___might_sleep(file, line, preempt_offset);
7514}
7515EXPORT_SYMBOL(__might_sleep);
7516
7517void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7518{
1da177e4
LT
7519 static unsigned long prev_jiffy; /* ratelimiting */
7520
b3fbab05 7521 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
db273be2
TG
7522 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7523 !is_idle_task(current)) ||
e4aafea2 7524 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
7525 return;
7526 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7527 return;
7528 prev_jiffy = jiffies;
7529
3df0fc5b
PZ
7530 printk(KERN_ERR
7531 "BUG: sleeping function called from invalid context at %s:%d\n",
7532 file, line);
7533 printk(KERN_ERR
7534 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7535 in_atomic(), irqs_disabled(),
7536 current->pid, current->comm);
aef745fc 7537
a8b686b3
ES
7538 if (task_stack_end_corrupted(current))
7539 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7540
aef745fc
IM
7541 debug_show_held_locks(current);
7542 if (irqs_disabled())
7543 print_irqtrace_events(current);
8f47b187
TG
7544#ifdef CONFIG_DEBUG_PREEMPT
7545 if (!preempt_count_equals(preempt_offset)) {
7546 pr_err("Preemption disabled at:");
7547 print_ip_sym(current->preempt_disable_ip);
7548 pr_cont("\n");
7549 }
7550#endif
aef745fc 7551 dump_stack();
1da177e4 7552}
3427445a 7553EXPORT_SYMBOL(___might_sleep);
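/*
 * Illustrative note, not from the original file: a typical offender is code
 * that calls a sleeping primitive (e.g. mutex_lock()) while holding a
 * spinlock or with interrupts disabled. preempt_count_equals() then fails,
 * in_atomic() is non-zero, and the "BUG: sleeping function called from
 * invalid context" report above is emitted.
 */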
1da177e4
LT
7554#endif
7555
7556#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 7557void normalize_rt_tasks(void)
3a5e4dc1 7558{
dbc7f069 7559 struct task_struct *g, *p;
d50dde5a
DF
7560 struct sched_attr attr = {
7561 .sched_policy = SCHED_NORMAL,
7562 };
1da177e4 7563
3472eaa1 7564 read_lock(&tasklist_lock);
5d07f420 7565 for_each_process_thread(g, p) {
178be793
IM
7566 /*
7567 * Only normalize user tasks:
7568 */
3472eaa1 7569 if (p->flags & PF_KTHREAD)
178be793
IM
7570 continue;
7571
6cfb0d5d 7572 p->se.exec_start = 0;
6cfb0d5d 7573#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7574 p->se.statistics.wait_start = 0;
7575 p->se.statistics.sleep_start = 0;
7576 p->se.statistics.block_start = 0;
6cfb0d5d 7577#endif
dd41f596 7578
aab03e05 7579 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
7580 /*
7581 * Renice negative nice level userspace
7582 * tasks back to 0:
7583 */
3472eaa1 7584 if (task_nice(p) < 0)
dd41f596 7585 set_user_nice(p, 0);
1da177e4 7586 continue;
dd41f596 7587 }
1da177e4 7588
dbc7f069 7589 __sched_setscheduler(p, &attr, false, false);
5d07f420 7590 }
3472eaa1 7591 read_unlock(&tasklist_lock);
1da177e4
LT
7592}
7593
7594#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7595
67fc4e0c 7596#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7597/*
67fc4e0c 7598 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7599 *
7600 * They can only be called when the whole system has been
7601 * stopped - every CPU needs to be quiescent, and no scheduling
7602 * activity can take place. Using them for anything else would
7603 * be a serious bug, and as a result, they aren't even visible
7604 * under any other configuration.
7605 */
7606
7607/**
7608 * curr_task - return the current task for a given cpu.
7609 * @cpu: the processor in question.
7610 *
7611 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
7612 *
7613 * Return: The current task for @cpu.
1df5c10a 7614 */
36c8b586 7615struct task_struct *curr_task(int cpu)
1df5c10a
LT
7616{
7617 return cpu_curr(cpu);
7618}
7619
67fc4e0c
JW
7620#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7621
7622#ifdef CONFIG_IA64
1df5c10a
LT
7623/**
7624 * set_curr_task - set the current task for a given cpu.
7625 * @cpu: the processor in question.
7626 * @p: the task pointer to set.
7627 *
7628 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7629 * are serviced on a separate stack. It allows the architecture to switch the
7630 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7631 * must be called with all CPUs synchronized and interrupts disabled; the
7632 * caller must save the original value of the current task (see
7633 * curr_task() above) and restore that value before re-enabling interrupts
7634 * and restarting the system.
7635 *
7636 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7637 */
36c8b586 7638void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7639{
7640 cpu_curr(cpu) = p;
7641}
7642
7643#endif
29f59db3 7644
7c941438 7645#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7646/* task_group_lock serializes the addition/removal of task groups */
7647static DEFINE_SPINLOCK(task_group_lock);
7648
bccbe08a
PZ
7649static void free_sched_group(struct task_group *tg)
7650{
7651 free_fair_sched_group(tg);
7652 free_rt_sched_group(tg);
e9aa1dd1 7653 autogroup_free(tg);
bccbe08a
PZ
7654 kfree(tg);
7655}
7656
7657/* allocate runqueue etc for a new task group */
ec7dc8ac 7658struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7659{
7660 struct task_group *tg;
bccbe08a
PZ
7661
7662 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7663 if (!tg)
7664 return ERR_PTR(-ENOMEM);
7665
ec7dc8ac 7666 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7667 goto err;
7668
ec7dc8ac 7669 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7670 goto err;
7671
ace783b9
LZ
7672 return tg;
7673
7674err:
7675 free_sched_group(tg);
7676 return ERR_PTR(-ENOMEM);
7677}
7678
7679void sched_online_group(struct task_group *tg, struct task_group *parent)
7680{
7681 unsigned long flags;
7682
8ed36996 7683 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7684 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7685
7686 WARN_ON(!parent); /* root should already exist */
7687
7688 tg->parent = parent;
f473aa5e 7689 INIT_LIST_HEAD(&tg->children);
09f2724a 7690 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7691 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7692}
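/*
 * Illustrative sketch, not part of the original file: creating a child task
 * group under root_task_group and making it visible, mirroring what the
 * cgroup css_alloc/css_online callbacks below do. The helper name is
 * hypothetical.
 */
static struct task_group *example_make_group(void)
{
	struct task_group *tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		return tg;	/* -ENOMEM */

	sched_online_group(tg, &root_task_group);
	return tg;
}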
7693
9b5b7751 7694/* rcu callback to free various structures associated with a task group */
6f505b16 7695static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7696{
29f59db3 7697 /* now it should be safe to free those cfs_rqs */
6f505b16 7698 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7699}
7700
9b5b7751 7701/* Destroy runqueue etc associated with a task group */
4cf86d77 7702void sched_destroy_group(struct task_group *tg)
ace783b9
LZ
7703{
7704 /* wait for possible concurrent references to cfs_rqs complete */
7705 call_rcu(&tg->rcu, free_sched_group_rcu);
7706}
7707
7708void sched_offline_group(struct task_group *tg)
29f59db3 7709{
8ed36996 7710 unsigned long flags;
9b5b7751 7711 int i;
29f59db3 7712
3d4b47b4
PZ
7713 /* end participation in shares distribution */
7714 for_each_possible_cpu(i)
bccbe08a 7715 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7716
7717 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7718 list_del_rcu(&tg->list);
f473aa5e 7719 list_del_rcu(&tg->siblings);
8ed36996 7720 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7721}
7722
9b5b7751 7723/* change task's runqueue when it moves between groups.
3a252015
IM
7724 * The caller of this function should have put the task in its new group
7725 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7726 * reflect its new group.
9b5b7751
SV
7727 */
7728void sched_move_task(struct task_struct *tsk)
29f59db3 7729{
8323f26c 7730 struct task_group *tg;
da0c1e65 7731 int queued, running;
29f59db3
SV
7732 unsigned long flags;
7733 struct rq *rq;
7734
7735 rq = task_rq_lock(tsk, &flags);
7736
051a1d1a 7737 running = task_current(rq, tsk);
da0c1e65 7738 queued = task_on_rq_queued(tsk);
29f59db3 7739
da0c1e65 7740 if (queued)
29f59db3 7741 dequeue_task(rq, tsk, 0);
0e1f3483 7742 if (unlikely(running))
f3cd1c4e 7743 put_prev_task(rq, tsk);
29f59db3 7744
f7b8a47d
KT
7745 /*
7746 * All callers are synchronized by task_rq_lock(); we do not use RCU
7747 * which is pointless here. Thus, we pass "true" to task_css_check()
7748 * to prevent lockdep warnings.
7749 */
7750 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
7751 struct task_group, css);
7752 tg = autogroup_task_group(tsk, tg);
7753 tsk->sched_task_group = tg;
7754
810b3817 7755#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 7756 if (tsk->sched_class->task_move_group)
bc54da21 7757 tsk->sched_class->task_move_group(tsk);
b2b5ce02 7758 else
810b3817 7759#endif
b2b5ce02 7760 set_task_rq(tsk, task_cpu(tsk));
810b3817 7761
0e1f3483
HS
7762 if (unlikely(running))
7763 tsk->sched_class->set_curr_task(rq);
da0c1e65 7764 if (queued)
371fd7e7 7765 enqueue_task(rq, tsk, 0);
29f59db3 7766
0122ec5b 7767 task_rq_unlock(rq, tsk, &flags);
29f59db3 7768}
7c941438 7769#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7770
a790de99
PT
7771#ifdef CONFIG_RT_GROUP_SCHED
7772/*
7773 * Ensure that the real time constraints are schedulable.
7774 */
7775static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7776
9a7e0b18
PZ
7777/* Must be called with tasklist_lock held */
7778static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7779{
9a7e0b18 7780 struct task_struct *g, *p;
b40b2e8e 7781
1fe89e1b
PZ
7782 /*
7783 * Autogroups do not have RT tasks; see autogroup_create().
7784 */
7785 if (task_group_is_autogroup(tg))
7786 return 0;
7787
5d07f420 7788 for_each_process_thread(g, p) {
8651c658 7789 if (rt_task(p) && task_group(p) == tg)
9a7e0b18 7790 return 1;
5d07f420 7791 }
b40b2e8e 7792
9a7e0b18
PZ
7793 return 0;
7794}
b40b2e8e 7795
9a7e0b18
PZ
7796struct rt_schedulable_data {
7797 struct task_group *tg;
7798 u64 rt_period;
7799 u64 rt_runtime;
7800};
b40b2e8e 7801
a790de99 7802static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7803{
7804 struct rt_schedulable_data *d = data;
7805 struct task_group *child;
7806 unsigned long total, sum = 0;
7807 u64 period, runtime;
b40b2e8e 7808
9a7e0b18
PZ
7809 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7810 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7811
9a7e0b18
PZ
7812 if (tg == d->tg) {
7813 period = d->rt_period;
7814 runtime = d->rt_runtime;
b40b2e8e 7815 }
b40b2e8e 7816
4653f803
PZ
7817 /*
7818 * Cannot have more runtime than the period.
7819 */
7820 if (runtime > period && runtime != RUNTIME_INF)
7821 return -EINVAL;
6f505b16 7822
4653f803
PZ
7823 /*
7824 * Ensure we don't starve existing RT tasks.
7825 */
9a7e0b18
PZ
7826 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7827 return -EBUSY;
6f505b16 7828
9a7e0b18 7829 total = to_ratio(period, runtime);
6f505b16 7830
4653f803
PZ
7831 /*
7832 * Nobody can have more than the global setting allows.
7833 */
7834 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7835 return -EINVAL;
6f505b16 7836
4653f803
PZ
7837 /*
7838 * The sum of our children's runtime should not exceed our own.
7839 */
9a7e0b18
PZ
7840 list_for_each_entry_rcu(child, &tg->children, siblings) {
7841 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7842 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7843
9a7e0b18
PZ
7844 if (child == d->tg) {
7845 period = d->rt_period;
7846 runtime = d->rt_runtime;
7847 }
6f505b16 7848
9a7e0b18 7849 sum += to_ratio(period, runtime);
9f0c1e56 7850 }
6f505b16 7851
9a7e0b18
PZ
7852 if (sum > total)
7853 return -EINVAL;
7854
7855 return 0;
6f505b16
PZ
7856}
7857
9a7e0b18 7858static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7859{
8277434e
PT
7860 int ret;
7861
9a7e0b18
PZ
7862 struct rt_schedulable_data data = {
7863 .tg = tg,
7864 .rt_period = period,
7865 .rt_runtime = runtime,
7866 };
7867
8277434e
PT
7868 rcu_read_lock();
7869 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7870 rcu_read_unlock();
7871
7872 return ret;
521f1a24
DG
7873}
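/*
 * Worked example of the admission test above (illustrative, not from the
 * original file): with the default 1s global rt_period and 950ms global
 * rt_runtime, a group asking for runtime=500ms in a 1s period is accepted,
 * its runtime may never exceed its period, and its children together may
 * not claim more than those 500ms.
 */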
7874
ab84d31e 7875static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7876 u64 rt_period, u64 rt_runtime)
6f505b16 7877{
ac086bc2 7878 int i, err = 0;
9f0c1e56 7879
2636ed5f
PZ
7880 /*
7881 * Disallowing the root group RT runtime is BAD, it would disallow the
7882 * kernel creating (and or operating) RT threads.
7883 */
7884 if (tg == &root_task_group && rt_runtime == 0)
7885 return -EINVAL;
7886
7887 /* No period doesn't make any sense. */
7888 if (rt_period == 0)
7889 return -EINVAL;
7890
9f0c1e56 7891 mutex_lock(&rt_constraints_mutex);
521f1a24 7892 read_lock(&tasklist_lock);
9a7e0b18
PZ
7893 err = __rt_schedulable(tg, rt_period, rt_runtime);
7894 if (err)
9f0c1e56 7895 goto unlock;
ac086bc2 7896
0986b11b 7897 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7898 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7899 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7900
7901 for_each_possible_cpu(i) {
7902 struct rt_rq *rt_rq = tg->rt_rq[i];
7903
0986b11b 7904 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7905 rt_rq->rt_runtime = rt_runtime;
0986b11b 7906 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7907 }
0986b11b 7908 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7909unlock:
521f1a24 7910 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7911 mutex_unlock(&rt_constraints_mutex);
7912
7913 return err;
6f505b16
PZ
7914}
7915
25cc7da7 7916static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
d0b27fa7
PZ
7917{
7918 u64 rt_runtime, rt_period;
7919
7920 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7921 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7922 if (rt_runtime_us < 0)
7923 rt_runtime = RUNTIME_INF;
7924
ab84d31e 7925 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7926}
7927
25cc7da7 7928static long sched_group_rt_runtime(struct task_group *tg)
9f0c1e56
PZ
7929{
7930 u64 rt_runtime_us;
7931
d0b27fa7 7932 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7933 return -1;
7934
d0b27fa7 7935 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7936 do_div(rt_runtime_us, NSEC_PER_USEC);
7937 return rt_runtime_us;
7938}
d0b27fa7 7939
ce2f5fe4 7940static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
d0b27fa7
PZ
7941{
7942 u64 rt_runtime, rt_period;
7943
ce2f5fe4 7944 rt_period = rt_period_us * NSEC_PER_USEC;
d0b27fa7
PZ
7945 rt_runtime = tg->rt_bandwidth.rt_runtime;
7946
ab84d31e 7947 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7948}
7949
25cc7da7 7950static long sched_group_rt_period(struct task_group *tg)
d0b27fa7
PZ
7951{
7952 u64 rt_period_us;
7953
7954 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7955 do_div(rt_period_us, NSEC_PER_USEC);
7956 return rt_period_us;
7957}
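/*
 * Illustrative sketch, not part of the original file: reserving 95% of the
 * CPU for a group's realtime tasks by pairing the two setters above (950ms
 * of runtime within a 1s period). The helper name and values are examples
 * only.
 */
static int example_set_rt_95_percent(struct task_group *tg)
{
	int ret;

	ret = sched_group_set_rt_period(tg, 1000000);	/* 1s period, in us */
	if (ret)
		return ret;

	return sched_group_set_rt_runtime(tg, 950000);	/* 950ms runtime, in us */
}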
332ac17e 7958#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7959
332ac17e 7960#ifdef CONFIG_RT_GROUP_SCHED
d0b27fa7
PZ
7961static int sched_rt_global_constraints(void)
7962{
7963 int ret = 0;
7964
7965 mutex_lock(&rt_constraints_mutex);
9a7e0b18 7966 read_lock(&tasklist_lock);
4653f803 7967 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 7968 read_unlock(&tasklist_lock);
d0b27fa7
PZ
7969 mutex_unlock(&rt_constraints_mutex);
7970
7971 return ret;
7972}
54e99124 7973
25cc7da7 7974static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
54e99124
DG
7975{
7976 /* Don't accept realtime tasks when there is no way for them to run */
7977 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7978 return 0;
7979
7980 return 1;
7981}
7982
6d6bc0ad 7983#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7984static int sched_rt_global_constraints(void)
7985{
ac086bc2 7986 unsigned long flags;
332ac17e 7987 int i, ret = 0;
ec5d4989 7988
0986b11b 7989 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
7990 for_each_possible_cpu(i) {
7991 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7992
0986b11b 7993 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7994 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 7995 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7996 }
0986b11b 7997 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 7998
332ac17e 7999 return ret;
d0b27fa7 8000}
6d6bc0ad 8001#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8002
a1963b81 8003static int sched_dl_global_validate(void)
332ac17e 8004{
1724813d
PZ
8005 u64 runtime = global_rt_runtime();
8006 u64 period = global_rt_period();
332ac17e 8007 u64 new_bw = to_ratio(period, runtime);
f10e00f4 8008 struct dl_bw *dl_b;
1724813d 8009 int cpu, ret = 0;
49516342 8010 unsigned long flags;
332ac17e
DF
8011
8012 /*
8013 * Here we want to check the bandwidth not being set to some
8014 * value smaller than the currently allocated bandwidth in
8015 * any of the root_domains.
8016 *
8017 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
8018 * cycling on root_domains... Discussion on different/better
8019 * solutions is welcome!
8020 */
1724813d 8021 for_each_possible_cpu(cpu) {
f10e00f4
KT
8022 rcu_read_lock_sched();
8023 dl_b = dl_bw_of(cpu);
332ac17e 8024
49516342 8025 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d
PZ
8026 if (new_bw < dl_b->total_bw)
8027 ret = -EBUSY;
49516342 8028 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
1724813d 8029
f10e00f4
KT
8030 rcu_read_unlock_sched();
8031
1724813d
PZ
8032 if (ret)
8033 break;
332ac17e
DF
8034 }
8035
1724813d 8036 return ret;
332ac17e
DF
8037}
8038
1724813d 8039static void sched_dl_do_global(void)
ce0dbbbb 8040{
1724813d 8041 u64 new_bw = -1;
f10e00f4 8042 struct dl_bw *dl_b;
1724813d 8043 int cpu;
49516342 8044 unsigned long flags;
ce0dbbbb 8045
1724813d
PZ
8046 def_dl_bandwidth.dl_period = global_rt_period();
8047 def_dl_bandwidth.dl_runtime = global_rt_runtime();
8048
8049 if (global_rt_runtime() != RUNTIME_INF)
8050 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
8051
8052 /*
8053 * FIXME: As above...
8054 */
8055 for_each_possible_cpu(cpu) {
f10e00f4
KT
8056 rcu_read_lock_sched();
8057 dl_b = dl_bw_of(cpu);
1724813d 8058
49516342 8059 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d 8060 dl_b->bw = new_bw;
49516342 8061 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
f10e00f4
KT
8062
8063 rcu_read_unlock_sched();
ce0dbbbb 8064 }
1724813d
PZ
8065}
8066
8067static int sched_rt_global_validate(void)
8068{
8069 if (sysctl_sched_rt_period <= 0)
8070 return -EINVAL;
8071
e9e7cb38
JL
8072 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
8073 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
1724813d
PZ
8074 return -EINVAL;
8075
8076 return 0;
8077}
8078
8079static void sched_rt_do_global(void)
8080{
8081 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8082 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
ce0dbbbb
CW
8083}
8084
d0b27fa7 8085int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8086 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8087 loff_t *ppos)
8088{
d0b27fa7
PZ
8089 int old_period, old_runtime;
8090 static DEFINE_MUTEX(mutex);
1724813d 8091 int ret;
d0b27fa7
PZ
8092
8093 mutex_lock(&mutex);
8094 old_period = sysctl_sched_rt_period;
8095 old_runtime = sysctl_sched_rt_runtime;
8096
8d65af78 8097 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8098
8099 if (!ret && write) {
1724813d
PZ
8100 ret = sched_rt_global_validate();
8101 if (ret)
8102 goto undo;
8103
a1963b81 8104 ret = sched_dl_global_validate();
1724813d
PZ
8105 if (ret)
8106 goto undo;
8107
a1963b81 8108 ret = sched_rt_global_constraints();
1724813d
PZ
8109 if (ret)
8110 goto undo;
8111
8112 sched_rt_do_global();
8113 sched_dl_do_global();
8114 }
8115 if (0) {
8116undo:
8117 sysctl_sched_rt_period = old_period;
8118 sysctl_sched_rt_runtime = old_runtime;
d0b27fa7
PZ
8119 }
8120 mutex_unlock(&mutex);
8121
8122 return ret;
8123}
68318b8e 8124
1724813d 8125int sched_rr_handler(struct ctl_table *table, int write,
332ac17e
DF
8126 void __user *buffer, size_t *lenp,
8127 loff_t *ppos)
8128{
8129 int ret;
332ac17e 8130 static DEFINE_MUTEX(mutex);
332ac17e
DF
8131
8132 mutex_lock(&mutex);
332ac17e 8133 ret = proc_dointvec(table, write, buffer, lenp, ppos);
1724813d
PZ
8134 /* make sure that internally we keep jiffies */
8135 /* also, writing zero resets timeslice to default */
332ac17e 8136 if (!ret && write) {
1724813d
PZ
8137 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8138 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
332ac17e
DF
8139 }
8140 mutex_unlock(&mutex);
332ac17e
DF
8141 return ret;
8142}
8143
052f1dc7 8144#ifdef CONFIG_CGROUP_SCHED
68318b8e 8145
a7c6d554 8146static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 8147{
a7c6d554 8148 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
8149}
8150
eb95419b
TH
8151static struct cgroup_subsys_state *
8152cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 8153{
eb95419b
TH
8154 struct task_group *parent = css_tg(parent_css);
8155 struct task_group *tg;
68318b8e 8156
eb95419b 8157 if (!parent) {
68318b8e 8158 /* This is early initialization for the top cgroup */
07e06b01 8159 return &root_task_group.css;
68318b8e
SV
8160 }
8161
ec7dc8ac 8162 tg = sched_create_group(parent);
68318b8e
SV
8163 if (IS_ERR(tg))
8164 return ERR_PTR(-ENOMEM);
8165
68318b8e
SV
8166 return &tg->css;
8167}
8168
eb95419b 8169static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
ace783b9 8170{
eb95419b 8171 struct task_group *tg = css_tg(css);
5c9d535b 8172 struct task_group *parent = css_tg(css->parent);
ace783b9 8173
63876986
TH
8174 if (parent)
8175 sched_online_group(tg, parent);
ace783b9
LZ
8176 return 0;
8177}
8178
eb95419b 8179static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 8180{
eb95419b 8181 struct task_group *tg = css_tg(css);
68318b8e
SV
8182
8183 sched_destroy_group(tg);
8184}
8185
eb95419b 8186static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
ace783b9 8187{
eb95419b 8188 struct task_group *tg = css_tg(css);
ace783b9
LZ
8189
8190 sched_offline_group(tg);
8191}
8192
7e47682e 8193static void cpu_cgroup_fork(struct task_struct *task, void *private)
eeb61e53
KT
8194{
8195 sched_move_task(task);
8196}
8197
eb95419b 8198static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
bb9d97b6 8199 struct cgroup_taskset *tset)
68318b8e 8200{
bb9d97b6
TH
8201 struct task_struct *task;
8202
924f0d9a 8203 cgroup_taskset_for_each(task, tset) {
b68aa230 8204#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 8205 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 8206 return -EINVAL;
b68aa230 8207#else
bb9d97b6
TH
8208 /* We don't support RT-tasks being in separate groups */
8209 if (task->sched_class != &fair_sched_class)
8210 return -EINVAL;
b68aa230 8211#endif
bb9d97b6 8212 }
be367d09
BB
8213 return 0;
8214}
68318b8e 8215
eb95419b 8216static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
bb9d97b6 8217 struct cgroup_taskset *tset)
68318b8e 8218{
bb9d97b6
TH
8219 struct task_struct *task;
8220
924f0d9a 8221 cgroup_taskset_for_each(task, tset)
bb9d97b6 8222 sched_move_task(task);
68318b8e
SV
8223}
8224
eb95419b
TH
8225static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
8226 struct cgroup_subsys_state *old_css,
8227 struct task_struct *task)
068c5cc5 8228{
068c5cc5
PZ
8229 sched_move_task(task);
8230}
8231
052f1dc7 8232#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
8233static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8234 struct cftype *cftype, u64 shareval)
68318b8e 8235{
182446d0 8236 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
8237}
8238
182446d0
TH
8239static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8240 struct cftype *cft)
68318b8e 8241{
182446d0 8242 struct task_group *tg = css_tg(css);
68318b8e 8243
c8b28116 8244 return (u64) scale_load_down(tg->shares);
68318b8e 8245}
ab84d31e
PT
8246
8247#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8248static DEFINE_MUTEX(cfs_constraints_mutex);
8249
ab84d31e
PT
8250const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8251const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8252
a790de99
PT
8253static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8254
ab84d31e
PT
8255static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8256{
56f570e5 8257 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8258 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8259
8260 if (tg == &root_task_group)
8261 return -EINVAL;
8262
8263 /*
8264 * Ensure we have at least some amount of bandwidth every period. This is
8265 * to prevent reaching a state of large arrears when throttled via
8266 * entity_tick() resulting in prolonged exit starvation.
8267 */
8268 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8269 return -EINVAL;
8270
8271 /*
8272 * Likewise, bound things on the other side by preventing insane quota
8273 * periods. This also allows us to normalize in computing quota
8274 * feasibility.
8275 */
8276 if (period > max_cfs_quota_period)
8277 return -EINVAL;
8278
0e59bdae
KT
8279 /*
8280 * Prevent race between setting of cfs_rq->runtime_enabled and
8281 * unthrottle_offline_cfs_rqs().
8282 */
8283 get_online_cpus();
a790de99
PT
8284 mutex_lock(&cfs_constraints_mutex);
8285 ret = __cfs_schedulable(tg, period, quota);
8286 if (ret)
8287 goto out_unlock;
8288
58088ad0 8289 runtime_enabled = quota != RUNTIME_INF;
56f570e5 8290 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
8291 /*
8292 * If we need to toggle cfs_bandwidth_used, off->on must occur
8293 * before making related changes, and on->off must occur afterwards
8294 */
8295 if (runtime_enabled && !runtime_was_enabled)
8296 cfs_bandwidth_usage_inc();
ab84d31e
PT
8297 raw_spin_lock_irq(&cfs_b->lock);
8298 cfs_b->period = ns_to_ktime(period);
8299 cfs_b->quota = quota;
58088ad0 8300
a9cf55b2 8301 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 8302 /* restart the period timer (if active) to handle new period expiry */
77a4d1a1
PZ
8303 if (runtime_enabled)
8304 start_cfs_bandwidth(cfs_b);
ab84d31e
PT
8305 raw_spin_unlock_irq(&cfs_b->lock);
8306
0e59bdae 8307 for_each_online_cpu(i) {
ab84d31e 8308 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 8309 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
8310
8311 raw_spin_lock_irq(&rq->lock);
58088ad0 8312 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 8313 cfs_rq->runtime_remaining = 0;
671fd9da 8314
029632fb 8315 if (cfs_rq->throttled)
671fd9da 8316 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
8317 raw_spin_unlock_irq(&rq->lock);
8318 }
1ee14e6c
BS
8319 if (runtime_was_enabled && !runtime_enabled)
8320 cfs_bandwidth_usage_dec();
a790de99
PT
8321out_unlock:
8322 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 8323 put_online_cpus();
ab84d31e 8324
a790de99 8325 return ret;
ab84d31e
PT
8326}
8327
8328int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8329{
8330 u64 quota, period;
8331
029632fb 8332 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8333 if (cfs_quota_us < 0)
8334 quota = RUNTIME_INF;
8335 else
8336 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8337
8338 return tg_set_cfs_bandwidth(tg, period, quota);
8339}
8340
8341long tg_get_cfs_quota(struct task_group *tg)
8342{
8343 u64 quota_us;
8344
029632fb 8345 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
8346 return -1;
8347
029632fb 8348 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
8349 do_div(quota_us, NSEC_PER_USEC);
8350
8351 return quota_us;
8352}
8353
8354int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8355{
8356 u64 quota, period;
8357
8358 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 8359 quota = tg->cfs_bandwidth.quota;
ab84d31e 8360
ab84d31e
PT
8361 return tg_set_cfs_bandwidth(tg, period, quota);
8362}
8363
8364long tg_get_cfs_period(struct task_group *tg)
8365{
8366 u64 cfs_period_us;
8367
029632fb 8368 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8369 do_div(cfs_period_us, NSEC_PER_USEC);
8370
8371 return cfs_period_us;
8372}
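/*
 * Illustrative sketch, not part of the original file: capping a group at
 * half of one CPU with the CFS bandwidth setters above (50ms of quota per
 * 100ms period). The helper name and values are examples only.
 */
static int example_cap_at_half_cpu(struct task_group *tg)
{
	int ret;

	ret = tg_set_cfs_period(tg, 100000);	/* 100ms period, in us */
	if (ret)
		return ret;

	return tg_set_cfs_quota(tg, 50000);	/* 50ms quota -> 50% of one CPU */
}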
8373
182446d0
TH
8374static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8375 struct cftype *cft)
ab84d31e 8376{
182446d0 8377 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
8378}
8379
182446d0
TH
8380static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8381 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 8382{
182446d0 8383 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
8384}
8385
182446d0
TH
8386static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8387 struct cftype *cft)
ab84d31e 8388{
182446d0 8389 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
8390}
8391
182446d0
TH
8392static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8393 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 8394{
182446d0 8395 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
8396}
8397
a790de99
PT
8398struct cfs_schedulable_data {
8399 struct task_group *tg;
8400 u64 period, quota;
8401};
8402
8403/*
8404 * normalize group quota/period to be quota/max_period
8405 * note: units are usecs
8406 */
8407static u64 normalize_cfs_quota(struct task_group *tg,
8408 struct cfs_schedulable_data *d)
8409{
8410 u64 quota, period;
8411
8412 if (tg == d->tg) {
8413 period = d->period;
8414 quota = d->quota;
8415 } else {
8416 period = tg_get_cfs_period(tg);
8417 quota = tg_get_cfs_quota(tg);
8418 }
8419
8420 /* note: these should typically be equivalent */
8421 if (quota == RUNTIME_INF || quota == -1)
8422 return RUNTIME_INF;
8423
8424 return to_ratio(period, quota);
8425}
8426
8427static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8428{
8429 struct cfs_schedulable_data *d = data;
029632fb 8430 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
8431 s64 quota = 0, parent_quota = -1;
8432
8433 if (!tg->parent) {
8434 quota = RUNTIME_INF;
8435 } else {
029632fb 8436 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
8437
8438 quota = normalize_cfs_quota(tg, d);
9c58c79a 8439 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
8440
8441 /*
8442 * ensure max(child_quota) <= parent_quota, inherit when no
8443 * limit is set
8444 */
8445 if (quota == RUNTIME_INF)
8446 quota = parent_quota;
8447 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8448 return -EINVAL;
8449 }
9c58c79a 8450 cfs_b->hierarchical_quota = quota;
a790de99
PT
8451
8452 return 0;
8453}
8454
8455static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8456{
8277434e 8457 int ret;
a790de99
PT
8458 struct cfs_schedulable_data data = {
8459 .tg = tg,
8460 .period = period,
8461 .quota = quota,
8462 };
8463
8464 if (quota != RUNTIME_INF) {
8465 do_div(data.period, NSEC_PER_USEC);
8466 do_div(data.quota, NSEC_PER_USEC);
8467 }
8468
8277434e
PT
8469 rcu_read_lock();
8470 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8471 rcu_read_unlock();
8472
8473 return ret;
a790de99 8474}
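/*
 * Worked example of the hierarchy check above (illustrative, not from the
 * original file): a parent with quota=50ms, period=100ms normalizes to a
 * 50% ratio; a child asking for quota=40ms, period=50ms normalizes to 80%,
 * exceeds the parent, and tg_cfs_schedulable_down() rejects it with -EINVAL.
 */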
e8da1b18 8475
2da8ca82 8476static int cpu_stats_show(struct seq_file *sf, void *v)
e8da1b18 8477{
2da8ca82 8478 struct task_group *tg = css_tg(seq_css(sf));
029632fb 8479 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 8480
44ffc75b
TH
8481 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8482 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8483 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18
NR
8484
8485 return 0;
8486}
ab84d31e 8487#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 8488#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8489
052f1dc7 8490#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
8491static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8492 struct cftype *cft, s64 val)
6f505b16 8493{
182446d0 8494 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
8495}
8496
182446d0
TH
8497static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8498 struct cftype *cft)
6f505b16 8499{
182446d0 8500 return sched_group_rt_runtime(css_tg(css));
6f505b16 8501}
d0b27fa7 8502
182446d0
TH
8503static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8504 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 8505{
182446d0 8506 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
8507}
8508
182446d0
TH
8509static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8510 struct cftype *cft)
d0b27fa7 8511{
182446d0 8512 return sched_group_rt_period(css_tg(css));
d0b27fa7 8513}
6d6bc0ad 8514#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8515
fe5c7cc2 8516static struct cftype cpu_files[] = {
052f1dc7 8517#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8518 {
8519 .name = "shares",
f4c753b7
PM
8520 .read_u64 = cpu_shares_read_u64,
8521 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8522 },
052f1dc7 8523#endif
ab84d31e
PT
8524#ifdef CONFIG_CFS_BANDWIDTH
8525 {
8526 .name = "cfs_quota_us",
8527 .read_s64 = cpu_cfs_quota_read_s64,
8528 .write_s64 = cpu_cfs_quota_write_s64,
8529 },
8530 {
8531 .name = "cfs_period_us",
8532 .read_u64 = cpu_cfs_period_read_u64,
8533 .write_u64 = cpu_cfs_period_write_u64,
8534 },
e8da1b18
NR
8535 {
8536 .name = "stat",
2da8ca82 8537 .seq_show = cpu_stats_show,
e8da1b18 8538 },
ab84d31e 8539#endif
052f1dc7 8540#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8541 {
9f0c1e56 8542 .name = "rt_runtime_us",
06ecb27c
PM
8543 .read_s64 = cpu_rt_runtime_read,
8544 .write_s64 = cpu_rt_runtime_write,
6f505b16 8545 },
d0b27fa7
PZ
8546 {
8547 .name = "rt_period_us",
f4c753b7
PM
8548 .read_u64 = cpu_rt_period_read_uint,
8549 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8550 },
052f1dc7 8551#endif
4baf6e33 8552 { } /* terminate */
68318b8e
SV
8553};
8554
073219e9 8555struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748
TH
8556 .css_alloc = cpu_cgroup_css_alloc,
8557 .css_free = cpu_cgroup_css_free,
ace783b9
LZ
8558 .css_online = cpu_cgroup_css_online,
8559 .css_offline = cpu_cgroup_css_offline,
eeb61e53 8560 .fork = cpu_cgroup_fork,
bb9d97b6
TH
8561 .can_attach = cpu_cgroup_can_attach,
8562 .attach = cpu_cgroup_attach,
068c5cc5 8563 .exit = cpu_cgroup_exit,
5577964e 8564 .legacy_cftypes = cpu_files,
68318b8e
SV
8565 .early_init = 1,
8566};
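/*
 * Note (illustrative, not from the original file): with the cftype table
 * above, the "cpu" controller is expected to expose cpu.shares,
 * cpu.cfs_quota_us, cpu.cfs_period_us, cpu.stat, cpu.rt_runtime_us and
 * cpu.rt_period_us in each cgroup directory, backed by the read/write
 * handlers defined earlier in this file.
 */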
8567
052f1dc7 8568#endif /* CONFIG_CGROUP_SCHED */
d842de87 8569
b637a328
PM
8570void dump_cpu_task(int cpu)
8571{
8572 pr_info("Task dump for CPU %d:\n", cpu);
8573 sched_show_task(cpu_curr(cpu));
8574}