sched: Update rq clock earlier in unthrottle_cfs_rq
kernel/sched/core.c
/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}

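/*
 * Illustrative note (not part of the original file): this helper is used
 * to (re)arm the RT and CFS bandwidth period timers; hrtimer_forward()
 * advances the expiry by whole periods, so a timer that fired late does
 * not drift.
 */
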
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

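/*
 * Illustrative note (not part of the original file): rq->clock only
 * advances when update_rq_clock() runs, so any path that re-enables a
 * queue and then reads the clock -- e.g. unthrottle_cfs_rq(), per the
 * commit this view belongs to -- must update the clock first or it will
 * act on a stale timestamp.
 */
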
/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	i = sched_feat_set(cmp);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */
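
/*
 * Illustrative usage (not part of the original file): with
 * CONFIG_SCHED_DEBUG and debugfs mounted at /sys/kernel/debug, the
 * feature bits above can be toggled at runtime, e.g.:
 *
 *	echo NO_HRTICK > /sys/kernel/debug/sched_features
 *	cat /sys/kernel/debug/sched_features
 */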

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;


/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

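/*
 * Illustrative usage of the helpers above (not part of the original
 * file):
 *
 *	struct rq *rq;
 *	unsigned long flags;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... @p cannot change CPU; rq->lock and p->pi_lock are held ...
 *	task_rq_unlock(rq, p, &flags);
 */
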
/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP
void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

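/*
 * Illustrative note (not part of the original file): tsk_is_polling()
 * is true when the target CPU's idle loop polls TIF_NEED_RESCHED (e.g.
 * mwait-based idle); in that case setting the flag above is enough and
 * the reschedule IPI can be skipped.
 */
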
void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			smp_send_reschedule(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();
	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	struct rq *rq;

	rq = this_rq();

	/* Make sure rq->nr_running update is visible after the IPI */
	smp_rmb();

	/* More than one running task needs preemption */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
	assert_raw_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

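/*
 * Sketch of a walk_tg_tree_from() caller (illustrative, not part of the
 * original file): visit every task_group below the root, doing work on
 * the way down and nothing on the way up; my_down_fn is a hypothetical
 * tg_visitor:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, my_down_fn, tg_nop, data);
 *	rcu_read_unlock();
 */
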
static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

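/*
 * Illustrative note (not part of the original file): prio_to_weight[]
 * maps nice 0 to a weight of 1024 (NICE_0_LOAD) with roughly a 1.25x
 * step per nice level, and prio_to_wmult[] caches 2^32/weight so the
 * fair class can replace a division by weight with a multiply and shift.
 */
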
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 st;

		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		st = steal_ticks(steal);
		steal = st * TICK_NSEC;

		rq->prev_steal_time_rq += steal;

		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

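/*
 * Illustrative note (not part of the original file): rq->clock counts
 * all elapsed time, while rq->clock_task additionally excludes time
 * accounted to IRQs and (under paravirt) time stolen by the hypervisor,
 * which is why task runtime accounting is driven from clock_task.
 */
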
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

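/*
 * Worked example (illustrative, not part of the original file): a
 * SCHED_FIFO task with rt_priority 50 gets prio = MAX_RT_PRIO-1 - 50 =
 * 49, while a nice-0 SCHED_NORMAL task keeps static_prio = 120; lower
 * prio values mean higher priority throughout the scheduler.
 */
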
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);
		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio)
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back-to-back clock update.
	 */
	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}

static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);

void register_task_migration_notifier(struct notifier_block *n)
{
	atomic_notifier_chain_register(&task_migration_notifier, n);
}

#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		struct task_migration_notifier tmn;

		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);

		tmn.task = p;
		tmn.from_cpu = task_cpu(p);
		tmn.to_cpu = new_cpu;

		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
	}

	__set_task_cpu(p, new_cpu);
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		on_rq = p->on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

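/*
 * Illustrative usage of wait_task_inactive() (not part of the original
 * file): a caller samples the switch count and compares it on a second
 * call to prove @p stayed off its CPU in between:
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		... state changed, retry or bail ...
 */
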
/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_sched("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
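
/*
 * Illustrative note (not part of the original file): update_avg() is an
 * exponentially weighted moving average with weight 1/8, i.e.
 * avg' = avg + (sample - avg)/8; it is used below to track rq->avg_idle.
 */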
#endif

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = 1;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	trace_sched_wakeup(p, true);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	if (rq->idle_stamp) {
		u64 delta = rq->clock - rq->idle_stamp;
		u64 max = 2*sysctl_sched_migration_cost;

		if (delta > max)
			rq->avg_idle = max;
		else
			update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
static void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;

	raw_spin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}

void scheduler_ipi(void)
{
	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
			&& !tick_nohz_full_cpu(smp_processor_id()))
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	tick_nohz_full_check();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
		smp_send_reschedule(cpu);
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

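/*
 * Illustrative note (not part of the original file): with the TTWU_QUEUE
 * feature enabled, a wakeup aimed at a CPU outside the waker's
 * last-level-cache domain is queued on the remote CPU's wake_list and
 * completed from scheduler_ipi(), instead of the waker taking the remote
 * rq->lock directly (see ttwu_queue() below).
 */
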
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	smp_wmb();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() in finish_lock_switch().
	 */
	smp_rmb();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	if (!p->on_rq)
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.  Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
	p->se.avg.runnable_avg_period = 0;
	p->se.avg.runnable_avg_sum = 0;
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies;
		p->mm->numa_next_reset = jiffies;
		p->mm->numa_scan_seq = 0;
	}

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
#endif /* CONFIG_NUMA_BALANCING */
}

#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_SCHED_DEBUG
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sched_feat_set("NUMA");
	else
		sched_feat_set("NO_NUMA");
}
#else
__read_mostly bool numabalancing_enabled;

void set_numabalancing_state(bool enabled)
{
	numabalancing_enabled = enabled;
}
#endif /* CONFIG_SCHED_DEBUG */
#endif /* CONFIG_NUMA_BALANCING */

1654/*
1655 * fork()/clone()-time setup:
1656 */
3e51e3ed 1657void sched_fork(struct task_struct *p)
dd41f596 1658{
0122ec5b 1659 unsigned long flags;
dd41f596
IM
1660 int cpu = get_cpu();
1661
1662 __sched_fork(p);
06b83b5f 1663 /*
0017d735 1664 * We mark the process as running here. This guarantees that
06b83b5f
PZ
1665 * nobody will actually run it, and a signal or other external
1666 * event cannot wake it up and insert it on the runqueue either.
1667 */
0017d735 1668 p->state = TASK_RUNNING;
dd41f596 1669
c350a04e
MG
1670 /*
1671 * Make sure we do not leak PI boosting priority to the child.
1672 */
1673 p->prio = current->normal_prio;
1674
b9dc29e7
MG
1675 /*
1676 * Revert to default priority/policy on fork if requested.
1677 */
1678 if (unlikely(p->sched_reset_on_fork)) {
c350a04e 1679 if (task_has_rt_policy(p)) {
b9dc29e7 1680 p->policy = SCHED_NORMAL;
6c697bdf 1681 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
1682 p->rt_priority = 0;
1683 } else if (PRIO_TO_NICE(p->static_prio) < 0)
1684 p->static_prio = NICE_TO_PRIO(0);
1685
1686 p->prio = p->normal_prio = __normal_prio(p);
1687 set_load_weight(p);
6c697bdf 1688
b9dc29e7
MG
1689 /*
1690 * We don't need the reset flag anymore after the fork. It has
1691 * fulfilled its duty:
1692 */
1693 p->sched_reset_on_fork = 0;
1694 }
ca94c442 1695
2ddbf952
HS
1696 if (!rt_prio(p->prio))
1697 p->sched_class = &fair_sched_class;
b29739f9 1698
cd29fe6f
PZ
1699 if (p->sched_class->task_fork)
1700 p->sched_class->task_fork(p);
1701
86951599
PZ
1702 /*
1703 * The child is not yet in the pid-hash so no cgroup attach races,
1704 * and the cgroup is pinned to this child due to cgroup_fork()
1705 * is ran before sched_fork().
1706 *
1707 * Silence PROVE_RCU.
1708 */
0122ec5b 1709 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 1710 set_task_cpu(p, cpu);
0122ec5b 1711 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 1712
52f17b6c 1713#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
dd41f596 1714 if (likely(sched_info_on()))
52f17b6c 1715 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 1716#endif
3ca7a440
PZ
1717#if defined(CONFIG_SMP)
1718 p->on_cpu = 0;
4866cde0 1719#endif
bdd4e85d 1720#ifdef CONFIG_PREEMPT_COUNT
4866cde0 1721 /* Want to start with kernel preemption disabled. */
a1261f54 1722 task_thread_info(p)->preempt_count = 1;
1da177e4 1723#endif
806c09a7 1724#ifdef CONFIG_SMP
917b627d 1725 plist_node_init(&p->pushable_tasks, MAX_PRIO);
806c09a7 1726#endif
917b627d 1727
476d139c 1728 put_cpu();
1da177e4
LT
1729}
1730
1731/*
1732 * wake_up_new_task - wake up a newly created task for the first time.
1733 *
1734 * This function will do some initial scheduler statistics housekeeping
1735 * that must be done for every newly created context, then puts the task
1736 * on the runqueue and wakes it.
1737 */
3e51e3ed 1738void wake_up_new_task(struct task_struct *p)
1da177e4
LT
1739{
1740 unsigned long flags;
dd41f596 1741 struct rq *rq;
fabf318e 1742
ab2515c4 1743 raw_spin_lock_irqsave(&p->pi_lock, flags);
fabf318e
PZ
1744#ifdef CONFIG_SMP
1745 /*
1746 * Fork balancing, do it here and not earlier because:
1747 * - cpus_allowed can change in the fork path
1748 * - any previously selected cpu might disappear through hotplug
fabf318e 1749 */
ab2515c4 1750 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
0017d735
PZ
1751#endif
1752
ab2515c4 1753 rq = __task_rq_lock(p);
cd29fe6f 1754 activate_task(rq, p, 0);
fd2f4419 1755 p->on_rq = 1;
89363381 1756 trace_sched_wakeup_new(p, true);
a7558e01 1757 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 1758#ifdef CONFIG_SMP
efbbd05a
PZ
1759 if (p->sched_class->task_woken)
1760 p->sched_class->task_woken(rq, p);
9a897c5a 1761#endif
0122ec5b 1762 task_rq_unlock(rq, p, &flags);
1da177e4
LT
1763}
1764
#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

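/*
 * Illustrative sketch (not part of this file): how a subsystem such as a
 * hypervisor can use preempt notifiers to save/restore per-cpu state
 * around preemption of a vCPU thread. my_vcpu, my_save() and my_load()
 * are hypothetical; preempt_notifier_init() is from <linux/preempt.h>:
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		my_load(v, cpu);	// reload state on the new cpu
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		my_save(v);		// stash state before losing the cpu
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	// on the vcpu thread, before entering its run loop:
 *	preempt_notifier_init(&v->pn, &my_preempt_ops);
 *	preempt_notifier_register(&v->pn);
 *	...
 *	preempt_notifier_unregister(&v->pn);
 */
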
/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	trace_sched_switch(prev, next);
	sched_info_switch(prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for its use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	vtime_task_switch(prev);
	finish_arch_switch(prev);
	perf_event_task_sched_in(prev, current);
	finish_lock_switch(rq, prev);
	finish_arch_post_lock_switch();

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}

	tick_nohz_task_switch(current);
}

#ifdef CONFIG_SMP

/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
	if (rq->post_schedule) {
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}

#else

static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}

static inline void post_schedule(struct rq *rq)
{
}

#endif

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);

	/*
	 * FIXME: do we need to worry about rq being invalidated by the
	 * task_switch?
	 */
	post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {
		/* Kernel threads have no mm; borrow the old one (lazy TLB). */
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * The runqueue lock will be released by the next task (which is
	 * an invalid locking op, but in the case of the scheduler it's an
	 * obvious special-case), so we do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	context_tracking_task_switch(prev, next);
	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}

/*
 * nr_running and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, total number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}

unsigned long nr_iowait_cpu(int cpu)
{
	struct rq *this = cpu_rq(cpu);
	return atomic_read(&this->nr_iowait);
}

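/*
 * Illustrative sketch (not part of this file): these accessors exist for
 * built-in consumers such as the /proc/loadavg code. A seq_file handler
 * could report them directly; the format string here is made up, only
 * the accessors are real:
 *
 *	seq_printf(m, "running %lu switches %llu iowait %lu\n",
 *		   nr_running(), nr_context_switches(), nr_iowait());
 */
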
#ifdef CONFIG_SMP

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

#endif

DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

/*
 * Return any ns on the sched_clock that have not yet been accounted to
 * @p in case that task is currently running.
 *
 * Called with task_rq_lock() held on @rq.
 */
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq->clock_task - p->se.exec_start;
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}

unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

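/*
 * Illustrative sketch (not part of this file): a cputime reader, such as
 * the POSIX CPU timer code behind clock_gettime(CLOCK_THREAD_CPUTIME_ID),
 * can build a per-thread clock directly on this primitive. The helper
 * name is hypothetical:
 *
 *	static u64 my_thread_cputime_ns(struct task_struct *t)
 *	{
 *		return task_sched_runtime(t);	// accounted + pending delta
 *	}
 */
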
/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	update_cpu_load_active(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq, cpu);
#endif
	rq_last_tick_reset(rq);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * scheduler_tick_max_deferment
 *
 * Keep at least one tick per second when a single
 * active task is running because the scheduler doesn't
 * yet completely support full dynticks environment.
 *
 * This makes sure that uptime, CFS vruntime, load
 * balancing, etc... continue to move forward, even
 * with a very low granularity.
 */
u64 scheduler_tick_max_deferment(void)
{
	struct rq *rq = this_rq();
	unsigned long next, now = ACCESS_ONCE(jiffies);

	next = rq->last_sched_tick + HZ;

	if (time_before_eq(next, now))
		return 0;

	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
}
#endif

notrace unsigned long get_parent_ip(unsigned long addr)
{
	if (in_lock_functions(addr)) {
		addr = CALLER_ADDR2;
		if (in_lock_functions(addr))
			addr = CALLER_ADDR3;
	}
	return addr;
}

#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_PREEMPT_TRACER))

void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	if (preempt_count() == val)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);

void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);

#endif

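/*
 * Illustrative sketch (not part of this file): with CONFIG_DEBUG_PREEMPT
 * the common nesting helpers in <linux/preempt.h> funnel through the
 * functions above, roughly:
 *
 *	#define preempt_disable()		\
 *	do {					\
 *		add_preempt_count(1);		\
 *		barrier();			\
 *	} while (0)
 *
 * so trace_preempt_off() fires exactly on the 0 -> 1 transition. This is
 * a simplification; the real macros go through inc/dec wrappers.
 */
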
/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
		__schedule_bug(prev);
	rcu_sleep_check();

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	if (prev->on_rq || rq->skip_clock_update < 0)
		update_rq_clock(rq);
	prev->sched_class->put_prev_task(rq, prev);
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG(); /* the idle class will always have a runnable task */
}

/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outermost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 */
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch has flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	sched_preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}

static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

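/*
 * Illustrative sketch (not part of this file): the canonical
 * explicit-blocking caller of schedule() (entry path 1 in the comment
 * above __schedule()). The task marks itself non-runnable before
 * re-checking its condition, so a concurrent wakeup cannot be lost;
 * my_wq and my_cond are hypothetical:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		schedule();		// actually sleep
 *	}
 *	finish_wait(&my_wq, &wait);
 */
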
#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 */
	user_exit();
	schedule();
	user_enter();
}
#endif

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched notrace preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return.
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count_notrace(PREEMPT_ACTIVE);
		__schedule();
		sub_preempt_count_notrace(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);

/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note that this is called and returns with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	prev_state = exception_enter();

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		__schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());

	exception_exit(prev_state);
}

#endif /* CONFIG_PREEMPT */

int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);

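/*
 * Illustrative sketch (not part of this file): the usual waitqueue
 * pairing built on __wake_up(). my_wq and my_flag are hypothetical;
 * wake_up() and wait_event_interruptible() are the <linux/wait.h>
 * wrappers:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	// consumer: sleeps until my_flag becomes non-zero
 *	ret = wait_event_interruptible(my_wq, my_flag != 0);
 *
 *	// producer: publish the condition, then wake sleepers
 *	my_flag = 1;
 *	wake_up(&my_wq);	// expands to
 *				// __wake_up(&my_wq, TASK_NORMAL, 1, NULL)
 */
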
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
	__wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
	__wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = WF_SYNC;

	if (unlikely(!q))
		return;

	if (unlikely(!nr_exclusive))
		wake_flags = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/**
 * complete: - signals a single thread waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

/**
 * complete_all: - signals all threads waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);

static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}

static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}

static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

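/*
 * Illustrative sketch (not part of this file): the typical handoff
 * between a starter and a worker thread. my_worker and the variable
 * names are hypothetical; DECLARE_COMPLETION_ONSTACK() is from
 * <linux/completion.h>:
 *
 *	static int my_worker(void *data)
 *	{
 *		struct completion *done = data;
 *
 *		// ... do the work ...
 *		complete(done);			// wake exactly one waiter
 *		return 0;
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	kthread_run(my_worker, &done, "my_worker");
 *	wait_for_completion(&done);	// sleep until my_worker signals
 */
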
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * The return value is 0 if timed out, and positive (at least 1, or number of
 * jiffies left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);

/**
 * wait_for_completion_io: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO.
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);

/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO.
 *
 * The return value is 0 if timed out, and positive (at least 1, or number of
 * jiffies left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x: holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);

/**
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x: completion structure
 *
 * Returns: 0 if a decrement cannot be done without blocking
 *	    1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
 * attempt to decrement the counter without blocking. This
 * enables us to avoid waiting if the resource the completion
 * is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);

/**
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
 * Returns: 0 if there are waiters (wait_for_completion() in progress)
 *	    1 if there are no waiters.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(completion_done);

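/*
 * Illustrative sketch (not part of this file): a completion used as a
 * counter of available resources, polled without blocking. my_pool and
 * the helpers are hypothetical:
 *
 *	// producer: each complete() adds one available slot
 *	complete(&my_pool);
 *
 *	// consumer: grab a slot only if one is ready right now
 *	if (try_wait_for_completion(&my_pool))
 *		my_use_slot();
 *	else
 *		my_fall_back_to_slow_path();
 */
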
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);

#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	int oldprio, on_rq, running;
	struct rq *rq;
	const struct sched_class *prev_class;

	BUG_ON(prio < 0 || prio > MAX_PRIO);

	rq = __task_rq_lock(p);

	/*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, prio);
	oldprio = p->prio;
	prev_class = p->sched_class;
	on_rq = p->on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	if (rt_prio(prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	p->prio = prio;

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	__task_rq_unlock(rq);
}
#endif

void set_user_nice(struct task_struct *p, long nice)
{
	int old_prio, delta, on_rq;
	unsigned long flags;
	struct rq *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * SCHED_FIFO/SCHED_RR:
	 */
	if (task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);
	old_prio = p->prio;
	p->prio = effective_prio(p);
	delta = p->prio - old_prio;

	if (on_rq) {
		enqueue_task(rq, p, 0);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);

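/*
 * Illustrative sketch (not part of this file): a kernel thread lowering
 * its own priority so it only competes for leftover CPU, in the style of
 * nice-19 background workers. my_thread_fn is hypothetical:
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		set_user_nice(current, 19);	// lowest priority
 *		while (!kthread_should_stop()) {
 *			// ... background work ...
 *		}
 *		return 0;
 *	}
 */
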
/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
	int nice_rlim = 20 - nice;

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < -40)
		increment = -40;
	if (increment > 40)
		increment = 40;

	nice = TASK_NICE(current) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;

	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
	return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (!llist_empty(&rq->wake_list))
		return 0;
#endif

	return 1;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
	p->policy = policy;
	p->rt_priority = prio;
	p->normal_prio = normal_prio(p);
	/* we are holding p->pi_lock already */
	p->prio = rt_mutex_getprio(p);
	if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;
	set_load_weight(p);
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p, int policy,
				const struct sched_param *param, bool user)
{
	int retval, oldprio, oldpolicy = -1, on_rq, running;
	unsigned long flags;
	const struct sched_class *prev_class;
	struct rq *rq;
	int reset_on_fork;

	/* may grab non-irq protected spin_locks */
	BUG_ON(in_interrupt());
recheck:
	/* double check policy once rq lock held */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
		policy &= ~SCHED_RESET_ON_FORK;

		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
				policy != SCHED_IDLE)
			return -EINVAL;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (param->sched_priority < 0 ||
	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
		return -EINVAL;
	if (rt_policy(policy) != (param->sched_priority != 0))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* can't set/change the rt policy */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* can't increase priority */
			if (param->sched_priority > p->rt_priority &&
			    param->sched_priority > rlim_rtprio)
				return -EPERM;
		}

		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
			if (!can_nice(p, TASK_NICE(p)))
				return -EPERM;
		}

		/* can't change other user's priorities */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
	}

	if (user) {
		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &flags);

	/*
	 * Changing the policy of the stop threads is a very bad idea
	 */
	if (p == rq->stop) {
		task_rq_unlock(rq, p, &flags);
		return -EINVAL;
	}

	/*
	 * If not changing anything there's no need to proceed further:
	 */
	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
			param->sched_priority == p->rt_priority))) {
		task_rq_unlock(rq, p, &flags);
		return 0;
	}

#ifdef CONFIG_RT_GROUP_SCHED
	if (user) {
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			task_rq_unlock(rq, p, &flags);
			return -EPERM;
		}
	}
#endif

	/* recheck policy now with rq lock held */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &flags);
		goto recheck;
	}
	on_rq = p->on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	p->sched_reset_on_fork = reset_on_fork;

	oldprio = p->prio;
	prev_class = p->sched_class;
	__setscheduler(rq, p, policy, param->sched_priority);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, 0);

	check_class_changed(rq, p, prev_class, oldprio);
	task_rq_unlock(rq, p, &flags);

	rt_mutex_adjust_pi(p);

	return 0;
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return __sched_setscheduler(p, policy, param, false);
}

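/*
 * Illustrative sketch (not part of this file): promoting a freshly
 * created kthread to SCHED_FIFO from kernel space, bypassing the
 * permission checks as the comment above describes. my_task and the
 * priority value are hypothetical:
 *
 *	struct sched_param sp = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	if (sched_setscheduler_nocheck(my_task, SCHED_FIFO, &sp))
 *		pr_warn("could not make my_task SCHED_FIFO\n");
 */
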
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
		struct sched_param __user *, param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}
3490
3491/**
3492 * sys_sched_setparam - set/change the RT priority of a thread
3493 * @pid: the pid in question.
3494 * @param: structure containing the new RT priority.
3495 */
5add95d4 3496SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
3497{
3498 return do_sched_setscheduler(pid, -1, param);
3499}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			goto out_unlock;
		}
		rcu_read_unlock();
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);
again:
	retval = set_cpus_allowed_ptr(p, new_mask);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();
	put_online_cpus();

	return retval;
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 */
SYSCALL_DEFINE0(sched_yield)
{
	struct rq *rq = this_rq_lock();

	schedstat_inc(rq, yld_count);
	current->sched_class->yield_task(rq);

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
	__release(rq->lock);
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
	do_raw_spin_unlock(&rq->lock);
	sched_preempt_enable_no_resched();

	schedule();

	return 0;
}

static inline int should_resched(void)
{
	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}

static void __cond_resched(void)
{
	add_preempt_count(PREEMPT_ACTIVE);
	__schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}

int __sched _cond_resched(void)
{
	if (should_resched()) {
		__cond_resched();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_cond_resched);
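
/*
 * Example (illustrative sketch): the common pattern of calling
 * cond_resched() from a long-running loop in process context, so that a
 * CONFIG_PREEMPT=n kernel does not hog the CPU. 'obj', 'long_list' and
 * 'expensive_scan' are hypothetical names:
 *
 *	list_for_each_entry(obj, &long_list, node) {
 *		expensive_scan(obj);
 *		cond_resched();
 *	}
 */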

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched();
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			__cond_resched();
		else
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
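
/*
 * Example (illustrative sketch): walking a long data structure under a
 * spinlock while letting lock waiters and pending reschedules in, via the
 * cond_resched_lock() wrapper. A nonzero return means the lock was
 * dropped and retaken, so the walk must tolerate that. 'mylock',
 * 'work_left' and 'do_one_step' are hypothetical names:
 *
 *	spin_lock(&mylock);
 *	while (work_left()) {
 *		do_one_step();
 *		cond_resched_lock(&mylock);
 *	}
 *	spin_unlock(&mylock);
 */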

int __sched __cond_resched_softirq(void)
{
	BUG_ON(!in_softirq());

	if (should_resched()) {
		local_bh_enable();
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run. If removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Returns:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	while (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	if (task_running(p_rq, p) || p->state)
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
	if (yielded) {
		schedstat_inc(rq, yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_task(p_rq->curr);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
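
/*
 * Example (illustrative sketch): a paravirtualized spinner directing
 * cycles at the vcpu task believed to hold the lock, the sort of use that
 * makes yield_to() interesting to hypervisors such as KVM.
 * 'lock_holder_task' is a hypothetical task the caller has pinned:
 *
 *	if (yield_to(lock_holder_task, true) <= 0)
 *		cpu_relax();
 */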

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);
	current->in_iowait = 1;
	schedule();
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);

long __sched io_schedule_timeout(long timeout)
{
	struct rq *rq = raw_rq();
	long ret;

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);
	current->in_iowait = 1;
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
	return ret;
}
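
/*
 * Example (illustrative sketch): how a wait-for-IO loop uses these
 * helpers instead of plain schedule(), so the sleep is accounted as
 * iowait and the block plug is flushed first. 'io_done' and 'req' are
 * hypothetical names:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (io_done(req))
 *			break;
 *		io_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */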

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
	}
	return ret;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct timespec __user *, interval)
{
	struct task_struct *p;
	unsigned int time_slice;
	unsigned long flags;
	struct rq *rq;
	int retval;
	struct timespec t;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	rq = task_rq_lock(p, &flags);
	time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &flags);

	rcu_read_unlock();
	jiffies_to_timespec(time_slice, &t);
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	int ppid;
	unsigned state;

	state = p->state ? __ffs(p->state) + 1 : 0;
	printk(KERN_INFO "%-15.15s %c", p->comm,
		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running  ");
	else
		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
	if (state == TASK_RUNNING)
		printk(KERN_CONT "  running task    ");
	else
		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	rcu_read_lock();
	ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
		task_pid_nr(p), ppid,
		(unsigned long)task_thread_info(p)->flags);

	print_worker_info(KERN_INFO, p);
	show_stack(p, NULL);
}

void show_state_filter(unsigned long state_filter)
{
	struct task_struct *g, *p;

#if BITS_PER_LONG == 32
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
#else
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
#endif
	rcu_read_lock();
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		if (!state_filter || (p->state & state_filter))
			sched_show_task(p);
	} while_each_thread(g, p);

	touch_all_softlockup_watchdogs();

#ifdef CONFIG_SCHED_DEBUG
	sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}

void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
	idle->sched_class = &idle_sched_class;
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	__sched_fork(idle);
	idle->state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();

	do_set_cpus_allowed(idle, cpumask_of(cpu));
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the cpu isn't yet set to this cpu so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP)
	idle->on_cpu = 1;
#endif
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	task_thread_info(idle)->preempt_count = 0;

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle);
#if defined(CONFIG_SMP)
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}

#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (p->on_rq) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
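
/*
 * Example (illustrative sketch): restricting a kernel thread to one CPU,
 * a common driver-side use of this interface. 'tsk' and 'target_cpu' are
 * hypothetical; as noted above, the caller must hold a reference on the
 * task:
 *
 *	if (set_cpus_allowed_ptr(tsk, cpumask_of(target_cpu)))
 *		pr_warn("could not pin thread to CPU%d\n", target_cpu);
 */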

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
	struct rq *rq_dest, *rq_src;
	int ret = 0;

	if (unlikely(!cpu_active(dest_cpu)))
		return ret;

	rq_src = cpu_rq(src_cpu);
	rq_dest = cpu_rq(dest_cpu);

	raw_spin_lock(&p->pi_lock);
	double_rq_lock(rq_src, rq_dest);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;
	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		goto fail;

	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
	if (p->on_rq) {
		dequeue_task(rq_src, p, 0);
		set_task_cpu(p, dest_cpu);
		enqueue_task(rq_dest, p, 0);
		check_preempt_curr(rq_dest, p, 0);
	}
done:
	ret = 1;
fail:
	double_rq_unlock(rq_src, rq_dest);
	raw_spin_unlock(&p->pi_lock);
	return ret;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
	local_irq_enable();
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
	mmdrop(mm);
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have. Assumes we're called after migrate_tasks() so that the
 * nr_active count is stable.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

/*
 * Migrate all tasks from the rq, sleeping tasks will be migrated by
 * try_to_wake_up()->select_task_rq().
 *
 * Called with rq->lock held even though we're in stop_machine() and
 * there's no concurrency possible, we hold the required locks anyway
 * because of lock validation efforts.
 */
static void migrate_tasks(unsigned int dead_cpu)
{
	struct rq *rq = cpu_rq(dead_cpu);
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;

	/*
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the rq is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
	 */
	rq->stop = NULL;

	/*
	 * put_prev_task() and pick_next_task() sched
	 * class method both need to have an up-to-date
	 * value of rq->clock[_task]
	 */
	update_rq_clock(rq);

	for ( ; ; ) {
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
		if (rq->nr_running == 1)
			break;

		next = pick_next_task(rq);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);

		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		raw_spin_unlock(&rq->lock);

		__migrate_task(next, dead_cpu, dest_cpu);

		raw_spin_lock(&rq->lock);
	}

	rq->stop = stop;
}

#endif /* CONFIG_HOTPLUG_CPU */

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(13);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[12] is terminator */

	return table;
}

static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
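
/*
 * The tables built above surface as a per-cpu, per-domain-level directory
 * tree under /proc (illustrative paths, assuming at least one domain
 * level exists on the machine):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/flags
 *	/proc/sys/kernel/sched_domain/cpu1/domain0/imbalance_pct
 */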

/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
	if (sd_sysctl_header)
		unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif

static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	unsigned long flags;
	struct rq *rq = cpu_rq(cpu);

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
		rq->calc_load_update = calc_load_update;
		break;

	case CPU_ONLINE:
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

			set_rq_online(rq);
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
		sched_ttwu_pending();
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_offline(rq);
		}
		migrate_tasks(cpu);
		BUG_ON(rq->nr_running != 1); /* the migration thread */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

	case CPU_DEAD:
		calc_load_migrate(rq);
		break;
#endif
	}

	update_max_interval();

	return NOTIFY_OK;
}

/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
static struct notifier_block __cpuinitdata migration_notifier = {
	.notifier_call = migration_call,
	.priority = CPU_PRI_MIGRATION,
};

static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
	case CPU_DOWN_FAILED:
		set_cpu_active((long)hcpu, true);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		set_cpu_active((long)hcpu, false);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __init migration_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	/* Initialize migration for the boot CPU */
	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
	BUG_ON(err == NOTIFY_BAD);
	migration_call(&migration_notifier, CPU_ONLINE, cpu);
	register_cpu_notifier(&migration_notifier);

	/* Register cpu active notifiers */
	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);

	return 0;
}
early_initcall(migration_init);
#endif

#ifdef CONFIG_SMP

static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_debug_enabled;

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	char str[256];

	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
		return -1;
	}

	printk(KERN_CONT "span %s level %s\n", str, sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		/*
		 * Even though we initialize ->power to something semi-sane,
		 * we leave power_orig unset. This allows us to detect if
		 * domain iteration is still funny without causing /0 traps.
		 */
		if (!group->sgp->power_orig) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: domain->cpu_power not "
					"set\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

		printk(KERN_CONT " %s", str);
		if (group->sgp->power != SCHED_POWER_SCALE) {
			printk(KERN_CONT " (cpu_power = %d)",
				group->sgp->power);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUPOWER |
			 SD_SHARE_PKG_RESOURCES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_SHARE_CPUPOWER |
				SD_SHARE_PKG_RESOURCES);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_online;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_rto_mask;
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
			kfree(sg->sgp);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If it's an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgp);
		kfree(sd->groups);
	}
	kfree(sd);
}

static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}

static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	for (; sd; sd = sd->parent)
		destroy_sched_domain(sd, cpu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this allows us
 * to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_id);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	int id = cpu;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd)
		id = cpumask_first(sched_domain_span(sd));

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_id, cpu) = id;
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp, cpu);

	update_top_cache_domain(cpu);
}

/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}

__setup("isolcpus=", isolated_cpu_setup);
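
/*
 * Example (illustrative sketch): booting with the kernel command line
 *
 *	isolcpus=2,3
 *
 * removes CPUs 2 and 3 from the general scheduler domains, so tasks only
 * land there through explicit affinity, e.g. sched_setaffinity().
 */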

static const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_power **__percpu sgp;
};

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

struct sched_domain_topology_level;

typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);

#define SDTL_OVERLAP	0x01

struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
};

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree
 * is of unequal depth; make sure to skip domains that already cover the
 * entire range.
 *
 * In that case build_sched_domains() will have terminated the iteration
 * early and our sibling sd spans will be empty. Domains should always
 * include the cpu they're built on, so check that.
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *span = sched_domain_span(sd);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, span) {
		sibling = *per_cpu_ptr(sdd->sd, i);
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}
}

/*
 * Return the canonical balance cpu for this group; this is the first cpu
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *child;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		child = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask(). */
		if (!cpumask_test_cpu(i, sched_domain_span(child)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (child->child) {
			child = child->child;
			cpumask_copy(sg_span, sched_domain_span(child));
		} else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
		if (atomic_inc_return(&sg->sgp->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgp->power such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance cpu. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
5300
dce840a0 5301static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 5302{
dce840a0
PZ
5303 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5304 struct sched_domain *child = sd->child;
1da177e4 5305
dce840a0
PZ
5306 if (child)
5307 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 5308
9c3f75cb 5309 if (sg) {
dce840a0 5310 *sg = *per_cpu_ptr(sdd->sg, cpu);
9c3f75cb 5311 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
e3589f6c 5312 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
9c3f75cb 5313 }
dce840a0
PZ
5314
5315 return cpu;
1e9f28fa 5316}
1e9f28fa 5317
01a08546 5318/*
dce840a0
PZ
5319 * build_sched_groups will build a circular linked list of the groups
5320 * covered by the given span, set each group's ->cpumask correctly,
5321 * and initialize each group's ->cpu_power to 0.
e3589f6c
PZ
5322 *
5323 * Assumes the sched_domain tree is fully constructed
01a08546 5324 */
e3589f6c
PZ
5325static int
5326build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 5327{
dce840a0
PZ
5328 struct sched_group *first = NULL, *last = NULL;
5329 struct sd_data *sdd = sd->private;
5330 const struct cpumask *span = sched_domain_span(sd);
f96225fd 5331 struct cpumask *covered;
dce840a0 5332 int i;
9c1cfda2 5333
e3589f6c
PZ
5334 get_group(cpu, sdd, &sd->groups);
5335 atomic_inc(&sd->groups->ref);
5336
5337 if (cpu != cpumask_first(sched_domain_span(sd)))
5338 return 0;
5339
f96225fd
PZ
5340 lockdep_assert_held(&sched_domains_mutex);
5341 covered = sched_domains_tmpmask;
5342
dce840a0 5343 cpumask_clear(covered);
6711cab4 5344
dce840a0
PZ
5345 for_each_cpu(i, span) {
5346 struct sched_group *sg;
5347 int group = get_group(i, sdd, &sg);
5348 int j;
6711cab4 5349
dce840a0
PZ
5350 if (cpumask_test_cpu(i, covered))
5351 continue;
6711cab4 5352
dce840a0 5353 cpumask_clear(sched_group_cpus(sg));
9c3f75cb 5354 sg->sgp->power = 0;
c1174876 5355 cpumask_setall(sched_group_mask(sg));
0601a88d 5356
dce840a0
PZ
5357 for_each_cpu(j, span) {
5358 if (get_group(j, sdd, NULL) != group)
5359 continue;
0601a88d 5360
dce840a0
PZ
5361 cpumask_set_cpu(j, covered);
5362 cpumask_set_cpu(j, sched_group_cpus(sg));
5363 }
0601a88d 5364
dce840a0
PZ
5365 if (!first)
5366 first = sg;
5367 if (last)
5368 last->next = sg;
5369 last = sg;
5370 }
5371 last->next = first;
e3589f6c
PZ
5372
5373 return 0;
0601a88d 5374}
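The first/last bookkeeping in both group builders above is the standard one-pass construction of a circular singly linked list. A self-contained sketch of the same pattern (struct group and build_ring are illustrative names, not kernel types):

#include <stdio.h>
#include <stdlib.h>

struct group {
	struct group *next;
	int id;
};

/* Mirror the first/last bookkeeping of build_sched_groups(): append
 * each new node, then close the ring once at the end. */
static struct group *build_ring(int n)
{
	struct group *first = NULL, *last = NULL;

	for (int i = 0; i < n; i++) {
		struct group *g = calloc(1, sizeof(*g));

		if (!g)
			return NULL; /* a real version would unwind */
		g->id = i;
		if (!first)
			first = g;
		if (last)
			last->next = g;
		last = g;
	}
	if (last)
		last->next = first; /* close the circle */
	return first;
}

int main(void)
{
	struct group *g = build_ring(3), *p = g;

	do {
		printf("group %d\n", p->id);
		p = p->next;
	} while (p != g); /* the ring brings us back to the start */
	return 0;
}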
51888ca2 5375
89c4710e
SS
5376/*
5377 * Initialize sched groups cpu_power.
5378 *
5379 * cpu_power indicates the capacity of a sched group, which is used while
5380 * distributing the load between different sched groups in a sched domain.
5381 * Typically cpu_power for all the groups in a sched domain will be the same
5382 * unless there are asymmetries in the topology. If there are asymmetries,
5383 * the group having more cpu_power will pick up more load compared to the
5384 * group having less cpu_power.
89c4710e
SS
5385 */
5386static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5387{
e3589f6c 5388 struct sched_group *sg = sd->groups;
89c4710e 5389
e3589f6c
PZ
5390 WARN_ON(!sd || !sg);
5391
5392 do {
5393 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
5394 sg = sg->next;
5395 } while (sg != sd->groups);
89c4710e 5396
c1174876 5397 if (cpu != group_balance_cpu(sg))
e3589f6c 5398 return;
aae6d3dd 5399
d274cb30 5400 update_group_power(sd, cpu);
69e1e811 5401 atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
89c4710e
SS
5402}
5403
029632fb
PZ
5404int __weak arch_sd_sibling_asym_packing(void)
5405{
5406 return 0*SD_ASYM_PACKING;
89c4710e
SS
5407}
5408
7c16ec58
MT
5409/*
5410 * Initializers for sched domains
5411 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5412 */
5413
a5d8c348
IM
5414#ifdef CONFIG_SCHED_DEBUG
5415# define SD_INIT_NAME(sd, type) sd->name = #type
5416#else
5417# define SD_INIT_NAME(sd, type) do { } while (0)
5418#endif
5419
54ab4ff4
PZ
5420#define SD_INIT_FUNC(type) \
5421static noinline struct sched_domain * \
5422sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
5423{ \
5424 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
5425 *sd = SD_##type##_INIT; \
54ab4ff4
PZ
5426 SD_INIT_NAME(sd, type); \
5427 sd->private = &tl->data; \
5428 return sd; \
7c16ec58
MT
5429}
5430
5431SD_INIT_FUNC(CPU)
7c16ec58
MT
5432#ifdef CONFIG_SCHED_SMT
5433 SD_INIT_FUNC(SIBLING)
5434#endif
5435#ifdef CONFIG_SCHED_MC
5436 SD_INIT_FUNC(MC)
5437#endif
01a08546
HC
5438#ifdef CONFIG_SCHED_BOOK
5439 SD_INIT_FUNC(BOOK)
5440#endif
7c16ec58 5441
1d3504fc 5442static int default_relax_domain_level = -1;
60495e77 5443int sched_domain_level_max;
1d3504fc
HS
5444
5445static int __init setup_relax_domain_level(char *str)
5446{
a841f8ce
DS
5447 if (kstrtoint(str, 0, &default_relax_domain_level))
5448 pr_warn("Unable to set relax_domain_level\n");
30e0e178 5449
1d3504fc
HS
5450 return 1;
5451}
5452__setup("relax_domain_level=", setup_relax_domain_level);
5453
5454static void set_domain_attribute(struct sched_domain *sd,
5455 struct sched_domain_attr *attr)
5456{
5457 int request;
5458
5459 if (!attr || attr->relax_domain_level < 0) {
5460 if (default_relax_domain_level < 0)
5461 return;
5462 else
5463 request = default_relax_domain_level;
5464 } else
5465 request = attr->relax_domain_level;
5466 if (request < sd->level) {
5467 /* turn off idle balance on this domain */
c88d5910 5468 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
5469 } else {
5470 /* turn on idle balance on this domain */
c88d5910 5471 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
5472 }
5473}
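As a worked reading of the two functions above (hedged, assuming the usual level numbering where SMT/MC domains sit at the lowest levels): booting with

    relax_domain_level=1

makes request = 1, so every domain with level > 1 gets SD_BALANCE_WAKE and SD_BALANCE_NEWIDLE cleared, while domains at level 0 and 1 keep wake/newidle balancing enabled.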
5474
54ab4ff4
PZ
5475static void __sdt_free(const struct cpumask *cpu_map);
5476static int __sdt_alloc(const struct cpumask *cpu_map);
5477
2109b99e
AH
5478static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
5479 const struct cpumask *cpu_map)
5480{
5481 switch (what) {
2109b99e 5482 case sa_rootdomain:
822ff793
PZ
5483 if (!atomic_read(&d->rd->refcount))
5484 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
5485 case sa_sd:
5486 free_percpu(d->sd); /* fall through */
dce840a0 5487 case sa_sd_storage:
54ab4ff4 5488 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
5489 case sa_none:
5490 break;
5491 }
5492}
3404c8d9 5493
2109b99e
AH
5494static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
5495 const struct cpumask *cpu_map)
5496{
dce840a0
PZ
5497 memset(d, 0, sizeof(*d));
5498
54ab4ff4
PZ
5499 if (__sdt_alloc(cpu_map))
5500 return sa_sd_storage;
dce840a0
PZ
5501 d->sd = alloc_percpu(struct sched_domain *);
5502 if (!d->sd)
5503 return sa_sd_storage;
2109b99e 5504 d->rd = alloc_rootdomain();
dce840a0 5505 if (!d->rd)
21d42ccf 5506 return sa_sd;
2109b99e
AH
5507 return sa_rootdomain;
5508}
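The enum s_alloc staging combined with the fall-through switch in __free_domain_allocs() is a common kernel unwind idiom: each allocation step advances the stage, and teardown starts at the stage reached and falls through everything below it. A userspace sketch of the pattern (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

enum stage { st_none, st_bufs, st_all };

struct ctx { int *a; int *b; };

/* Teardown mirrors __free_domain_allocs(): each case releases one
 * stage and falls through to the stages below it. */
static void unwind(struct ctx *c, enum stage s)
{
	switch (s) {
	case st_all:
		free(c->b);	/* fall through */
	case st_bufs:
		free(c->a);	/* fall through */
	case st_none:
		break;
	}
}

static enum stage build(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		return st_none;
	c->b = malloc(64);
	if (!c->b)
		return st_bufs;
	return st_all;
}

int main(void)
{
	struct ctx c = { 0 };
	enum stage s = build(&c);

	printf("reached stage %d\n", s);
	unwind(&c, s); /* safe for any partially completed build */
	return 0;
}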
57d885fe 5509
dce840a0
PZ
5510/*
5511 * NULL the sd_data elements we've used to build the sched_domain and
5512 * sched_group structure so that the subsequent __free_domain_allocs()
5513 * will not free the data we're using.
5514 */
5515static void claim_allocations(int cpu, struct sched_domain *sd)
5516{
5517 struct sd_data *sdd = sd->private;
dce840a0
PZ
5518
5519 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
5520 *per_cpu_ptr(sdd->sd, cpu) = NULL;
5521
e3589f6c 5522 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 5523 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c
PZ
5524
5525 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
9c3f75cb 5526 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
dce840a0
PZ
5527}
5528
2c402dc3
PZ
5529#ifdef CONFIG_SCHED_SMT
5530static const struct cpumask *cpu_smt_mask(int cpu)
7f4588f3 5531{
2c402dc3 5532 return topology_thread_cpumask(cpu);
3bd65a80 5533}
2c402dc3 5534#endif
7f4588f3 5535
d069b916
PZ
5536/*
5537 * Topology list, bottom-up.
5538 */
2c402dc3 5539static struct sched_domain_topology_level default_topology[] = {
d069b916
PZ
5540#ifdef CONFIG_SCHED_SMT
5541 { sd_init_SIBLING, cpu_smt_mask, },
01a08546 5542#endif
1e9f28fa 5543#ifdef CONFIG_SCHED_MC
2c402dc3 5544 { sd_init_MC, cpu_coregroup_mask, },
1e9f28fa 5545#endif
d069b916
PZ
5546#ifdef CONFIG_SCHED_BOOK
5547 { sd_init_BOOK, cpu_book_mask, },
5548#endif
5549 { sd_init_CPU, cpu_cpu_mask, },
eb7a74e6
PZ
5550 { NULL, },
5551};
5552
5553static struct sched_domain_topology_level *sched_domain_topology = default_topology;
5554
cb83b629
PZ
5555#ifdef CONFIG_NUMA
5556
5557static int sched_domains_numa_levels;
cb83b629
PZ
5558static int *sched_domains_numa_distance;
5559static struct cpumask ***sched_domains_numa_masks;
5560static int sched_domains_curr_level;
5561
cb83b629
PZ
5562static inline int sd_local_flags(int level)
5563{
10717dcd 5564 if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
cb83b629
PZ
5565 return 0;
5566
5567 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
5568}
5569
5570static struct sched_domain *
5571sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
5572{
5573 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
5574 int level = tl->numa_level;
5575 int sd_weight = cpumask_weight(
5576 sched_domains_numa_masks[level][cpu_to_node(cpu)]);
5577
5578 *sd = (struct sched_domain){
5579 .min_interval = sd_weight,
5580 .max_interval = 2*sd_weight,
5581 .busy_factor = 32,
870a0bb5 5582 .imbalance_pct = 125,
cb83b629
PZ
5583 .cache_nice_tries = 2,
5584 .busy_idx = 3,
5585 .idle_idx = 2,
5586 .newidle_idx = 0,
5587 .wake_idx = 0,
5588 .forkexec_idx = 0,
5589
5590 .flags = 1*SD_LOAD_BALANCE
5591 | 1*SD_BALANCE_NEWIDLE
5592 | 0*SD_BALANCE_EXEC
5593 | 0*SD_BALANCE_FORK
5594 | 0*SD_BALANCE_WAKE
5595 | 0*SD_WAKE_AFFINE
cb83b629 5596 | 0*SD_SHARE_CPUPOWER
cb83b629
PZ
5597 | 0*SD_SHARE_PKG_RESOURCES
5598 | 1*SD_SERIALIZE
5599 | 0*SD_PREFER_SIBLING
5600 | sd_local_flags(level)
5601 ,
5602 .last_balance = jiffies,
5603 .balance_interval = sd_weight,
5604 };
5605 SD_INIT_NAME(sd, NUMA);
5606 sd->private = &tl->data;
5607
5608 /*
5609 * Ugly hack to pass state to sd_numa_mask()...
5610 */
5611 sched_domains_curr_level = tl->numa_level;
5612
5613 return sd;
5614}
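The 0*FLAG / 1*FLAG initializer style above is deliberate: disabled flags stay listed with a zero multiplier so the full set of available flags remains visible, and the compiler folds the whole expression to a constant. A trivial illustration (F_A/F_B/F_C are made-up flags):

#include <stdio.h>

#define F_A 0x1
#define F_B 0x2
#define F_C 0x4

int main(void)
{
	/* Same idiom as the sd->flags initializer: disabled flags keep a
	 * 0* multiplier, so the full menu stays visible in the source. */
	int flags = 1*F_A
		  | 0*F_B
		  | 1*F_C;

	printf("flags = %#x\n", flags); /* prints 0x5 */
	return 0;
}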
5615
5616static const struct cpumask *sd_numa_mask(int cpu)
5617{
5618 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
5619}
5620
d039ac60
PZ
5621static void sched_numa_warn(const char *str)
5622{
5623 static int done = false;
5624 int i, j;
5625
5626 if (done)
5627 return;
5628
5629 done = true;
5630
5631 printk(KERN_WARNING "ERROR: %s\n\n", str);
5632
5633 for (i = 0; i < nr_node_ids; i++) {
5634 printk(KERN_WARNING " ");
5635 for (j = 0; j < nr_node_ids; j++)
5636 printk(KERN_CONT "%02d ", node_distance(i,j));
5637 printk(KERN_CONT "\n");
5638 }
5639 printk(KERN_WARNING "\n");
5640}
5641
5642static bool find_numa_distance(int distance)
5643{
5644 int i;
5645
5646 if (distance == node_distance(0, 0))
5647 return true;
5648
5649 for (i = 0; i < sched_domains_numa_levels; i++) {
5650 if (sched_domains_numa_distance[i] == distance)
5651 return true;
5652 }
5653
5654 return false;
5655}
5656
cb83b629
PZ
5657static void sched_init_numa(void)
5658{
5659 int next_distance, curr_distance = node_distance(0, 0);
5660 struct sched_domain_topology_level *tl;
5661 int level = 0;
5662 int i, j, k;
5663
cb83b629
PZ
5664 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
5665 if (!sched_domains_numa_distance)
5666 return;
5667
5668 /*
5669 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
5670 * unique distances in the node_distance() table.
5671 *
5672 * Assumes node_distance(0,j) includes all distances in
5673 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
5674 */
5675 next_distance = curr_distance;
5676 for (i = 0; i < nr_node_ids; i++) {
5677 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
5678 for (k = 0; k < nr_node_ids; k++) {
5679 int distance = node_distance(i, k);
5680
5681 if (distance > curr_distance &&
5682 (distance < next_distance ||
5683 next_distance == curr_distance))
5684 next_distance = distance;
5685
5686 /*
5687 * While not a strong assumption, it would be nice to know
5688 * about cases where node A is connected to B but B is not
5689 * equally connected to A.
5690 */
5691 if (sched_debug() && node_distance(k, i) != distance)
5692 sched_numa_warn("Node-distance not symmetric");
5693
5694 if (sched_debug() && i && !find_numa_distance(distance))
5695 sched_numa_warn("Node-0 not representative");
5696 }
5697 if (next_distance != curr_distance) {
5698 sched_domains_numa_distance[level++] = next_distance;
5699 sched_domains_numa_levels = level;
5700 curr_distance = next_distance;
5701 } else break;
cb83b629 5702 }
d039ac60
PZ
5703
5704 /*
5705 * In case of sched_debug() we verify the above assumption.
5706 */
5707 if (!sched_debug())
5708 break;
cb83b629
PZ
5709 }
5710 /*
5711 * 'level' contains the number of unique distances, excluding the
5712 * identity distance node_distance(i,i).
5713 *
28b4a521 5714 * The sched_domains_numa_distance[] array includes the actual distance
cb83b629
PZ
5715 * numbers.
5716 */
5717
5f7865f3
TC
5718 /*
5719 * Here, we should temporarily reset sched_domains_numa_levels to 0.
5720 * If it fails to allocate memory for array sched_domains_numa_masks[][],
5721 * the array will contain fewer than 'level' members. This could be
5722 * dangerous when we use it to iterate over the array sched_domains_numa_masks[][]
5723 * in other functions.
5724 *
5725 * We reset it to 'level' at the end of this function.
5726 */
5727 sched_domains_numa_levels = 0;
5728
cb83b629
PZ
5729 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
5730 if (!sched_domains_numa_masks)
5731 return;
5732
5733 /*
5734 * Now for each level, construct a mask per node which contains all
5735 * cpus of nodes that are that many hops away from us.
5736 */
5737 for (i = 0; i < level; i++) {
5738 sched_domains_numa_masks[i] =
5739 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
5740 if (!sched_domains_numa_masks[i])
5741 return;
5742
5743 for (j = 0; j < nr_node_ids; j++) {
2ea45800 5744 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
5745 if (!mask)
5746 return;
5747
5748 sched_domains_numa_masks[i][j] = mask;
5749
5750 for (k = 0; k < nr_node_ids; k++) {
dd7d8634 5751 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
5752 continue;
5753
5754 cpumask_or(mask, mask, cpumask_of_node(k));
5755 }
5756 }
5757 }
5758
5759 tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
5760 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
5761 if (!tl)
5762 return;
5763
5764 /*
5765 * Copy the default topology bits..
5766 */
5767 for (i = 0; default_topology[i].init; i++)
5768 tl[i] = default_topology[i];
5769
5770 /*
5771 * .. and append 'j' levels of NUMA goodness.
5772 */
5773 for (j = 0; j < level; i++, j++) {
5774 tl[i] = (struct sched_domain_topology_level){
5775 .init = sd_numa_init,
5776 .mask = sd_numa_mask,
5777 .flags = SDTL_OVERLAP,
5778 .numa_level = j,
5779 };
5780 }
5781
5782 sched_domain_topology = tl;
5f7865f3
TC
5783
5784 sched_domains_numa_levels = level;
cb83b629 5785}
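The distance-deduplication pass is easier to follow in isolation. A userspace sketch, assuming a small symmetric 4-node distance table, that extracts the unique off-diagonal distances in ascending order the same way sched_init_numa() does:

#include <stdio.h>

#define NODES 4

static const int dist[NODES][NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

int main(void)
{
	int curr = dist[0][0], next, levels = 0;

	/* Repeatedly pick the smallest distance strictly greater than the
	 * current one -- the same dedup selection sort as sched_init_numa(). */
	for (;;) {
		next = curr;
		for (int i = 0; i < NODES; i++) {
			for (int k = 0; k < NODES; k++) {
				int d = dist[i][k];

				if (d > curr && (d < next || next == curr))
					next = d;
			}
		}
		if (next == curr)
			break;
		printf("level %d: distance %d\n", levels++, next);
		curr = next;
	}
	return 0;	/* prints 20 then 30: two NUMA levels */
}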
301a5cba
TC
5786
5787static void sched_domains_numa_masks_set(int cpu)
5788{
5789 int i, j;
5790 int node = cpu_to_node(cpu);
5791
5792 for (i = 0; i < sched_domains_numa_levels; i++) {
5793 for (j = 0; j < nr_node_ids; j++) {
5794 if (node_distance(j, node) <= sched_domains_numa_distance[i])
5795 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
5796 }
5797 }
5798}
5799
5800static void sched_domains_numa_masks_clear(int cpu)
5801{
5802 int i, j;
5803 for (i = 0; i < sched_domains_numa_levels; i++) {
5804 for (j = 0; j < nr_node_ids; j++)
5805 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
5806 }
5807}
5808
5809/*
5810 * Update sched_domains_numa_masks[level][node] array when new cpus
5811 * are onlined.
5812 */
5813static int sched_domains_numa_masks_update(struct notifier_block *nfb,
5814 unsigned long action,
5815 void *hcpu)
5816{
5817 int cpu = (long)hcpu;
5818
5819 switch (action & ~CPU_TASKS_FROZEN) {
5820 case CPU_ONLINE:
5821 sched_domains_numa_masks_set(cpu);
5822 break;
5823
5824 case CPU_DEAD:
5825 sched_domains_numa_masks_clear(cpu);
5826 break;
5827
5828 default:
5829 return NOTIFY_DONE;
5830 }
5831
5832 return NOTIFY_OK;
cb83b629
PZ
5833}
5834#else
5835static inline void sched_init_numa(void)
5836{
5837}
301a5cba
TC
5838
5839static int sched_domains_numa_masks_update(struct notifier_block *nfb,
5840 unsigned long action,
5841 void *hcpu)
5842{
5843 return 0;
5844}
cb83b629
PZ
5845#endif /* CONFIG_NUMA */
5846
54ab4ff4
PZ
5847static int __sdt_alloc(const struct cpumask *cpu_map)
5848{
5849 struct sched_domain_topology_level *tl;
5850 int j;
5851
5852 for (tl = sched_domain_topology; tl->init; tl++) {
5853 struct sd_data *sdd = &tl->data;
5854
5855 sdd->sd = alloc_percpu(struct sched_domain *);
5856 if (!sdd->sd)
5857 return -ENOMEM;
5858
5859 sdd->sg = alloc_percpu(struct sched_group *);
5860 if (!sdd->sg)
5861 return -ENOMEM;
5862
9c3f75cb
PZ
5863 sdd->sgp = alloc_percpu(struct sched_group_power *);
5864 if (!sdd->sgp)
5865 return -ENOMEM;
5866
54ab4ff4
PZ
5867 for_each_cpu(j, cpu_map) {
5868 struct sched_domain *sd;
5869 struct sched_group *sg;
9c3f75cb 5870 struct sched_group_power *sgp;
54ab4ff4
PZ
5871
5872 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
5873 GFP_KERNEL, cpu_to_node(j));
5874 if (!sd)
5875 return -ENOMEM;
5876
5877 *per_cpu_ptr(sdd->sd, j) = sd;
5878
5879 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5880 GFP_KERNEL, cpu_to_node(j));
5881 if (!sg)
5882 return -ENOMEM;
5883
30b4e9eb
IM
5884 sg->next = sg;
5885
54ab4ff4 5886 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 5887
c1174876 5888 sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
9c3f75cb
PZ
5889 GFP_KERNEL, cpu_to_node(j));
5890 if (!sgp)
5891 return -ENOMEM;
5892
5893 *per_cpu_ptr(sdd->sgp, j) = sgp;
54ab4ff4
PZ
5894 }
5895 }
5896
5897 return 0;
5898}
5899
5900static void __sdt_free(const struct cpumask *cpu_map)
5901{
5902 struct sched_domain_topology_level *tl;
5903 int j;
5904
5905 for (tl = sched_domain_topology; tl->init; tl++) {
5906 struct sd_data *sdd = &tl->data;
5907
5908 for_each_cpu(j, cpu_map) {
fb2cf2c6 5909 struct sched_domain *sd;
5910
5911 if (sdd->sd) {
5912 sd = *per_cpu_ptr(sdd->sd, j);
5913 if (sd && (sd->flags & SD_OVERLAP))
5914 free_sched_groups(sd->groups, 0);
5915 kfree(*per_cpu_ptr(sdd->sd, j));
5916 }
5917
5918 if (sdd->sg)
5919 kfree(*per_cpu_ptr(sdd->sg, j));
5920 if (sdd->sgp)
5921 kfree(*per_cpu_ptr(sdd->sgp, j));
54ab4ff4
PZ
5922 }
5923 free_percpu(sdd->sd);
fb2cf2c6 5924 sdd->sd = NULL;
54ab4ff4 5925 free_percpu(sdd->sg);
fb2cf2c6 5926 sdd->sg = NULL;
9c3f75cb 5927 free_percpu(sdd->sgp);
fb2cf2c6 5928 sdd->sgp = NULL;
54ab4ff4
PZ
5929 }
5930}
5931
2c402dc3
PZ
5932struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
5933 struct s_data *d, const struct cpumask *cpu_map,
d069b916 5934 struct sched_domain_attr *attr, struct sched_domain *child,
2c402dc3
PZ
5935 int cpu)
5936{
54ab4ff4 5937 struct sched_domain *sd = tl->init(tl, cpu);
2c402dc3 5938 if (!sd)
d069b916 5939 return child;
2c402dc3 5940
2c402dc3 5941 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
5942 if (child) {
5943 sd->level = child->level + 1;
5944 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 5945 child->parent = sd;
60495e77 5946 }
d069b916 5947 sd->child = child;
a841f8ce 5948 set_domain_attribute(sd, attr);
2c402dc3
PZ
5949
5950 return sd;
5951}
5952
2109b99e
AH
5953/*
5954 * Build sched domains for a given set of cpus and attach the sched domains
5955 * to the individual cpus
5956 */
dce840a0
PZ
5957static int build_sched_domains(const struct cpumask *cpu_map,
5958 struct sched_domain_attr *attr)
2109b99e
AH
5959{
5960 enum s_alloc alloc_state = sa_none;
dce840a0 5961 struct sched_domain *sd;
2109b99e 5962 struct s_data d;
822ff793 5963 int i, ret = -ENOMEM;
9c1cfda2 5964
2109b99e
AH
5965 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
5966 if (alloc_state != sa_rootdomain)
5967 goto error;
9c1cfda2 5968
dce840a0 5969 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 5970 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
5971 struct sched_domain_topology_level *tl;
5972
3bd65a80 5973 sd = NULL;
e3589f6c 5974 for (tl = sched_domain_topology; tl->init; tl++) {
2c402dc3 5975 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
e3589f6c
PZ
5976 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
5977 sd->flags |= SD_OVERLAP;
d110235d
PZ
5978 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
5979 break;
e3589f6c 5980 }
d274cb30 5981
d069b916
PZ
5982 while (sd->child)
5983 sd = sd->child;
5984
21d42ccf 5985 *per_cpu_ptr(d.sd, i) = sd;
dce840a0
PZ
5986 }
5987
5988 /* Build the groups for the domains */
5989 for_each_cpu(i, cpu_map) {
5990 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
5991 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
5992 if (sd->flags & SD_OVERLAP) {
5993 if (build_overlap_sched_groups(sd, i))
5994 goto error;
5995 } else {
5996 if (build_sched_groups(sd, i))
5997 goto error;
5998 }
1cf51902 5999 }
a06dadbe 6000 }
9c1cfda2 6001
1da177e4 6002 /* Calculate CPU power for physical packages and nodes */
a9c9a9b6
PZ
6003 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6004 if (!cpumask_test_cpu(i, cpu_map))
6005 continue;
9c1cfda2 6006
dce840a0
PZ
6007 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6008 claim_allocations(i, sd);
cd4ea6ae 6009 init_sched_groups_power(i, sd);
dce840a0 6010 }
f712c0c7 6011 }
9c1cfda2 6012
1da177e4 6013 /* Attach the domains */
dce840a0 6014 rcu_read_lock();
abcd083a 6015 for_each_cpu(i, cpu_map) {
21d42ccf 6016 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6017 cpu_attach_domain(sd, d.rd, i);
1da177e4 6018 }
dce840a0 6019 rcu_read_unlock();
51888ca2 6020
822ff793 6021 ret = 0;
51888ca2 6022error:
2109b99e 6023 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6024 return ret;
1da177e4 6025}
029190c5 6026
acc3f5d7 6027static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6028static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6029static struct sched_domain_attr *dattr_cur;
6030 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
6031
6032/*
6033 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
6034 * cpumask) fails, then fallback to a single sched domain,
6035 * as determined by the single cpumask fallback_doms.
029190c5 6036 */
4212823f 6037static cpumask_var_t fallback_doms;
029190c5 6038
ee79d1bd
HC
6039/*
6040 * arch_update_cpu_topology lets virtualized architectures update the
6041 * cpu core maps. It is supposed to return 1 if the topology changed
6042 * or 0 if it stayed the same.
6043 */
6044int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 6045{
ee79d1bd 6046 return 0;
22e52b07
HC
6047}
6048
acc3f5d7
RR
6049cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6050{
6051 int i;
6052 cpumask_var_t *doms;
6053
6054 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6055 if (!doms)
6056 return NULL;
6057 for (i = 0; i < ndoms; i++) {
6058 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6059 free_sched_domains(doms, i);
6060 return NULL;
6061 }
6062 }
6063 return doms;
6064}
6065
6066void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6067{
6068 unsigned int i;
6069 for (i = 0; i < ndoms; i++)
6070 free_cpumask_var(doms[i]);
6071 kfree(doms);
6072}
6073
1a20ff27 6074/*
41a2d6cf 6075 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
6076 * For now this just excludes isolated cpus, but could be used to
6077 * exclude other special cases in the future.
1a20ff27 6078 */
c4a8849a 6079static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 6080{
7378547f
MM
6081 int err;
6082
22e52b07 6083 arch_update_cpu_topology();
029190c5 6084 ndoms_cur = 1;
acc3f5d7 6085 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 6086 if (!doms_cur)
acc3f5d7
RR
6087 doms_cur = &fallback_doms;
6088 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 6089 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 6090 register_sched_domain_sysctl();
7378547f
MM
6091
6092 return err;
1a20ff27
DG
6093}
6094
1a20ff27
DG
6095/*
6096 * Detach sched domains from a group of cpus specified in cpu_map
6097 * These cpus will now be attached to the NULL domain
6098 */
96f874e2 6099static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
6100{
6101 int i;
6102
dce840a0 6103 rcu_read_lock();
abcd083a 6104 for_each_cpu(i, cpu_map)
57d885fe 6105 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 6106 rcu_read_unlock();
1a20ff27
DG
6107}
6108
1d3504fc
HS
6109/* handle null as "default" */
6110static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6111 struct sched_domain_attr *new, int idx_new)
6112{
6113 struct sched_domain_attr tmp;
6114
6115 /* fast path */
6116 if (!new && !cur)
6117 return 1;
6118
6119 tmp = SD_ATTR_INIT;
6120 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6121 new ? (new + idx_new) : &tmp,
6122 sizeof(struct sched_domain_attr));
6123}
6124
029190c5
PJ
6125/*
6126 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 6127 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
6128 * doms_new[] to the current sched domain partitioning, doms_cur[].
6129 * It destroys each deleted domain and builds each new domain.
6130 *
acc3f5d7 6131 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
6132 * The masks don't intersect (don't overlap). We should set up one
6133 * sched domain for each mask. CPUs not in any of the cpumasks will
6134 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
6135 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6136 * it as it is.
6137 *
acc3f5d7
RR
6138 * The passed in 'doms_new' should be allocated using
6139 * alloc_sched_domains. This routine takes ownership of it and will
6140 * free_sched_domains it when done with it. If the caller failed the
6141 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6142 * and partition_sched_domains() will fall back to the single partition
6143 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 6144 *
96f874e2 6145 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
6146 * ndoms_new == 0 is a special case for destroying existing domains,
6147 * and it will not create the default domain.
dfb512ec 6148 *
029190c5
PJ
6149 * Call with hotplug lock held
6150 */
acc3f5d7 6151void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 6152 struct sched_domain_attr *dattr_new)
029190c5 6153{
dfb512ec 6154 int i, j, n;
d65bd5ec 6155 int new_topology;
029190c5 6156
712555ee 6157 mutex_lock(&sched_domains_mutex);
a1835615 6158
7378547f
MM
6159 /* always unregister in case we don't destroy any domains */
6160 unregister_sched_domain_sysctl();
6161
d65bd5ec
HC
6162 /* Let architecture update cpu core mappings. */
6163 new_topology = arch_update_cpu_topology();
6164
dfb512ec 6165 n = doms_new ? ndoms_new : 0;
029190c5
PJ
6166
6167 /* Destroy deleted domains */
6168 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 6169 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 6170 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 6171 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
6172 goto match1;
6173 }
6174 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 6175 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
6176match1:
6177 ;
6178 }
6179
e761b772
MK
6180 if (doms_new == NULL) {
6181 ndoms_cur = 0;
acc3f5d7 6182 doms_new = &fallback_doms;
6ad4c188 6183 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 6184 WARN_ON_ONCE(dattr_new);
e761b772
MK
6185 }
6186
029190c5
PJ
6187 /* Build new domains */
6188 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 6189 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 6190 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 6191 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
6192 goto match2;
6193 }
6194 /* no match - add a new doms_new */
dce840a0 6195 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
6196match2:
6197 ;
6198 }
6199
6200 /* Remember the new sched domains */
acc3f5d7
RR
6201 if (doms_cur != &fallback_doms)
6202 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 6203 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 6204 doms_cur = doms_new;
1d3504fc 6205 dattr_cur = dattr_new;
029190c5 6206 ndoms_cur = ndoms_new;
7378547f
MM
6207
6208 register_sched_domain_sysctl();
a1835615 6209
712555ee 6210 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
6211}
6212
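A hedged sketch of the calling convention the comment above describes (kernel context, hotplug lock held; example_repartition and the two source masks are hypothetical). The caller allocates the array, fills it, and hands ownership over; partition_sched_domains() frees it once it is replaced:

static void example_repartition(const struct cpumask *a,
				const struct cpumask *b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* doms_new == NULL && ndoms_new == 1 forces the fallback domain */
		partition_sched_domains(1, NULL, NULL);
		return;
	}
	cpumask_copy(doms[0], a);
	cpumask_copy(doms[1], b);
	partition_sched_domains(2, doms, NULL); /* takes ownership of doms */
}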
d35be8ba
SB
6213static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
6214
1da177e4 6215/*
3a101d05
TH
6216 * Update cpusets according to cpu_active mask. If cpusets are
6217 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6218 * around partition_sched_domains().
d35be8ba
SB
6219 *
6220 * If we come here as part of a suspend/resume, don't touch cpusets because we
6221 * want to restore it to its original state upon resume anyway.
1da177e4 6222 */
0b2e918a
TH
6223static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6224 void *hcpu)
e761b772 6225{
d35be8ba
SB
6226 switch (action) {
6227 case CPU_ONLINE_FROZEN:
6228 case CPU_DOWN_FAILED_FROZEN:
6229
6230 /*
6231 * num_cpus_frozen tracks how many CPUs are involved in the suspend/
6232 * resume sequence. As long as this is not the last online
6233 * operation in the resume sequence, just build a single sched
6234 * domain, ignoring cpusets.
6235 */
6236 num_cpus_frozen--;
6237 if (likely(num_cpus_frozen)) {
6238 partition_sched_domains(1, NULL, NULL);
6239 break;
6240 }
6241
6242 /*
6243 * This is the last CPU online operation. So fall through and
6244 * restore the original sched domains by considering the
6245 * cpuset configurations.
6246 */
6247
e761b772 6248 case CPU_ONLINE:
6ad4c188 6249 case CPU_DOWN_FAILED:
7ddf96b0 6250 cpuset_update_active_cpus(true);
d35be8ba 6251 break;
3a101d05
TH
6252 default:
6253 return NOTIFY_DONE;
6254 }
d35be8ba 6255 return NOTIFY_OK;
3a101d05 6256}
e761b772 6257
0b2e918a
TH
6258static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6259 void *hcpu)
3a101d05 6260{
d35be8ba 6261 switch (action) {
3a101d05 6262 case CPU_DOWN_PREPARE:
7ddf96b0 6263 cpuset_update_active_cpus(false);
d35be8ba
SB
6264 break;
6265 case CPU_DOWN_PREPARE_FROZEN:
6266 num_cpus_frozen++;
6267 partition_sched_domains(1, NULL, NULL);
6268 break;
e761b772
MK
6269 default:
6270 return NOTIFY_DONE;
6271 }
d35be8ba 6272 return NOTIFY_OK;
e761b772 6273}
e761b772 6274
1da177e4
LT
6275void __init sched_init_smp(void)
6276{
dcc30a35
RR
6277 cpumask_var_t non_isolated_cpus;
6278
6279 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 6280 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 6281
cb83b629
PZ
6282 sched_init_numa();
6283
95402b38 6284 get_online_cpus();
712555ee 6285 mutex_lock(&sched_domains_mutex);
c4a8849a 6286 init_sched_domains(cpu_active_mask);
dcc30a35
RR
6287 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6288 if (cpumask_empty(non_isolated_cpus))
6289 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 6290 mutex_unlock(&sched_domains_mutex);
95402b38 6291 put_online_cpus();
e761b772 6292
301a5cba 6293 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
3a101d05
TH
6294 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6295 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772 6296
b328ca18 6297 init_hrtick();
5c1e1767
NP
6298
6299 /* Move init over to a non-isolated CPU */
dcc30a35 6300 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 6301 BUG();
19978ca6 6302 sched_init_granularity();
dcc30a35 6303 free_cpumask_var(non_isolated_cpus);
4212823f 6304
0e3900e6 6305 init_sched_rt_class();
1da177e4
LT
6306}
6307#else
6308void __init sched_init_smp(void)
6309{
19978ca6 6310 sched_init_granularity();
1da177e4
LT
6311}
6312#endif /* CONFIG_SMP */
6313
cd1bb94b
AB
6314const_debug unsigned int sysctl_timer_migration = 1;
6315
1da177e4
LT
6316int in_sched_functions(unsigned long addr)
6317{
1da177e4
LT
6318 return in_lock_functions(addr) ||
6319 (addr >= (unsigned long)__sched_text_start
6320 && addr < (unsigned long)__sched_text_end);
6321}
6322
029632fb 6323#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
6324/*
6325 * Default task group.
6326 * Every task in system belongs to this group at bootup.
6327 */
029632fb 6328struct task_group root_task_group;
35cf4e50 6329LIST_HEAD(task_groups);
052f1dc7 6330#endif
6f505b16 6331
e6252c3e 6332DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6f505b16 6333
1da177e4
LT
6334void __init sched_init(void)
6335{
dd41f596 6336 int i, j;
434d53b0
MT
6337 unsigned long alloc_size = 0, ptr;
6338
6339#ifdef CONFIG_FAIR_GROUP_SCHED
6340 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6341#endif
6342#ifdef CONFIG_RT_GROUP_SCHED
6343 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 6344#endif
df7c8e84 6345#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 6346 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 6347#endif
434d53b0 6348 if (alloc_size) {
36b7b6d4 6349 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
6350
6351#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 6352 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
6353 ptr += nr_cpu_ids * sizeof(void **);
6354
07e06b01 6355 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 6356 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 6357
6d6bc0ad 6358#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 6359#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6360 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
6361 ptr += nr_cpu_ids * sizeof(void **);
6362
07e06b01 6363 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
6364 ptr += nr_cpu_ids * sizeof(void **);
6365
6d6bc0ad 6366#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
6367#ifdef CONFIG_CPUMASK_OFFSTACK
6368 for_each_possible_cpu(i) {
e6252c3e 6369 per_cpu(load_balance_mask, i) = (void *)ptr;
df7c8e84
RR
6370 ptr += cpumask_size();
6371 }
6372#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 6373 }
dd41f596 6374
57d885fe
GH
6375#ifdef CONFIG_SMP
6376 init_defrootdomain();
6377#endif
6378
d0b27fa7
PZ
6379 init_rt_bandwidth(&def_rt_bandwidth,
6380 global_rt_period(), global_rt_runtime());
6381
6382#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6383 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 6384 global_rt_period(), global_rt_runtime());
6d6bc0ad 6385#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 6386
7c941438 6387#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
6388 list_add(&root_task_group.list, &task_groups);
6389 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 6390 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 6391 autogroup_init(&init_task);
54c707e9 6392
7c941438 6393#endif /* CONFIG_CGROUP_SCHED */
6f505b16 6394
0a945022 6395 for_each_possible_cpu(i) {
70b97a7f 6396 struct rq *rq;
1da177e4
LT
6397
6398 rq = cpu_rq(i);
05fa785c 6399 raw_spin_lock_init(&rq->lock);
7897986b 6400 rq->nr_running = 0;
dce48a84
TG
6401 rq->calc_load_active = 0;
6402 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 6403 init_cfs_rq(&rq->cfs);
6f505b16 6404 init_rt_rq(&rq->rt, rq);
dd41f596 6405#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 6406 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 6407 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 6408 /*
07e06b01 6409 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
6410 *
6411 * In case of task-groups formed through the cgroup filesystem, it
6412 * gets 100% of the cpu resources in the system. This overall
6413 * system cpu resource is divided among the tasks of
07e06b01 6414 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
6415 * based on each entity's (task or task-group's) weight
6416 * (se->load.weight).
6417 *
07e06b01 6418 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
6419 * 1024 and two child groups A0 and A1 (of weight 1024 each),
6420 * then A0's share of the cpu resource is:
6421 *
0d905bca 6422 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 6423 *
07e06b01
YZ
6424 * We achieve this by letting root_task_group's tasks sit
6425 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 6426 */
ab84d31e 6427 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 6428 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
6429#endif /* CONFIG_FAIR_GROUP_SCHED */
6430
6431 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 6432#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 6433 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
07e06b01 6434 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 6435#endif
1da177e4 6436
dd41f596
IM
6437 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6438 rq->cpu_load[j] = 0;
fdf3e95d
VP
6439
6440 rq->last_load_update_tick = jiffies;
6441
1da177e4 6442#ifdef CONFIG_SMP
41c7ce9a 6443 rq->sd = NULL;
57d885fe 6444 rq->rd = NULL;
1399fa78 6445 rq->cpu_power = SCHED_POWER_SCALE;
3f029d3c 6446 rq->post_schedule = 0;
1da177e4 6447 rq->active_balance = 0;
dd41f596 6448 rq->next_balance = jiffies;
1da177e4 6449 rq->push_cpu = 0;
0a2966b4 6450 rq->cpu = i;
1f11eb6a 6451 rq->online = 0;
eae0c9df
MG
6452 rq->idle_stamp = 0;
6453 rq->avg_idle = 2*sysctl_sched_migration_cost;
367456c7
PZ
6454
6455 INIT_LIST_HEAD(&rq->cfs_tasks);
6456
dc938520 6457 rq_attach_root(rq, &def_root_domain);
3451d024 6458#ifdef CONFIG_NO_HZ_COMMON
1c792db7 6459 rq->nohz_flags = 0;
83cd4fe2 6460#endif
265f22a9
FW
6461#ifdef CONFIG_NO_HZ_FULL
6462 rq->last_sched_tick = 0;
6463#endif
1da177e4 6464#endif
8f4d37ec 6465 init_rq_hrtick(rq);
1da177e4 6466 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
6467 }
6468
2dd73a4f 6469 set_load_weight(&init_task);
b50f60ce 6470
e107be36
AK
6471#ifdef CONFIG_PREEMPT_NOTIFIERS
6472 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6473#endif
6474
b50f60ce 6475#ifdef CONFIG_RT_MUTEXES
732375c6 6476 plist_head_init(&init_task.pi_waiters);
b50f60ce
HC
6477#endif
6478
1da177e4
LT
6479 /*
6480 * The boot idle thread does lazy MMU switching as well:
6481 */
6482 atomic_inc(&init_mm.mm_count);
6483 enter_lazy_tlb(&init_mm, current);
6484
6485 /*
6486 * Make us the idle thread. Technically, schedule() should not be
6487 * called from this thread, however somewhere below it might be,
6488 * but because we are the idle thread, we just pick up running again
6489 * when this runqueue becomes "idle".
6490 */
6491 init_idle(current, smp_processor_id());
dce48a84
TG
6492
6493 calc_load_update = jiffies + LOAD_FREQ;
6494
dd41f596
IM
6495 /*
6496 * During early bootup we pretend to be a normal task:
6497 */
6498 current->sched_class = &fair_sched_class;
6892b75e 6499
bf4d83f6 6500#ifdef CONFIG_SMP
4cb98839 6501 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
6502 /* May be allocated at isolcpus cmdline parse time */
6503 if (cpu_isolated_map == NULL)
6504 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 6505 idle_thread_set_boot_cpu();
029632fb
PZ
6506#endif
6507 init_sched_fair_class();
6a7b3dc3 6508
6892b75e 6509 scheduler_running = 1;
1da177e4
LT
6510}
6511
d902db1e 6512#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
6513static inline int preempt_count_equals(int preempt_offset)
6514{
234da7bc 6515 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 6516
4ba8216c 6517 return (nested == preempt_offset);
e4aafea2
FW
6518}
6519
d894837f 6520void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 6521{
1da177e4
LT
6522 static unsigned long prev_jiffy; /* ratelimiting */
6523
b3fbab05 6524 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
e4aafea2
FW
6525 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6526 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
6527 return;
6528 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6529 return;
6530 prev_jiffy = jiffies;
6531
3df0fc5b
PZ
6532 printk(KERN_ERR
6533 "BUG: sleeping function called from invalid context at %s:%d\n",
6534 file, line);
6535 printk(KERN_ERR
6536 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6537 in_atomic(), irqs_disabled(),
6538 current->pid, current->comm);
aef745fc
IM
6539
6540 debug_show_held_locks(current);
6541 if (irqs_disabled())
6542 print_irqtrace_events(current);
6543 dump_stack();
1da177e4
LT
6544}
6545EXPORT_SYMBOL(__might_sleep);
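__might_sleep() is the backend of the might_sleep() debug annotation. A hedged kernel-context sketch of how a blocking helper would use it (my_alloc_may_block is a hypothetical function, not a kernel API):

#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical helper: annotate that this function may block, so
 * CONFIG_DEBUG_ATOMIC_SLEEP flags callers in atomic context. */
static void *my_alloc_may_block(size_t size)
{
	might_sleep(); /* invokes __might_sleep(__FILE__, __LINE__, 0) when enabled */
	return kmalloc(size, GFP_KERNEL);
}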
6546#endif
6547
6548#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
6549static void normalize_task(struct rq *rq, struct task_struct *p)
6550{
da7a735e
PZ
6551 const struct sched_class *prev_class = p->sched_class;
6552 int old_prio = p->prio;
3a5e4dc1 6553 int on_rq;
3e51f33f 6554
fd2f4419 6555 on_rq = p->on_rq;
3a5e4dc1 6556 if (on_rq)
4ca9b72b 6557 dequeue_task(rq, p, 0);
3a5e4dc1
AK
6558 __setscheduler(rq, p, SCHED_NORMAL, 0);
6559 if (on_rq) {
4ca9b72b 6560 enqueue_task(rq, p, 0);
3a5e4dc1
AK
6561 resched_task(rq->curr);
6562 }
da7a735e
PZ
6563
6564 check_class_changed(rq, p, prev_class, old_prio);
3a5e4dc1
AK
6565}
6566
1da177e4
LT
6567void normalize_rt_tasks(void)
6568{
a0f98a1c 6569 struct task_struct *g, *p;
1da177e4 6570 unsigned long flags;
70b97a7f 6571 struct rq *rq;
1da177e4 6572
4cf5d77a 6573 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 6574 do_each_thread(g, p) {
178be793
IM
6575 /*
6576 * Only normalize user tasks:
6577 */
6578 if (!p->mm)
6579 continue;
6580
6cfb0d5d 6581 p->se.exec_start = 0;
6cfb0d5d 6582#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
6583 p->se.statistics.wait_start = 0;
6584 p->se.statistics.sleep_start = 0;
6585 p->se.statistics.block_start = 0;
6cfb0d5d 6586#endif
dd41f596
IM
6587
6588 if (!rt_task(p)) {
6589 /*
6590 * Renice negative nice level userspace
6591 * tasks back to 0:
6592 */
6593 if (TASK_NICE(p) < 0 && p->mm)
6594 set_user_nice(p, 0);
1da177e4 6595 continue;
dd41f596 6596 }
1da177e4 6597
1d615482 6598 raw_spin_lock(&p->pi_lock);
b29739f9 6599 rq = __task_rq_lock(p);
1da177e4 6600
178be793 6601 normalize_task(rq, p);
3a5e4dc1 6602
b29739f9 6603 __task_rq_unlock(rq);
1d615482 6604 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
6605 } while_each_thread(g, p);
6606
4cf5d77a 6607 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
6608}
6609
6610#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 6611
67fc4e0c 6612#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 6613/*
67fc4e0c 6614 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
6615 *
6616 * They can only be called when the whole system has been
6617 * stopped - every CPU needs to be quiescent, and no scheduling
6618 * activity can take place. Using them for anything else would
6619 * be a serious bug, and as a result, they aren't even visible
6620 * under any other configuration.
6621 */
6622
6623/**
6624 * curr_task - return the current task for a given cpu.
6625 * @cpu: the processor in question.
6626 *
6627 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6628 */
36c8b586 6629struct task_struct *curr_task(int cpu)
1df5c10a
LT
6630{
6631 return cpu_curr(cpu);
6632}
6633
67fc4e0c
JW
6634#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
6635
6636#ifdef CONFIG_IA64
1df5c10a
LT
6637/**
6638 * set_curr_task - set the current task for a given cpu.
6639 * @cpu: the processor in question.
6640 * @p: the task pointer to set.
6641 *
6642 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
6643 * are serviced on a separate stack. It allows the architecture to switch the
6644 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
6645 * must be called with all CPUs synchronized and interrupts disabled; the
6646 * caller must save the original value of the current task (see
6647 * curr_task() above) and restore that value before reenabling interrupts and
6648 * re-starting the system.
6649 *
6650 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6651 */
36c8b586 6652void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
6653{
6654 cpu_curr(cpu) = p;
6655}
6656
6657#endif
29f59db3 6658
7c941438 6659#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
6660/* task_group_lock serializes the addition/removal of task groups */
6661static DEFINE_SPINLOCK(task_group_lock);
6662
bccbe08a
PZ
6663static void free_sched_group(struct task_group *tg)
6664{
6665 free_fair_sched_group(tg);
6666 free_rt_sched_group(tg);
e9aa1dd1 6667 autogroup_free(tg);
bccbe08a
PZ
6668 kfree(tg);
6669}
6670
6671/* allocate runqueue etc for a new task group */
ec7dc8ac 6672struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
6673{
6674 struct task_group *tg;
bccbe08a
PZ
6675
6676 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
6677 if (!tg)
6678 return ERR_PTR(-ENOMEM);
6679
ec7dc8ac 6680 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
6681 goto err;
6682
ec7dc8ac 6683 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
6684 goto err;
6685
ace783b9
LZ
6686 return tg;
6687
6688err:
6689 free_sched_group(tg);
6690 return ERR_PTR(-ENOMEM);
6691}
6692
6693void sched_online_group(struct task_group *tg, struct task_group *parent)
6694{
6695 unsigned long flags;
6696
8ed36996 6697 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 6698 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
6699
6700 WARN_ON(!parent); /* root should already exist */
6701
6702 tg->parent = parent;
f473aa5e 6703 INIT_LIST_HEAD(&tg->children);
09f2724a 6704 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 6705 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
6706}
6707
9b5b7751 6708/* rcu callback to free various structures associated with a task group */
6f505b16 6709static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 6710{
29f59db3 6711 /* now it should be safe to free those cfs_rqs */
6f505b16 6712 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
6713}
6714
9b5b7751 6715/* Destroy the runqueue etc. associated with a task group */
4cf86d77 6716void sched_destroy_group(struct task_group *tg)
ace783b9
LZ
6717{
6718 /* wait for possible concurrent references to cfs_rqs complete */
6719 call_rcu(&tg->rcu, free_sched_group_rcu);
6720}
6721
6722void sched_offline_group(struct task_group *tg)
29f59db3 6723{
8ed36996 6724 unsigned long flags;
9b5b7751 6725 int i;
29f59db3 6726
3d4b47b4
PZ
6727 /* end participation in shares distribution */
6728 for_each_possible_cpu(i)
bccbe08a 6729 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
6730
6731 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 6732 list_del_rcu(&tg->list);
f473aa5e 6733 list_del_rcu(&tg->siblings);
8ed36996 6734 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
6735}
6736
9b5b7751 6737/* Change a task's runqueue when it moves between groups.
3a252015
IM
6738 * The caller of this function should have put the task in its new group
6739 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
6740 * reflect its new group.
9b5b7751
SV
6741 */
6742void sched_move_task(struct task_struct *tsk)
29f59db3 6743{
8323f26c 6744 struct task_group *tg;
29f59db3
SV
6745 int on_rq, running;
6746 unsigned long flags;
6747 struct rq *rq;
6748
6749 rq = task_rq_lock(tsk, &flags);
6750
051a1d1a 6751 running = task_current(rq, tsk);
fd2f4419 6752 on_rq = tsk->on_rq;
29f59db3 6753
0e1f3483 6754 if (on_rq)
29f59db3 6755 dequeue_task(rq, tsk, 0);
0e1f3483
HS
6756 if (unlikely(running))
6757 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 6758
8323f26c
PZ
6759 tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
6760 lockdep_is_held(&tsk->sighand->siglock)),
6761 struct task_group, css);
6762 tg = autogroup_task_group(tsk, tg);
6763 tsk->sched_task_group = tg;
6764
810b3817 6765#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02
PZ
6766 if (tsk->sched_class->task_move_group)
6767 tsk->sched_class->task_move_group(tsk, on_rq);
6768 else
810b3817 6769#endif
b2b5ce02 6770 set_task_rq(tsk, task_cpu(tsk));
810b3817 6771
0e1f3483
HS
6772 if (unlikely(running))
6773 tsk->sched_class->set_curr_task(rq);
6774 if (on_rq)
371fd7e7 6775 enqueue_task(rq, tsk, 0);
29f59db3 6776
0122ec5b 6777 task_rq_unlock(rq, tsk, &flags);
29f59db3 6778}
7c941438 6779#endif /* CONFIG_CGROUP_SCHED */
29f59db3 6780
a790de99 6781#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
9f0c1e56
PZ
6782static unsigned long to_ratio(u64 period, u64 runtime)
6783{
6784 if (runtime == RUNTIME_INF)
9a7e0b18 6785 return 1ULL << 20;
9f0c1e56 6786
9a7e0b18 6787 return div64_u64(runtime << 20, period);
9f0c1e56 6788}
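to_ratio() expresses runtime/period as a 20-bit fixed-point fraction, so 1ULL << 20 stands for 100%. A userspace check of the arithmetic (a sketch that copies the formula, not the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the fixed-point ratio: (runtime / period) << 20. */
static unsigned long to_ratio(uint64_t period, uint64_t runtime)
{
	return (unsigned long)((runtime << 20) / period);
}

int main(void)
{
	/* Default RT bandwidth: 950000us runtime per 1000000us period. */
	unsigned long r = to_ratio(1000000, 950000);

	printf("ratio = %lu (%.2f%% of 1<<20)\n",
	       r, 100.0 * r / (1UL << 20)); /* ~996147, i.e. 95% */
	return 0;
}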
a790de99
PT
6789#endif
6790
6791#ifdef CONFIG_RT_GROUP_SCHED
6792/*
6793 * Ensure that the real time constraints are schedulable.
6794 */
6795static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 6796
9a7e0b18
PZ
6797/* Must be called with tasklist_lock held */
6798static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 6799{
9a7e0b18 6800 struct task_struct *g, *p;
b40b2e8e 6801
9a7e0b18 6802 do_each_thread(g, p) {
029632fb 6803 if (rt_task(p) && task_rq(p)->rt.tg == tg)
9a7e0b18
PZ
6804 return 1;
6805 } while_each_thread(g, p);
b40b2e8e 6806
9a7e0b18
PZ
6807 return 0;
6808}
b40b2e8e 6809
9a7e0b18
PZ
6810struct rt_schedulable_data {
6811 struct task_group *tg;
6812 u64 rt_period;
6813 u64 rt_runtime;
6814};
b40b2e8e 6815
a790de99 6816static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
6817{
6818 struct rt_schedulable_data *d = data;
6819 struct task_group *child;
6820 unsigned long total, sum = 0;
6821 u64 period, runtime;
b40b2e8e 6822
9a7e0b18
PZ
6823 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6824 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 6825
9a7e0b18
PZ
6826 if (tg == d->tg) {
6827 period = d->rt_period;
6828 runtime = d->rt_runtime;
b40b2e8e 6829 }
b40b2e8e 6830
4653f803
PZ
6831 /*
6832 * Cannot have more runtime than the period.
6833 */
6834 if (runtime > period && runtime != RUNTIME_INF)
6835 return -EINVAL;
6f505b16 6836
4653f803
PZ
6837 /*
6838 * Ensure we don't starve existing RT tasks.
6839 */
9a7e0b18
PZ
6840 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
6841 return -EBUSY;
6f505b16 6842
9a7e0b18 6843 total = to_ratio(period, runtime);
6f505b16 6844
4653f803
PZ
6845 /*
6846 * Nobody can have more than the global setting allows.
6847 */
6848 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
6849 return -EINVAL;
6f505b16 6850
4653f803
PZ
6851 /*
6852 * The sum of our children's runtime should not exceed our own.
6853 */
9a7e0b18
PZ
6854 list_for_each_entry_rcu(child, &tg->children, siblings) {
6855 period = ktime_to_ns(child->rt_bandwidth.rt_period);
6856 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 6857
9a7e0b18
PZ
6858 if (child == d->tg) {
6859 period = d->rt_period;
6860 runtime = d->rt_runtime;
6861 }
6f505b16 6862
9a7e0b18 6863 sum += to_ratio(period, runtime);
9f0c1e56 6864 }
6f505b16 6865
9a7e0b18
PZ
6866 if (sum > total)
6867 return -EINVAL;
6868
6869 return 0;
6f505b16
PZ
6870}
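A worked instance of the check above, assuming the default global allocation of 0.95s runtime per 1s period: the parent's total is to_ratio(1s, 0.95s) ~= 0.95 * 2^20. Two children each requesting 0.6s/1s contribute 0.6 * 2^20 apiece, so sum ~= 1.2 * 2^20 exceeds total and tg_rt_schedulable() returns -EINVAL; 0.4s/1s each (sum 0.8 * 2^20) would pass.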
6871
9a7e0b18 6872static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 6873{
8277434e
PT
6874 int ret;
6875
9a7e0b18
PZ
6876 struct rt_schedulable_data data = {
6877 .tg = tg,
6878 .rt_period = period,
6879 .rt_runtime = runtime,
6880 };
6881
8277434e
PT
6882 rcu_read_lock();
6883 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
6884 rcu_read_unlock();
6885
6886 return ret;
521f1a24
DG
6887}
6888
ab84d31e 6889static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 6890 u64 rt_period, u64 rt_runtime)
6f505b16 6891{
ac086bc2 6892 int i, err = 0;
9f0c1e56 6893
9f0c1e56 6894 mutex_lock(&rt_constraints_mutex);
521f1a24 6895 read_lock(&tasklist_lock);
9a7e0b18
PZ
6896 err = __rt_schedulable(tg, rt_period, rt_runtime);
6897 if (err)
9f0c1e56 6898 goto unlock;
ac086bc2 6899
0986b11b 6900 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
6901 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
6902 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
6903
6904 for_each_possible_cpu(i) {
6905 struct rt_rq *rt_rq = tg->rt_rq[i];
6906
0986b11b 6907 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 6908 rt_rq->rt_runtime = rt_runtime;
0986b11b 6909 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 6910 }
0986b11b 6911 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 6912unlock:
521f1a24 6913 read_unlock(&tasklist_lock);
9f0c1e56
PZ
6914 mutex_unlock(&rt_constraints_mutex);
6915
6916 return err;
6f505b16
PZ
6917}
6918
25cc7da7 6919static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
d0b27fa7
PZ
6920{
6921 u64 rt_runtime, rt_period;
6922
6923 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6924 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
6925 if (rt_runtime_us < 0)
6926 rt_runtime = RUNTIME_INF;
6927
ab84d31e 6928 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
6929}
6930
25cc7da7 6931static long sched_group_rt_runtime(struct task_group *tg)
9f0c1e56
PZ
6932{
6933 u64 rt_runtime_us;
6934
d0b27fa7 6935 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
6936 return -1;
6937
d0b27fa7 6938 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
6939 do_div(rt_runtime_us, NSEC_PER_USEC);
6940 return rt_runtime_us;
6941}
d0b27fa7 6942
25cc7da7 6943static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
d0b27fa7
PZ
6944{
6945 u64 rt_runtime, rt_period;
6946
6947 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
6948 rt_runtime = tg->rt_bandwidth.rt_runtime;
6949
619b0488
R
6950 if (rt_period == 0)
6951 return -EINVAL;
6952
ab84d31e 6953 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
6954}
6955
25cc7da7 6956static long sched_group_rt_period(struct task_group *tg)
d0b27fa7
PZ
6957{
6958 u64 rt_period_us;
6959
6960 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
6961 do_div(rt_period_us, NSEC_PER_USEC);
6962 return rt_period_us;
6963}
6964
6965static int sched_rt_global_constraints(void)
6966{
4653f803 6967 u64 runtime, period;
d0b27fa7
PZ
6968 int ret = 0;
6969
ec5d4989
HS
6970 if (sysctl_sched_rt_period <= 0)
6971 return -EINVAL;
6972
4653f803
PZ
6973 runtime = global_rt_runtime();
6974 period = global_rt_period();
6975
6976 /*
6977 * Sanity check on the sysctl variables.
6978 */
6979 if (runtime > period && runtime != RUNTIME_INF)
6980 return -EINVAL;
10b612f4 6981
d0b27fa7 6982 mutex_lock(&rt_constraints_mutex);
9a7e0b18 6983 read_lock(&tasklist_lock);
4653f803 6984 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 6985 read_unlock(&tasklist_lock);
d0b27fa7
PZ
6986 mutex_unlock(&rt_constraints_mutex);
6987
6988 return ret;
6989}
54e99124 6990
25cc7da7 6991static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
54e99124
DG
6992{
6993 /* Don't accept realtime tasks when there is no way for them to run */
6994 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
6995 return 0;
6996
6997 return 1;
6998}
6999
6d6bc0ad 7000#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7001static int sched_rt_global_constraints(void)
7002{
ac086bc2
PZ
7003 unsigned long flags;
7004 int i;
7005
ec5d4989
HS
7006 if (sysctl_sched_rt_period <= 0)
7007 return -EINVAL;
7008
60aa605d
PZ
7009 /*
7010 * There's always some RT tasks in the root group
7011 * -- migration, kstopmachine etc..
7012 */
7013 if (sysctl_sched_rt_runtime == 0)
7014 return -EBUSY;
7015
0986b11b 7016 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
7017 for_each_possible_cpu(i) {
7018 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7019
0986b11b 7020 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7021 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 7022 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7023 }
0986b11b 7024 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 7025
d0b27fa7
PZ
7026 return 0;
7027}
#endif /* CONFIG_RT_GROUP_SCHED */

int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * The timeslice is kept internally in jiffies; writing zero
	 * resets it to the default (RR_TIMESLICE).
	 */
	if (!ret && write) {
		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
	}
	mutex_unlock(&mutex);
	return ret;
}
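
/*
 * An illustrative sketch: sched_rr_timeslice is exposed via a sysctl whose
 * handler is above; written values are interpreted as milliseconds and
 * converted to jiffies, and writing zero (or a negative value) restores the
 * built-in RR_TIMESLICE default. The procfs path below is the usual wiring
 * but is assumed by this example, not defined here.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "w");

	if (!f)
		return 1;
	fprintf(f, "%d", 0);	/* 0 resets the RR timeslice to the default */
	return fclose(f);
}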

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}
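
/*
 * An illustrative sketch: the handler above guards the global RT throttling
 * knobs, conventionally exposed as sched_rt_period_us and
 * sched_rt_runtime_us under /proc/sys/kernel with defaults of 1000000 and
 * 950000 (RT tasks may consume at most 950ms of every second). Reading them
 * back from userspace; the paths are assumptions of this example.
 */
#include <stdio.h>

int main(void)
{
	long period_us = 0, runtime_us = 0;
	FILE *p = fopen("/proc/sys/kernel/sched_rt_period_us", "r");
	FILE *r = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r");

	if (!p || !r)
		return 1;
	fscanf(p, "%ld", &period_us);
	fscanf(r, "%ld", &runtime_us);	/* -1 disables RT throttling */
	printf("RT: %ld us runtime per %ld us period\n",
	       runtime_us, period_us);
	fclose(p);
	fclose(r);
	return 0;
}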

#ifdef CONFIG_CGROUP_SCHED

/* Return the task_group corresponding to a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}

static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static int cpu_cgroup_css_online(struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);
	struct task_group *parent;

	if (!cgrp->parent)
		return 0;

	parent = cgroup_tg(cgrp->parent);
	sched_online_group(tg, parent);
	return 0;
}

static void cpu_cgroup_css_free(struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}

static void cpu_cgroup_css_offline(struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_offline_group(tg);
}

static int cpu_cgroup_can_attach(struct cgroup *cgrp,
				 struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
			return -EINVAL;
#else
		/* We don't support RT-tasks being in separate groups */
		if (task->sched_class != &fair_sched_class)
			return -EINVAL;
#endif
	}
	return 0;
}

static void cpu_cgroup_attach(struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		sched_move_task(task);
}

static void
cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
		struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	sched_move_task(task);
}
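
/*
 * An illustrative sketch: cpu_cgroup_can_attach() vetoes a move and
 * cpu_cgroup_attach() performs it; both run when a pid is written to a cpu
 * cgroup's tasks file. Moving the current process, with the mount point and
 * group name assumed by this example:
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygrp/tasks", "w");

	if (!f)
		return 1;
	fprintf(f, "%d", getpid());	/* can_attach runs, then attach */
	return fclose(f);
}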

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) scale_load_down(tg->shares);
}
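
/*
 * An illustrative sketch: cpu.shares is a relative weight (1024 by
 * default); scale_load()/scale_load_down() only convert between the
 * user-visible value and the kernel's internal load resolution. Under
 * contention, a group with 2048 shares receives roughly twice the CPU of a
 * sibling with 1024. Paths and group names are assumptions of this example.
 */
#include <stdio.h>

static int write_shares(const char *path, unsigned long shares)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%lu", shares);
	return fclose(f);
}

int main(void)
{
	write_shares("/sys/fs/cgroup/cpu/fast/cpu.shares", 2048);
	write_shares("/sys/fs/cgroup/cpu/slow/cpu.shares", 1024);
	return 0;
}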

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have some amount of bandwidth every period. This is
	 * to prevent reaching a state of large arrears when throttled via
	 * entity_tick(), resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane
	 * quota periods. This also allows us to normalize in computing
	 * quota feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;

	__refill_cfs_bandwidth_runtime(cfs_b);
	/* restart the period timer (if active) to handle new period expiry */
	if (runtime_enabled && cfs_b->timer_active) {
		/* force a reprogram */
		cfs_b->timer_active = 0;
		__start_cfs_bandwidth(cfs_b);
	}
	raw_spin_unlock_irq(&cfs_b->lock);

	for_each_possible_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		raw_spin_lock_irq(&rq->lock);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		raw_spin_unlock_irq(&rq->lock);
	}
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);

	return ret;
}
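
/*
 * A worked example of the bounds enforced above, for illustration: quota
 * and period must both be at least min_cfs_quota_period (1ms) and the
 * period at most max_cfs_quota_period (1s), all in nanoseconds.
 */
#include <assert.h>

static void cfs_bounds_example(void)
{
	unsigned long long min_p = 1000000ULL;		/* 1ms, in ns */
	unsigned long long max_p = 1000000000ULL;	/* 1s, in ns */
	unsigned long long quota = 25000000ULL;		/* 25ms */
	unsigned long long period = 100000000ULL;	/* 100ms */

	assert(quota >= min_p && period >= min_p);	/* passes the floor */
	assert(period <= max_p);			/* passes the cap */
	/* a 500us quota or a 2s period would be rejected with -EINVAL */
}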

int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}
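
/*
 * An illustrative sketch: quota and period are exposed in microseconds, and
 * quota/period is the fraction of CPU the group may consume, with values
 * above 1.0 spreading across CPUs; quota=250000 with period=100000 grants
 * 2.5 CPUs worth of runtime. The paths are assumptions of this example.
 */
#include <stdio.h>

static int write_val(const char *path, long val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld", val);
	return fclose(f);
}

int main(void)
{
	write_val("/sys/fs/cgroup/cpu/batch/cpu.cfs_period_us", 100000);
	write_val("/sys/fs/cgroup/cpu/batch/cpu.cfs_quota_us", 250000);
	/* writing -1 to cpu.cfs_quota_us restores RUNTIME_INF (no limit) */
	return 0;
}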

static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
{
	return tg_get_cfs_quota(cgroup_tg(cgrp));
}

static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
				   s64 cfs_quota_us)
{
	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	return tg_get_cfs_period(cgroup_tg(cgrp));
}

static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				    u64 cfs_period_us)
{
	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * Normalize group quota/period to be quota/max_period.
 * Note: units are usecs.
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}
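
/*
 * A worked example, for illustration: to_ratio() expresses quota/period as
 * a fixed-point fraction (scaled by 2^20 by convention in this scheduler),
 * so differently sized period/quota pairs that describe the same bandwidth
 * normalize to the same value.
 */
#include <assert.h>

static unsigned long long ratio_sketch(unsigned long long period_us,
				       unsigned long long quota_us)
{
	return (quota_us << 20) / period_us;	/* mirrors to_ratio() */
}

static void normalize_example(void)
{
	/* 25ms/100ms and 250ms/1s both describe 25% of one CPU */
	assert(ratio_sketch(100000, 25000) == ratio_sketch(1000000, 250000));
	assert(ratio_sketch(100000, 25000) == (1ULL << 20) / 4);
}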

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchal_quota;

		/*
		 * Ensure max(child_quota) <= parent_quota; inherit when no
		 * limit is set.
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchal_quota = quota;

	return 0;
}
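
/*
 * A worked example of the hierarchy rule enforced above, for illustration:
 * with a parent limited to 50ms per 100ms, a child asking for 80ms/100ms is
 * rejected with -EINVAL, a child with no limit inherits the parent's 50%,
 * and a child asking for 20ms/100ms is accepted.
 */
#include <assert.h>

static void hierarchy_example(void)
{
	long long parent   = (50LL << 20) / 100;	/* 50ms/100ms */
	long long child_hi = (80LL << 20) / 100;	/* 80ms/100ms */
	long long child_lo = (20LL << 20) / 100;	/* 20ms/100ms */

	assert(child_hi > parent);	/* the tree walk returns -EINVAL */
	assert(child_lo <= parent);	/* accepted; child keeps its quota */
}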

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
			  struct cgroup_map_cb *cb)
{
	struct task_group *tg = cgroup_tg(cgrp);
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
	cb->fill(cb, "throttled_time", cfs_b->throttled_time);

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
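
/*
 * An illustrative sketch: cpu_stats_show() backs the cpu.stat cgroup file,
 * emitting one "key value" pair per line. Parsing the throttling counters
 * from userspace, with the path assumed by this example (throttled_time is
 * in nanoseconds):
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char key[32];
	long long val;
	FILE *f = fopen("/sys/fs/cgroup/cpu/batch/cpu.stat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%31s %lld", key, &val) == 2) {
		if (!strcmp(key, "nr_throttled"))
			printf("throttled %lld times\n", val);
		else if (!strcmp(key, "throttled_time"))
			printf("throttled for %lld ns\n", val);
	}
	fclose(f);
	return 0;
}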

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
				    u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "stat",
		.read_map = cpu_stats_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgroup_subsys = {
	.name		= "cpu",
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_free	= cpu_cgroup_css_free,
	.css_online	= cpu_cgroup_css_online,
	.css_offline	= cpu_cgroup_css_offline,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.exit		= cpu_cgroup_exit,
	.subsys_id	= cpu_cgroup_subsys_id,
	.base_cftypes	= cpu_files,
	.early_init	= 1,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}