| 1 | /* |
| 2 | * kernel/sched/core.c |
| 3 | * |
| 4 | * Kernel scheduler and related syscalls |
| 5 | * |
| 6 | * Copyright (C) 1991-2002 Linus Torvalds |
| 7 | * |
| 8 | * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and |
| 9 | * make semaphores SMP safe |
| 10 | * 1998-11-19 Implemented schedule_timeout() and related stuff |
| 11 | * by Andrea Arcangeli |
| 12 | * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: |
| 13 | * hybrid priority-list and round-robin design with |
| 14 | * an array-switch method of distributing timeslices |
| 15 | * and per-CPU runqueues. Cleanups and useful suggestions |
| 16 | * by Davide Libenzi, preemptible kernel bits by Robert Love. |
| 17 | * 2003-09-03 Interactivity tuning by Con Kolivas. |
| 18 | * 2004-04-02 Scheduler domains code by Nick Piggin |
| 19 | * 2007-04-15 Work begun on replacing all interactivity tuning with a |
| 20 | * fair scheduling design by Con Kolivas. |
| 21 | * 2007-05-05 Load balancing (smp-nice) and other improvements |
| 22 | * by Peter Williams |
| 23 | * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith |
| 24 | * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri |
| 25 | * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, |
| 26 | * Thomas Gleixner, Mike Kravetz |
| 27 | */ |
| 28 | |
| 29 | #include <linux/mm.h> |
| 30 | #include <linux/module.h> |
| 31 | #include <linux/nmi.h> |
| 32 | #include <linux/init.h> |
| 33 | #include <linux/uaccess.h> |
| 34 | #include <linux/highmem.h> |
| 35 | #include <asm/mmu_context.h> |
| 36 | #include <linux/interrupt.h> |
| 37 | #include <linux/capability.h> |
| 38 | #include <linux/completion.h> |
| 39 | #include <linux/kernel_stat.h> |
| 40 | #include <linux/debug_locks.h> |
| 41 | #include <linux/perf_event.h> |
| 42 | #include <linux/security.h> |
| 43 | #include <linux/notifier.h> |
| 44 | #include <linux/profile.h> |
| 45 | #include <linux/freezer.h> |
| 46 | #include <linux/vmalloc.h> |
| 47 | #include <linux/blkdev.h> |
| 48 | #include <linux/delay.h> |
| 49 | #include <linux/pid_namespace.h> |
| 50 | #include <linux/smp.h> |
| 51 | #include <linux/threads.h> |
| 52 | #include <linux/timer.h> |
| 53 | #include <linux/rcupdate.h> |
| 54 | #include <linux/cpu.h> |
| 55 | #include <linux/cpuset.h> |
| 56 | #include <linux/percpu.h> |
| 57 | #include <linux/proc_fs.h> |
| 58 | #include <linux/seq_file.h> |
| 59 | #include <linux/sysctl.h> |
| 60 | #include <linux/syscalls.h> |
| 61 | #include <linux/times.h> |
| 62 | #include <linux/tsacct_kern.h> |
| 63 | #include <linux/kprobes.h> |
| 64 | #include <linux/delayacct.h> |
| 65 | #include <linux/unistd.h> |
| 66 | #include <linux/pagemap.h> |
| 67 | #include <linux/hrtimer.h> |
| 68 | #include <linux/tick.h> |
| 69 | #include <linux/debugfs.h> |
| 70 | #include <linux/ctype.h> |
| 71 | #include <linux/ftrace.h> |
| 72 | #include <linux/slab.h> |
| 73 | #include <linux/init_task.h> |
| 74 | #include <linux/binfmts.h> |
| 75 | #include <linux/context_tracking.h> |
| 76 | #include <linux/compiler.h> |
| 77 | |
| 78 | #include <asm/switch_to.h> |
| 79 | #include <asm/tlb.h> |
| 80 | #include <asm/irq_regs.h> |
| 81 | #include <asm/mutex.h> |
| 82 | #ifdef CONFIG_PARAVIRT |
| 83 | #include <asm/paravirt.h> |
| 84 | #endif |
| 85 | |
| 86 | #include "sched.h" |
| 87 | #include "../workqueue_internal.h" |
| 88 | #include "../smpboot.h" |
| 89 | |
| 90 | #define CREATE_TRACE_POINTS |
| 91 | #include <trace/events/sched.h> |
| 92 | |
| 93 | DEFINE_MUTEX(sched_domains_mutex); |
| 94 | DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
| 95 | |
| 96 | static void update_rq_clock_task(struct rq *rq, s64 delta); |
| 97 | |
| 98 | void update_rq_clock(struct rq *rq) |
| 99 | { |
| 100 | s64 delta; |
| 101 | |
| 102 | lockdep_assert_held(&rq->lock); |
| 103 | |
| 104 | if (rq->clock_skip_update & RQCF_ACT_SKIP) |
| 105 | return; |
| 106 | |
| 107 | delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; |
| 108 | if (delta < 0) |
| 109 | return; |
| 110 | rq->clock += delta; |
| 111 | update_rq_clock_task(rq, delta); |
| 112 | } |
| 113 | |
| 114 | /* |
| 115 | * Debugging: various feature bits |
| 116 | */ |
| 117 | |
| 118 | #define SCHED_FEAT(name, enabled) \ |
| 119 | (1UL << __SCHED_FEAT_##name) * enabled | |
| 120 | |
| 121 | const_debug unsigned int sysctl_sched_features = |
| 122 | #include "features.h" |
| 123 | 0; |
| 124 | |
| 125 | #undef SCHED_FEAT |
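
/*
 * Note on the construct above (explanatory; names used here are only
 * examples): features.h is an X-macro list of lines such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true). With the SCHED_FEAT() definition
 * above, each line expands to
 * "(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |", so including the
 * header builds the default feature bitmask, terminated by the trailing 0.
 * The same header is re-included below with different SCHED_FEAT()
 * definitions to build the feature-name table and the static keys.
 */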
| 126 | |
| 127 | #ifdef CONFIG_SCHED_DEBUG |
| 128 | #define SCHED_FEAT(name, enabled) \ |
| 129 | #name , |
| 130 | |
| 131 | static const char * const sched_feat_names[] = { |
| 132 | #include "features.h" |
| 133 | }; |
| 134 | |
| 135 | #undef SCHED_FEAT |
| 136 | |
| 137 | static int sched_feat_show(struct seq_file *m, void *v) |
| 138 | { |
| 139 | int i; |
| 140 | |
| 141 | for (i = 0; i < __SCHED_FEAT_NR; i++) { |
| 142 | if (!(sysctl_sched_features & (1UL << i))) |
| 143 | seq_puts(m, "NO_"); |
| 144 | seq_printf(m, "%s ", sched_feat_names[i]); |
| 145 | } |
| 146 | seq_puts(m, "\n"); |
| 147 | |
| 148 | return 0; |
| 149 | } |
| 150 | |
| 151 | #ifdef HAVE_JUMP_LABEL |
| 152 | |
| 153 | #define jump_label_key__true STATIC_KEY_INIT_TRUE |
| 154 | #define jump_label_key__false STATIC_KEY_INIT_FALSE |
| 155 | |
| 156 | #define SCHED_FEAT(name, enabled) \ |
| 157 | jump_label_key__##enabled , |
| 158 | |
| 159 | struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { |
| 160 | #include "features.h" |
| 161 | }; |
| 162 | |
| 163 | #undef SCHED_FEAT |
| 164 | |
| 165 | static void sched_feat_disable(int i) |
| 166 | { |
| 167 | if (static_key_enabled(&sched_feat_keys[i])) |
| 168 | static_key_slow_dec(&sched_feat_keys[i]); |
| 169 | } |
| 170 | |
| 171 | static void sched_feat_enable(int i) |
| 172 | { |
| 173 | if (!static_key_enabled(&sched_feat_keys[i])) |
| 174 | static_key_slow_inc(&sched_feat_keys[i]); |
| 175 | } |
| 176 | #else |
| 177 | static void sched_feat_disable(int i) { }; |
| 178 | static void sched_feat_enable(int i) { }; |
| 179 | #endif /* HAVE_JUMP_LABEL */ |
| 180 | |
| 181 | static int sched_feat_set(char *cmp) |
| 182 | { |
| 183 | int i; |
| 184 | int neg = 0; |
| 185 | |
| 186 | if (strncmp(cmp, "NO_", 3) == 0) { |
| 187 | neg = 1; |
| 188 | cmp += 3; |
| 189 | } |
| 190 | |
| 191 | for (i = 0; i < __SCHED_FEAT_NR; i++) { |
| 192 | if (strcmp(cmp, sched_feat_names[i]) == 0) { |
| 193 | if (neg) { |
| 194 | sysctl_sched_features &= ~(1UL << i); |
| 195 | sched_feat_disable(i); |
| 196 | } else { |
| 197 | sysctl_sched_features |= (1UL << i); |
| 198 | sched_feat_enable(i); |
| 199 | } |
| 200 | break; |
| 201 | } |
| 202 | } |
| 203 | |
| 204 | return i; |
| 205 | } |
| 206 | |
| 207 | static ssize_t |
| 208 | sched_feat_write(struct file *filp, const char __user *ubuf, |
| 209 | size_t cnt, loff_t *ppos) |
| 210 | { |
| 211 | char buf[64]; |
| 212 | char *cmp; |
| 213 | int i; |
| 214 | struct inode *inode; |
| 215 | |
| 216 | if (cnt > 63) |
| 217 | cnt = 63; |
| 218 | |
| 219 | if (copy_from_user(&buf, ubuf, cnt)) |
| 220 | return -EFAULT; |
| 221 | |
| 222 | buf[cnt] = 0; |
| 223 | cmp = strstrip(buf); |
| 224 | |
| 225 | /* Ensure the static_key remains in a consistent state */ |
| 226 | inode = file_inode(filp); |
| 227 | mutex_lock(&inode->i_mutex); |
| 228 | i = sched_feat_set(cmp); |
| 229 | mutex_unlock(&inode->i_mutex); |
| 230 | if (i == __SCHED_FEAT_NR) |
| 231 | return -EINVAL; |
| 232 | |
| 233 | *ppos += cnt; |
| 234 | |
| 235 | return cnt; |
| 236 | } |
| 237 | |
| 238 | static int sched_feat_open(struct inode *inode, struct file *filp) |
| 239 | { |
| 240 | return single_open(filp, sched_feat_show, NULL); |
| 241 | } |
| 242 | |
| 243 | static const struct file_operations sched_feat_fops = { |
| 244 | .open = sched_feat_open, |
| 245 | .write = sched_feat_write, |
| 246 | .read = seq_read, |
| 247 | .llseek = seq_lseek, |
| 248 | .release = single_release, |
| 249 | }; |
| 250 | |
| 251 | static __init int sched_init_debug(void) |
| 252 | { |
| 253 | debugfs_create_file("sched_features", 0644, NULL, NULL, |
| 254 | &sched_feat_fops); |
| 255 | |
| 256 | return 0; |
| 257 | } |
| 258 | late_initcall(sched_init_debug); |
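
/*
 * Illustrative usage of the debugfs file created above (assuming debugfs is
 * mounted at /sys/kernel/debug; the feature names come from features.h and
 * vary with the kernel config):
 *
 *   # cat /sys/kernel/debug/sched_features
 *   GENTLE_FAIR_SLEEPERS START_DEBIT ... TTWU_QUEUE ...
 *   # echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features
 *
 * Writing a bare feature name sets it; the "NO_" prefix clears it, matching
 * sched_feat_set() above.
 */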
| 259 | #endif /* CONFIG_SCHED_DEBUG */ |
| 260 | |
| 261 | /* |
| 262 | * Number of tasks to iterate in a single balance run. |
| 263 | * Limited because this is done with IRQs disabled. |
| 264 | */ |
| 265 | const_debug unsigned int sysctl_sched_nr_migrate = 32; |
| 266 | |
| 267 | /* |
| 268 | * period over which we average the RT time consumption, measured |
| 269 | * in ms. |
| 270 | * |
| 271 | * default: 1s |
| 272 | */ |
| 273 | const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; |
| 274 | |
| 275 | /* |
| 276 | * period over which we measure -rt task cpu usage in us. |
| 277 | * default: 1s |
| 278 | */ |
| 279 | unsigned int sysctl_sched_rt_period = 1000000; |
| 280 | |
| 281 | __read_mostly int scheduler_running; |
| 282 | |
| 283 | /* |
| 284 | * part of the period that we allow rt tasks to run in us. |
| 285 | * default: 0.95s |
| 286 | */ |
| 287 | int sysctl_sched_rt_runtime = 950000; |
| 288 | |
| 289 | /* cpus with isolated domains */ |
| 290 | cpumask_var_t cpu_isolated_map; |
| 291 | |
| 292 | /* |
| 293 | * this_rq_lock - lock this runqueue and disable interrupts. |
| 294 | */ |
| 295 | static struct rq *this_rq_lock(void) |
| 296 | __acquires(rq->lock) |
| 297 | { |
| 298 | struct rq *rq; |
| 299 | |
| 300 | local_irq_disable(); |
| 301 | rq = this_rq(); |
| 302 | raw_spin_lock(&rq->lock); |
| 303 | |
| 304 | return rq; |
| 305 | } |
| 306 | |
| 307 | #ifdef CONFIG_SCHED_HRTICK |
| 308 | /* |
| 309 | * Use HR-timers to deliver accurate preemption points. |
| 310 | */ |
| 311 | |
| 312 | static void hrtick_clear(struct rq *rq) |
| 313 | { |
| 314 | if (hrtimer_active(&rq->hrtick_timer)) |
| 315 | hrtimer_cancel(&rq->hrtick_timer); |
| 316 | } |
| 317 | |
| 318 | /* |
| 319 | * High-resolution timer tick. |
| 320 | * Runs from hardirq context with interrupts disabled. |
| 321 | */ |
| 322 | static enum hrtimer_restart hrtick(struct hrtimer *timer) |
| 323 | { |
| 324 | struct rq *rq = container_of(timer, struct rq, hrtick_timer); |
| 325 | |
| 326 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); |
| 327 | |
| 328 | raw_spin_lock(&rq->lock); |
| 329 | update_rq_clock(rq); |
| 330 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); |
| 331 | raw_spin_unlock(&rq->lock); |
| 332 | |
| 333 | return HRTIMER_NORESTART; |
| 334 | } |
| 335 | |
| 336 | #ifdef CONFIG_SMP |
| 337 | |
| 338 | static void __hrtick_restart(struct rq *rq) |
| 339 | { |
| 340 | struct hrtimer *timer = &rq->hrtick_timer; |
| 341 | |
| 342 | hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); |
| 343 | } |
| 344 | |
| 345 | /* |
| 346 | * called from hardirq (IPI) context |
| 347 | */ |
| 348 | static void __hrtick_start(void *arg) |
| 349 | { |
| 350 | struct rq *rq = arg; |
| 351 | |
| 352 | raw_spin_lock(&rq->lock); |
| 353 | __hrtick_restart(rq); |
| 354 | rq->hrtick_csd_pending = 0; |
| 355 | raw_spin_unlock(&rq->lock); |
| 356 | } |
| 357 | |
| 358 | /* |
| 359 | * Called to set the hrtick timer state. |
| 360 | * |
| 361 | * called with rq->lock held and irqs disabled |
| 362 | */ |
| 363 | void hrtick_start(struct rq *rq, u64 delay) |
| 364 | { |
| 365 | struct hrtimer *timer = &rq->hrtick_timer; |
| 366 | ktime_t time; |
| 367 | s64 delta; |
| 368 | |
| 369 | /* |
 * Don't schedule slices shorter than 10000 ns; that just
 * doesn't make sense and can cause timer DoS.
| 372 | */ |
| 373 | delta = max_t(s64, delay, 10000LL); |
| 374 | time = ktime_add_ns(timer->base->get_time(), delta); |
| 375 | |
| 376 | hrtimer_set_expires(timer, time); |
| 377 | |
| 378 | if (rq == this_rq()) { |
| 379 | __hrtick_restart(rq); |
| 380 | } else if (!rq->hrtick_csd_pending) { |
| 381 | smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); |
| 382 | rq->hrtick_csd_pending = 1; |
| 383 | } |
| 384 | } |
| 385 | |
| 386 | static int |
| 387 | hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| 388 | { |
| 389 | int cpu = (int)(long)hcpu; |
| 390 | |
| 391 | switch (action) { |
| 392 | case CPU_UP_CANCELED: |
| 393 | case CPU_UP_CANCELED_FROZEN: |
| 394 | case CPU_DOWN_PREPARE: |
| 395 | case CPU_DOWN_PREPARE_FROZEN: |
| 396 | case CPU_DEAD: |
| 397 | case CPU_DEAD_FROZEN: |
| 398 | hrtick_clear(cpu_rq(cpu)); |
| 399 | return NOTIFY_OK; |
| 400 | } |
| 401 | |
| 402 | return NOTIFY_DONE; |
| 403 | } |
| 404 | |
| 405 | static __init void init_hrtick(void) |
| 406 | { |
| 407 | hotcpu_notifier(hotplug_hrtick, 0); |
| 408 | } |
| 409 | #else |
| 410 | /* |
| 411 | * Called to set the hrtick timer state. |
| 412 | * |
| 413 | * called with rq->lock held and irqs disabled |
| 414 | */ |
| 415 | void hrtick_start(struct rq *rq, u64 delay) |
| 416 | { |
| 417 | /* |
 * Don't schedule slices shorter than 10000 ns; that just
 * doesn't make sense. Rely on vruntime for fairness.
| 420 | */ |
| 421 | delay = max_t(u64, delay, 10000LL); |
| 422 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), |
| 423 | HRTIMER_MODE_REL_PINNED); |
| 424 | } |
| 425 | |
| 426 | static inline void init_hrtick(void) |
| 427 | { |
| 428 | } |
| 429 | #endif /* CONFIG_SMP */ |
| 430 | |
| 431 | static void init_rq_hrtick(struct rq *rq) |
| 432 | { |
| 433 | #ifdef CONFIG_SMP |
| 434 | rq->hrtick_csd_pending = 0; |
| 435 | |
| 436 | rq->hrtick_csd.flags = 0; |
| 437 | rq->hrtick_csd.func = __hrtick_start; |
| 438 | rq->hrtick_csd.info = rq; |
| 439 | #endif |
| 440 | |
| 441 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 442 | rq->hrtick_timer.function = hrtick; |
| 443 | } |
| 444 | #else /* CONFIG_SCHED_HRTICK */ |
| 445 | static inline void hrtick_clear(struct rq *rq) |
| 446 | { |
| 447 | } |
| 448 | |
| 449 | static inline void init_rq_hrtick(struct rq *rq) |
| 450 | { |
| 451 | } |
| 452 | |
| 453 | static inline void init_hrtick(void) |
| 454 | { |
| 455 | } |
| 456 | #endif /* CONFIG_SCHED_HRTICK */ |
| 457 | |
| 458 | /* |
 * cmpxchg-based fetch_or(); a macro so it works for different integer types.
| 460 | */ |
| 461 | #define fetch_or(ptr, val) \ |
| 462 | ({ typeof(*(ptr)) __old, __val = *(ptr); \ |
| 463 | for (;;) { \ |
| 464 | __old = cmpxchg((ptr), __val, __val | (val)); \ |
| 465 | if (__old == __val) \ |
| 466 | break; \ |
| 467 | __val = __old; \ |
| 468 | } \ |
| 469 | __old; \ |
| 470 | }) |
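
/*
 * For illustration: fetch_or() returns the value the word held *before* the
 * OR, so a caller can set a bit and test the pre-existing bits in a single
 * atomic step. A sketch of the pattern used by set_nr_and_not_polling()
 * below:
 *
 *	if (!(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG))
 *		smp_send_reschedule(cpu);
 *
 * i.e. only send the IPI when the remote CPU was not already polling
 * TIF_NEED_RESCHED.
 */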
| 471 | |
| 472 | #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) |
| 473 | /* |
| 474 | * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, |
| 475 | * this avoids any races wrt polling state changes and thereby avoids |
| 476 | * spurious IPIs. |
| 477 | */ |
| 478 | static bool set_nr_and_not_polling(struct task_struct *p) |
| 479 | { |
| 480 | struct thread_info *ti = task_thread_info(p); |
| 481 | return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); |
| 482 | } |
| 483 | |
| 484 | /* |
| 485 | * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. |
| 486 | * |
| 487 | * If this returns true, then the idle task promises to call |
| 488 | * sched_ttwu_pending() and reschedule soon. |
| 489 | */ |
| 490 | static bool set_nr_if_polling(struct task_struct *p) |
| 491 | { |
| 492 | struct thread_info *ti = task_thread_info(p); |
| 493 | typeof(ti->flags) old, val = READ_ONCE(ti->flags); |
| 494 | |
| 495 | for (;;) { |
| 496 | if (!(val & _TIF_POLLING_NRFLAG)) |
| 497 | return false; |
| 498 | if (val & _TIF_NEED_RESCHED) |
| 499 | return true; |
| 500 | old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); |
| 501 | if (old == val) |
| 502 | break; |
| 503 | val = old; |
| 504 | } |
| 505 | return true; |
| 506 | } |
| 507 | |
| 508 | #else |
| 509 | static bool set_nr_and_not_polling(struct task_struct *p) |
| 510 | { |
| 511 | set_tsk_need_resched(p); |
| 512 | return true; |
| 513 | } |
| 514 | |
| 515 | #ifdef CONFIG_SMP |
| 516 | static bool set_nr_if_polling(struct task_struct *p) |
| 517 | { |
| 518 | return false; |
| 519 | } |
| 520 | #endif |
| 521 | #endif |
| 522 | |
| 523 | void wake_q_add(struct wake_q_head *head, struct task_struct *task) |
| 524 | { |
| 525 | struct wake_q_node *node = &task->wake_q; |
| 526 | |
| 527 | /* |
 * Atomically grab the task; if ->wake_q is already non-nil it means
 * it's already queued (either by us or someone else) and will get the
 * wakeup due to that.
 *
 * This cmpxchg() implies a full barrier, which pairs with the write
 * barrier implied by the wakeup in wake_up_q().
| 534 | */ |
| 535 | if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) |
| 536 | return; |
| 537 | |
| 538 | get_task_struct(task); |
| 539 | |
| 540 | /* |
| 541 | * The head is context local, there can be no concurrency. |
| 542 | */ |
| 543 | *head->lastp = node; |
| 544 | head->lastp = &node->next; |
| 545 | } |
| 546 | |
| 547 | void wake_up_q(struct wake_q_head *head) |
| 548 | { |
| 549 | struct wake_q_node *node = head->first; |
| 550 | |
| 551 | while (node != WAKE_Q_TAIL) { |
| 552 | struct task_struct *task; |
| 553 | |
| 554 | task = container_of(node, struct task_struct, wake_q); |
| 555 | BUG_ON(!task); |
| 556 | /* task can safely be re-inserted now */ |
| 557 | node = node->next; |
| 558 | task->wake_q.next = NULL; |
| 559 | |
| 560 | /* |
| 561 | * wake_up_process() implies a wmb() to pair with the queueing |
| 562 | * in wake_q_add() so as not to miss wakeups. |
| 563 | */ |
| 564 | wake_up_process(task); |
| 565 | put_task_struct(task); |
| 566 | } |
| 567 | } |
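
/*
 * Typical use of the wake_q machinery above, as a sketch (assuming the
 * WAKE_Q() initializer from <linux/sched.h>): queue wakeups while holding a
 * lock, then issue them once the lock has been dropped:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);
 *
 * wake_q_add() takes a task reference, so the task cannot go away before
 * wake_up_q() has issued the wakeup. (some_lock is a placeholder.)
 */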
| 568 | |
| 569 | /* |
| 570 | * resched_curr - mark rq's current task 'to be rescheduled now'. |
| 571 | * |
| 572 | * On UP this means the setting of the need_resched flag, on SMP it |
| 573 | * might also involve a cross-CPU call to trigger the scheduler on |
| 574 | * the target CPU. |
| 575 | */ |
| 576 | void resched_curr(struct rq *rq) |
| 577 | { |
| 578 | struct task_struct *curr = rq->curr; |
| 579 | int cpu; |
| 580 | |
| 581 | lockdep_assert_held(&rq->lock); |
| 582 | |
| 583 | if (test_tsk_need_resched(curr)) |
| 584 | return; |
| 585 | |
| 586 | cpu = cpu_of(rq); |
| 587 | |
| 588 | if (cpu == smp_processor_id()) { |
| 589 | set_tsk_need_resched(curr); |
| 590 | set_preempt_need_resched(); |
| 591 | return; |
| 592 | } |
| 593 | |
| 594 | if (set_nr_and_not_polling(curr)) |
| 595 | smp_send_reschedule(cpu); |
| 596 | else |
| 597 | trace_sched_wake_idle_without_ipi(cpu); |
| 598 | } |
| 599 | |
| 600 | void resched_cpu(int cpu) |
| 601 | { |
| 602 | struct rq *rq = cpu_rq(cpu); |
| 603 | unsigned long flags; |
| 604 | |
| 605 | if (!raw_spin_trylock_irqsave(&rq->lock, flags)) |
| 606 | return; |
| 607 | resched_curr(rq); |
| 608 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 609 | } |
| 610 | |
| 611 | #ifdef CONFIG_SMP |
| 612 | #ifdef CONFIG_NO_HZ_COMMON |
| 613 | /* |
 * In the semi-idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU would add more delay to the timers than intended
 * (as that CPU's timer base may not be up to date w.r.t. jiffies etc.).
| 620 | */ |
| 621 | int get_nohz_timer_target(int pinned) |
| 622 | { |
| 623 | int cpu = smp_processor_id(); |
| 624 | int i; |
| 625 | struct sched_domain *sd; |
| 626 | |
| 627 | if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) |
| 628 | return cpu; |
| 629 | |
| 630 | rcu_read_lock(); |
| 631 | for_each_domain(cpu, sd) { |
| 632 | for_each_cpu(i, sched_domain_span(sd)) { |
| 633 | if (!idle_cpu(i)) { |
| 634 | cpu = i; |
| 635 | goto unlock; |
| 636 | } |
| 637 | } |
| 638 | } |
| 639 | unlock: |
| 640 | rcu_read_unlock(); |
| 641 | return cpu; |
| 642 | } |
| 643 | /* |
| 644 | * When add_timer_on() enqueues a timer into the timer wheel of an |
| 645 | * idle CPU then this timer might expire before the next timer event |
| 646 | * which is scheduled to wake up that CPU. In case of a completely |
| 647 | * idle system the next event might even be infinite time into the |
| 648 | * future. wake_up_idle_cpu() ensures that the CPU is woken up and |
| 649 | * leaves the inner idle loop so the newly added timer is taken into |
| 650 | * account when the CPU goes back to idle and evaluates the timer |
| 651 | * wheel for the next timer event. |
| 652 | */ |
| 653 | static void wake_up_idle_cpu(int cpu) |
| 654 | { |
| 655 | struct rq *rq = cpu_rq(cpu); |
| 656 | |
| 657 | if (cpu == smp_processor_id()) |
| 658 | return; |
| 659 | |
| 660 | if (set_nr_and_not_polling(rq->idle)) |
| 661 | smp_send_reschedule(cpu); |
| 662 | else |
| 663 | trace_sched_wake_idle_without_ipi(cpu); |
| 664 | } |
| 665 | |
| 666 | static bool wake_up_full_nohz_cpu(int cpu) |
| 667 | { |
| 668 | /* |
| 669 | * We just need the target to call irq_exit() and re-evaluate |
| 670 | * the next tick. The nohz full kick at least implies that. |
| 671 | * If needed we can still optimize that later with an |
| 672 | * empty IRQ. |
| 673 | */ |
| 674 | if (tick_nohz_full_cpu(cpu)) { |
| 675 | if (cpu != smp_processor_id() || |
| 676 | tick_nohz_tick_stopped()) |
| 677 | tick_nohz_full_kick_cpu(cpu); |
| 678 | return true; |
| 679 | } |
| 680 | |
| 681 | return false; |
| 682 | } |
| 683 | |
| 684 | void wake_up_nohz_cpu(int cpu) |
| 685 | { |
| 686 | if (!wake_up_full_nohz_cpu(cpu)) |
| 687 | wake_up_idle_cpu(cpu); |
| 688 | } |
| 689 | |
| 690 | static inline bool got_nohz_idle_kick(void) |
| 691 | { |
| 692 | int cpu = smp_processor_id(); |
| 693 | |
| 694 | if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) |
| 695 | return false; |
| 696 | |
| 697 | if (idle_cpu(cpu) && !need_resched()) |
| 698 | return true; |
| 699 | |
| 700 | /* |
 * We can't run the idle load balance on this CPU at this time, so we
 * cancel it and clear NOHZ_BALANCE_KICK.
| 703 | */ |
| 704 | clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); |
| 705 | return false; |
| 706 | } |
| 707 | |
| 708 | #else /* CONFIG_NO_HZ_COMMON */ |
| 709 | |
| 710 | static inline bool got_nohz_idle_kick(void) |
| 711 | { |
| 712 | return false; |
| 713 | } |
| 714 | |
| 715 | #endif /* CONFIG_NO_HZ_COMMON */ |
| 716 | |
| 717 | #ifdef CONFIG_NO_HZ_FULL |
| 718 | bool sched_can_stop_tick(void) |
| 719 | { |
| 720 | /* |
| 721 | * FIFO realtime policy runs the highest priority task. Other runnable |
| 722 | * tasks are of a lower priority. The scheduler tick does nothing. |
| 723 | */ |
| 724 | if (current->policy == SCHED_FIFO) |
| 725 | return true; |
| 726 | |
| 727 | /* |
| 728 | * Round-robin realtime tasks time slice with other tasks at the same |
| 729 | * realtime priority. Is this task the only one at this priority? |
| 730 | */ |
| 731 | if (current->policy == SCHED_RR) { |
| 732 | struct sched_rt_entity *rt_se = ¤t->rt; |
| 733 | |
| 734 | return rt_se->run_list.prev == rt_se->run_list.next; |
| 735 | } |
| 736 | |
| 737 | /* |
 * More than one running task needs preemption.
 * The nr_running update is assumed to be visible
 * after the IPI is sent from the wakers.
| 741 | */ |
| 742 | if (this_rq()->nr_running > 1) |
| 743 | return false; |
| 744 | |
| 745 | return true; |
| 746 | } |
| 747 | #endif /* CONFIG_NO_HZ_FULL */ |
| 748 | |
| 749 | void sched_avg_update(struct rq *rq) |
| 750 | { |
| 751 | s64 period = sched_avg_period(); |
| 752 | |
| 753 | while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { |
| 754 | /* |
| 755 | * Inline assembly required to prevent the compiler |
| 756 | * optimising this loop into a divmod call. |
| 757 | * See __iter_div_u64_rem() for another example of this. |
| 758 | */ |
| 759 | asm("" : "+rm" (rq->age_stamp)); |
| 760 | rq->age_stamp += period; |
| 761 | rq->rt_avg /= 2; |
| 762 | } |
| 763 | } |
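
/*
 * Worked example for the decay above (illustrative): sched_avg_period() is
 * half the sysctl_sched_time_avg window, i.e. ~0.5s with the 1000ms default.
 * If the rq clock has advanced two such periods past ->age_stamp, the loop
 * runs twice and rq->rt_avg is halved twice, so accumulated RT time decays
 * geometrically rather than being dropped all at once.
 */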
| 764 | |
| 765 | #endif /* CONFIG_SMP */ |
| 766 | |
| 767 | #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ |
| 768 | (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) |
| 769 | /* |
| 770 | * Iterate task_group tree rooted at *from, calling @down when first entering a |
| 771 | * node and @up when leaving it for the final time. |
| 772 | * |
| 773 | * Caller must hold rcu_lock or sufficient equivalent. |
| 774 | */ |
| 775 | int walk_tg_tree_from(struct task_group *from, |
| 776 | tg_visitor down, tg_visitor up, void *data) |
| 777 | { |
| 778 | struct task_group *parent, *child; |
| 779 | int ret; |
| 780 | |
| 781 | parent = from; |
| 782 | |
| 783 | down: |
| 784 | ret = (*down)(parent, data); |
| 785 | if (ret) |
| 786 | goto out; |
| 787 | list_for_each_entry_rcu(child, &parent->children, siblings) { |
| 788 | parent = child; |
| 789 | goto down; |
| 790 | |
| 791 | up: |
| 792 | continue; |
| 793 | } |
| 794 | ret = (*up)(parent, data); |
| 795 | if (ret || parent == from) |
| 796 | goto out; |
| 797 | |
| 798 | child = parent; |
| 799 | parent = parent->parent; |
| 800 | if (parent) |
| 801 | goto up; |
| 802 | out: |
| 803 | return ret; |
| 804 | } |
| 805 | |
| 806 | int tg_nop(struct task_group *tg, void *data) |
| 807 | { |
| 808 | return 0; |
| 809 | } |
| 810 | #endif |
| 811 | |
| 812 | static void set_load_weight(struct task_struct *p) |
| 813 | { |
| 814 | int prio = p->static_prio - MAX_RT_PRIO; |
| 815 | struct load_weight *load = &p->se.load; |
| 816 | |
| 817 | /* |
| 818 | * SCHED_IDLE tasks get minimal weight: |
| 819 | */ |
| 820 | if (p->policy == SCHED_IDLE) { |
| 821 | load->weight = scale_load(WEIGHT_IDLEPRIO); |
| 822 | load->inv_weight = WMULT_IDLEPRIO; |
| 823 | return; |
| 824 | } |
| 825 | |
| 826 | load->weight = scale_load(prio_to_weight[prio]); |
| 827 | load->inv_weight = prio_to_wmult[prio]; |
| 828 | } |
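
/*
 * For orientation, the weights assigned above (values from prio_to_weight[];
 * a sketch, not new behaviour): a nice-0 task has weight 1024 and each nice
 * level changes the weight by roughly 1.25x, e.g.
 *
 *	nice  -5 -> 3121
 *	nice   0 -> 1024
 *	nice   5 ->  335
 *	nice  19 ->   15
 *
 * so two always-runnable tasks at nice 0 and nice 5 share the CPU roughly
 * 1024:335, about 3:1.
 */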
| 829 | |
| 830 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
| 831 | { |
| 832 | update_rq_clock(rq); |
| 833 | sched_info_queued(rq, p); |
| 834 | p->sched_class->enqueue_task(rq, p, flags); |
| 835 | } |
| 836 | |
| 837 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
| 838 | { |
| 839 | update_rq_clock(rq); |
| 840 | sched_info_dequeued(rq, p); |
| 841 | p->sched_class->dequeue_task(rq, p, flags); |
| 842 | } |
| 843 | |
| 844 | void activate_task(struct rq *rq, struct task_struct *p, int flags) |
| 845 | { |
| 846 | if (task_contributes_to_load(p)) |
| 847 | rq->nr_uninterruptible--; |
| 848 | |
| 849 | enqueue_task(rq, p, flags); |
| 850 | } |
| 851 | |
| 852 | void deactivate_task(struct rq *rq, struct task_struct *p, int flags) |
| 853 | { |
| 854 | if (task_contributes_to_load(p)) |
| 855 | rq->nr_uninterruptible++; |
| 856 | |
| 857 | dequeue_task(rq, p, flags); |
| 858 | } |
| 859 | |
| 860 | static void update_rq_clock_task(struct rq *rq, s64 delta) |
| 861 | { |
| 862 | /* |
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update(). But I don't trust it...
| 865 | */ |
| 866 | #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) |
| 867 | s64 steal = 0, irq_delta = 0; |
| 868 | #endif |
| 869 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 870 | irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; |
| 871 | |
| 872 | /* |
| 873 | * Since irq_time is only updated on {soft,}irq_exit, we might run into |
| 874 | * this case when a previous update_rq_clock() happened inside a |
| 875 | * {soft,}irq region. |
| 876 | * |
| 877 | * When this happens, we stop ->clock_task and only update the |
| 878 | * prev_irq_time stamp to account for the part that fit, so that a next |
| 879 | * update will consume the rest. This ensures ->clock_task is |
| 880 | * monotonic. |
| 881 | * |
 * It does however cause some slight misattribution of {soft,}irq
 * time; a more accurate solution would be to update the irq_time using
| 884 | * the current rq->clock timestamp, except that would require using |
| 885 | * atomic ops. |
| 886 | */ |
| 887 | if (irq_delta > delta) |
| 888 | irq_delta = delta; |
| 889 | |
| 890 | rq->prev_irq_time += irq_delta; |
| 891 | delta -= irq_delta; |
| 892 | #endif |
| 893 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
| 894 | if (static_key_false((¶virt_steal_rq_enabled))) { |
| 895 | steal = paravirt_steal_clock(cpu_of(rq)); |
| 896 | steal -= rq->prev_steal_time_rq; |
| 897 | |
| 898 | if (unlikely(steal > delta)) |
| 899 | steal = delta; |
| 900 | |
| 901 | rq->prev_steal_time_rq += steal; |
| 902 | delta -= steal; |
| 903 | } |
| 904 | #endif |
| 905 | |
| 906 | rq->clock_task += delta; |
| 907 | |
| 908 | #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) |
| 909 | if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) |
| 910 | sched_rt_avg_update(rq, irq_delta + steal); |
| 911 | #endif |
| 912 | } |
| 913 | |
| 914 | void sched_set_stop_task(int cpu, struct task_struct *stop) |
| 915 | { |
| 916 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
| 917 | struct task_struct *old_stop = cpu_rq(cpu)->stop; |
| 918 | |
| 919 | if (stop) { |
| 920 | /* |
 * Make it appear like a SCHED_FIFO task; it's something
 * userspace knows about and won't get confused about.
| 923 | * |
| 924 | * Also, it will make PI more or less work without too |
| 925 | * much confusion -- but then, stop work should not |
| 926 | * rely on PI working anyway. |
| 927 | */ |
| 928 | sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); |
| 929 | |
| 930 | stop->sched_class = &stop_sched_class; |
| 931 | } |
| 932 | |
| 933 | cpu_rq(cpu)->stop = stop; |
| 934 | |
| 935 | if (old_stop) { |
| 936 | /* |
| 937 | * Reset it back to a normal scheduling class so that |
| 938 | * it can die in pieces. |
| 939 | */ |
| 940 | old_stop->sched_class = &rt_sched_class; |
| 941 | } |
| 942 | } |
| 943 | |
| 944 | /* |
| 945 | * __normal_prio - return the priority that is based on the static prio |
| 946 | */ |
| 947 | static inline int __normal_prio(struct task_struct *p) |
| 948 | { |
| 949 | return p->static_prio; |
| 950 | } |
| 951 | |
| 952 | /* |
| 953 | * Calculate the expected normal priority: i.e. priority |
| 954 | * without taking RT-inheritance into account. Might be |
| 955 | * boosted by interactivity modifiers. Changes upon fork, |
| 956 | * setprio syscalls, and whenever the interactivity |
| 957 | * estimator recalculates. |
| 958 | */ |
| 959 | static inline int normal_prio(struct task_struct *p) |
| 960 | { |
| 961 | int prio; |
| 962 | |
| 963 | if (task_has_dl_policy(p)) |
| 964 | prio = MAX_DL_PRIO-1; |
| 965 | else if (task_has_rt_policy(p)) |
| 966 | prio = MAX_RT_PRIO-1 - p->rt_priority; |
| 967 | else |
| 968 | prio = __normal_prio(p); |
| 969 | return prio; |
| 970 | } |
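
/*
 * For orientation (kernel-internal prio values, lower means higher
 * priority): deadline tasks end up below 0 (MAX_DL_PRIO-1 == -1), realtime
 * tasks map to 99 - rt_priority, i.e. 0..98, and normal tasks keep their
 * static_prio of 100..139, corresponding to nice -20..19.
 */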
| 971 | |
| 972 | /* |
| 973 | * Calculate the current priority, i.e. the priority |
| 974 | * taken into account by the scheduler. This value might |
| 975 | * be boosted by RT tasks, or might be boosted by |
| 976 | * interactivity modifiers. Will be RT if the task got |
| 977 | * RT-boosted. If not then it returns p->normal_prio. |
| 978 | */ |
| 979 | static int effective_prio(struct task_struct *p) |
| 980 | { |
| 981 | p->normal_prio = normal_prio(p); |
| 982 | /* |
| 983 | * If we are RT tasks or we were boosted to RT priority, |
| 984 | * keep the priority unchanged. Otherwise, update priority |
| 985 | * to the normal priority: |
| 986 | */ |
| 987 | if (!rt_prio(p->prio)) |
| 988 | return p->normal_prio; |
| 989 | return p->prio; |
| 990 | } |
| 991 | |
| 992 | /** |
| 993 | * task_curr - is this task currently executing on a CPU? |
| 994 | * @p: the task in question. |
| 995 | * |
| 996 | * Return: 1 if the task is currently executing. 0 otherwise. |
| 997 | */ |
| 998 | inline int task_curr(const struct task_struct *p) |
| 999 | { |
| 1000 | return cpu_curr(task_cpu(p)) == p; |
| 1001 | } |
| 1002 | |
| 1003 | /* |
 * rq->lock can be dropped here, because sched_class::switched_from() methods may drop it.
| 1005 | */ |
| 1006 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
| 1007 | const struct sched_class *prev_class, |
| 1008 | int oldprio) |
| 1009 | { |
| 1010 | if (prev_class != p->sched_class) { |
| 1011 | if (prev_class->switched_from) |
| 1012 | prev_class->switched_from(rq, p); |
/* Possible rq->lock 'hole'. */
| 1014 | p->sched_class->switched_to(rq, p); |
| 1015 | } else if (oldprio != p->prio || dl_task(p)) |
| 1016 | p->sched_class->prio_changed(rq, p, oldprio); |
| 1017 | } |
| 1018 | |
| 1019 | void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) |
| 1020 | { |
| 1021 | const struct sched_class *class; |
| 1022 | |
| 1023 | if (p->sched_class == rq->curr->sched_class) { |
| 1024 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); |
| 1025 | } else { |
| 1026 | for_each_class(class) { |
| 1027 | if (class == rq->curr->sched_class) |
| 1028 | break; |
| 1029 | if (class == p->sched_class) { |
| 1030 | resched_curr(rq); |
| 1031 | break; |
| 1032 | } |
| 1033 | } |
| 1034 | } |
| 1035 | |
| 1036 | /* |
| 1037 | * A queue event has occurred, and we're going to schedule. In |
 * this case, we can save a useless back-to-back clock update.
| 1039 | */ |
| 1040 | if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) |
| 1041 | rq_clock_skip_update(rq, true); |
| 1042 | } |
| 1043 | |
| 1044 | #ifdef CONFIG_SMP |
| 1045 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
| 1046 | { |
| 1047 | #ifdef CONFIG_SCHED_DEBUG |
| 1048 | /* |
| 1049 | * We should never call set_task_cpu() on a blocked task, |
| 1050 | * ttwu() will sort out the placement. |
| 1051 | */ |
| 1052 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && |
| 1053 | !p->on_rq); |
| 1054 | |
| 1055 | #ifdef CONFIG_LOCKDEP |
| 1056 | /* |
| 1057 | * The caller should hold either p->pi_lock or rq->lock, when changing |
| 1058 | * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. |
| 1059 | * |
| 1060 | * sched_move_task() holds both and thus holding either pins the cgroup, |
| 1061 | * see task_group(). |
| 1062 | * |
| 1063 | * Furthermore, all task_rq users should acquire both locks, see |
| 1064 | * task_rq_lock(). |
| 1065 | */ |
| 1066 | WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || |
| 1067 | lockdep_is_held(&task_rq(p)->lock))); |
| 1068 | #endif |
| 1069 | #endif |
| 1070 | |
| 1071 | trace_sched_migrate_task(p, new_cpu); |
| 1072 | |
| 1073 | if (task_cpu(p) != new_cpu) { |
| 1074 | if (p->sched_class->migrate_task_rq) |
| 1075 | p->sched_class->migrate_task_rq(p, new_cpu); |
| 1076 | p->se.nr_migrations++; |
| 1077 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); |
| 1078 | } |
| 1079 | |
| 1080 | __set_task_cpu(p, new_cpu); |
| 1081 | } |
| 1082 | |
| 1083 | static void __migrate_swap_task(struct task_struct *p, int cpu) |
| 1084 | { |
| 1085 | if (task_on_rq_queued(p)) { |
| 1086 | struct rq *src_rq, *dst_rq; |
| 1087 | |
| 1088 | src_rq = task_rq(p); |
| 1089 | dst_rq = cpu_rq(cpu); |
| 1090 | |
| 1091 | deactivate_task(src_rq, p, 0); |
| 1092 | set_task_cpu(p, cpu); |
| 1093 | activate_task(dst_rq, p, 0); |
| 1094 | check_preempt_curr(dst_rq, p, 0); |
| 1095 | } else { |
| 1096 | /* |
| 1097 | * Task isn't running anymore; make it appear like we migrated |
| 1098 | * it before it went to sleep. This means on wakeup we make the |
 * previous cpu our target instead of where it really is.
| 1100 | */ |
| 1101 | p->wake_cpu = cpu; |
| 1102 | } |
| 1103 | } |
| 1104 | |
| 1105 | struct migration_swap_arg { |
| 1106 | struct task_struct *src_task, *dst_task; |
| 1107 | int src_cpu, dst_cpu; |
| 1108 | }; |
| 1109 | |
| 1110 | static int migrate_swap_stop(void *data) |
| 1111 | { |
| 1112 | struct migration_swap_arg *arg = data; |
| 1113 | struct rq *src_rq, *dst_rq; |
| 1114 | int ret = -EAGAIN; |
| 1115 | |
| 1116 | src_rq = cpu_rq(arg->src_cpu); |
| 1117 | dst_rq = cpu_rq(arg->dst_cpu); |
| 1118 | |
| 1119 | double_raw_lock(&arg->src_task->pi_lock, |
| 1120 | &arg->dst_task->pi_lock); |
| 1121 | double_rq_lock(src_rq, dst_rq); |
| 1122 | if (task_cpu(arg->dst_task) != arg->dst_cpu) |
| 1123 | goto unlock; |
| 1124 | |
| 1125 | if (task_cpu(arg->src_task) != arg->src_cpu) |
| 1126 | goto unlock; |
| 1127 | |
| 1128 | if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) |
| 1129 | goto unlock; |
| 1130 | |
| 1131 | if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) |
| 1132 | goto unlock; |
| 1133 | |
| 1134 | __migrate_swap_task(arg->src_task, arg->dst_cpu); |
| 1135 | __migrate_swap_task(arg->dst_task, arg->src_cpu); |
| 1136 | |
| 1137 | ret = 0; |
| 1138 | |
| 1139 | unlock: |
| 1140 | double_rq_unlock(src_rq, dst_rq); |
| 1141 | raw_spin_unlock(&arg->dst_task->pi_lock); |
| 1142 | raw_spin_unlock(&arg->src_task->pi_lock); |
| 1143 | |
| 1144 | return ret; |
| 1145 | } |
| 1146 | |
| 1147 | /* |
| 1148 | * Cross migrate two tasks |
| 1149 | */ |
| 1150 | int migrate_swap(struct task_struct *cur, struct task_struct *p) |
| 1151 | { |
| 1152 | struct migration_swap_arg arg; |
| 1153 | int ret = -EINVAL; |
| 1154 | |
| 1155 | arg = (struct migration_swap_arg){ |
| 1156 | .src_task = cur, |
| 1157 | .src_cpu = task_cpu(cur), |
| 1158 | .dst_task = p, |
| 1159 | .dst_cpu = task_cpu(p), |
| 1160 | }; |
| 1161 | |
| 1162 | if (arg.src_cpu == arg.dst_cpu) |
| 1163 | goto out; |
| 1164 | |
| 1165 | /* |
| 1166 | * These three tests are all lockless; this is OK since all of them |
| 1167 | * will be re-checked with proper locks held further down the line. |
| 1168 | */ |
| 1169 | if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) |
| 1170 | goto out; |
| 1171 | |
| 1172 | if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) |
| 1173 | goto out; |
| 1174 | |
| 1175 | if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) |
| 1176 | goto out; |
| 1177 | |
| 1178 | trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); |
| 1179 | ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); |
| 1180 | |
| 1181 | out: |
| 1182 | return ret; |
| 1183 | } |
| 1184 | |
| 1185 | struct migration_arg { |
| 1186 | struct task_struct *task; |
| 1187 | int dest_cpu; |
| 1188 | }; |
| 1189 | |
| 1190 | static int migration_cpu_stop(void *data); |
| 1191 | |
| 1192 | /* |
| 1193 | * wait_task_inactive - wait for a thread to unschedule. |
| 1194 | * |
| 1195 | * If @match_state is nonzero, it's the @p->state value just checked and |
| 1196 | * not expected to change. If it changes, i.e. @p might have woken up, |
| 1197 | * then return zero. When we succeed in waiting for @p to be off its CPU, |
| 1198 | * we return a positive number (its total switch count). If a second call |
| 1199 | * a short while later returns the same number, the caller can be sure that |
| 1200 | * @p has remained unscheduled the whole time. |
| 1201 | * |
| 1202 | * The caller must ensure that the task *will* unschedule sometime soon, |
| 1203 | * else this function might spin for a *long* time. This function can't |
| 1204 | * be called with interrupts off, or it may introduce deadlock with |
| 1205 | * smp_call_function() if an IPI is sent by the same process we are |
| 1206 | * waiting to become inactive. |
| 1207 | */ |
| 1208 | unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
| 1209 | { |
| 1210 | unsigned long flags; |
| 1211 | int running, queued; |
| 1212 | unsigned long ncsw; |
| 1213 | struct rq *rq; |
| 1214 | |
| 1215 | for (;;) { |
| 1216 | /* |
| 1217 | * We do the initial early heuristics without holding |
| 1218 | * any task-queue locks at all. We'll only try to get |
| 1219 | * the runqueue lock when things look like they will |
| 1220 | * work out! |
| 1221 | */ |
| 1222 | rq = task_rq(p); |
| 1223 | |
| 1224 | /* |
| 1225 | * If the task is actively running on another CPU |
| 1226 | * still, just relax and busy-wait without holding |
| 1227 | * any locks. |
| 1228 | * |
| 1229 | * NOTE! Since we don't hold any locks, it's not |
| 1230 | * even sure that "rq" stays as the right runqueue! |
| 1231 | * But we don't care, since "task_running()" will |
| 1232 | * return false if the runqueue has changed and p |
| 1233 | * is actually now running somewhere else! |
| 1234 | */ |
| 1235 | while (task_running(rq, p)) { |
| 1236 | if (match_state && unlikely(p->state != match_state)) |
| 1237 | return 0; |
| 1238 | cpu_relax(); |
| 1239 | } |
| 1240 | |
| 1241 | /* |
| 1242 | * Ok, time to look more closely! We need the rq |
| 1243 | * lock now, to be *sure*. If we're wrong, we'll |
| 1244 | * just go back and repeat. |
| 1245 | */ |
| 1246 | rq = task_rq_lock(p, &flags); |
| 1247 | trace_sched_wait_task(p); |
| 1248 | running = task_running(rq, p); |
| 1249 | queued = task_on_rq_queued(p); |
| 1250 | ncsw = 0; |
| 1251 | if (!match_state || p->state == match_state) |
| 1252 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
| 1253 | task_rq_unlock(rq, p, &flags); |
| 1254 | |
| 1255 | /* |
| 1256 | * If it changed from the expected state, bail out now. |
| 1257 | */ |
| 1258 | if (unlikely(!ncsw)) |
| 1259 | break; |
| 1260 | |
| 1261 | /* |
| 1262 | * Was it really running after all now that we |
| 1263 | * checked with the proper locks actually held? |
| 1264 | * |
| 1265 | * Oops. Go back and try again.. |
| 1266 | */ |
| 1267 | if (unlikely(running)) { |
| 1268 | cpu_relax(); |
| 1269 | continue; |
| 1270 | } |
| 1271 | |
| 1272 | /* |
| 1273 | * It's not enough that it's not actively running, |
| 1274 | * it must be off the runqueue _entirely_, and not |
| 1275 | * preempted! |
| 1276 | * |
| 1277 | * So if it was still runnable (but just not actively |
| 1278 | * running right now), it's preempted, and we should |
| 1279 | * yield - it could be a while. |
| 1280 | */ |
| 1281 | if (unlikely(queued)) { |
| 1282 | ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); |
| 1283 | |
| 1284 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1285 | schedule_hrtimeout(&to, HRTIMER_MODE_REL); |
| 1286 | continue; |
| 1287 | } |
| 1288 | |
| 1289 | /* |
| 1290 | * Ahh, all good. It wasn't running, and it wasn't |
| 1291 | * runnable, which means that it will never become |
| 1292 | * running in the future either. We're all done! |
| 1293 | */ |
| 1294 | break; |
| 1295 | } |
| 1296 | |
| 1297 | return ncsw; |
| 1298 | } |
| 1299 | |
/**
| 1301 | * kick_process - kick a running thread to enter/exit the kernel |
| 1302 | * @p: the to-be-kicked thread |
| 1303 | * |
| 1304 | * Cause a process which is running on another CPU to enter |
| 1305 | * kernel-mode, without any delay. (to get signals handled.) |
| 1306 | * |
| 1307 | * NOTE: this function doesn't have to take the runqueue lock, |
| 1308 | * because all it wants to ensure is that the remote task enters |
| 1309 | * the kernel. If the IPI races and the task has been migrated |
| 1310 | * to another CPU then no harm is done and the purpose has been |
| 1311 | * achieved as well. |
| 1312 | */ |
| 1313 | void kick_process(struct task_struct *p) |
| 1314 | { |
| 1315 | int cpu; |
| 1316 | |
| 1317 | preempt_disable(); |
| 1318 | cpu = task_cpu(p); |
| 1319 | if ((cpu != smp_processor_id()) && task_curr(p)) |
| 1320 | smp_send_reschedule(cpu); |
| 1321 | preempt_enable(); |
| 1322 | } |
| 1323 | EXPORT_SYMBOL_GPL(kick_process); |
| 1324 | #endif /* CONFIG_SMP */ |
| 1325 | |
| 1326 | #ifdef CONFIG_SMP |
| 1327 | /* |
| 1328 | * ->cpus_allowed is protected by both rq->lock and p->pi_lock |
| 1329 | */ |
| 1330 | static int select_fallback_rq(int cpu, struct task_struct *p) |
| 1331 | { |
| 1332 | int nid = cpu_to_node(cpu); |
| 1333 | const struct cpumask *nodemask = NULL; |
| 1334 | enum { cpuset, possible, fail } state = cpuset; |
| 1335 | int dest_cpu; |
| 1336 | |
| 1337 | /* |
| 1338 | * If the node that the cpu is on has been offlined, cpu_to_node() |
 * will return -1. There is no CPU on the node, and we should
 * select a CPU on another node.
| 1341 | */ |
| 1342 | if (nid != -1) { |
| 1343 | nodemask = cpumask_of_node(nid); |
| 1344 | |
| 1345 | /* Look for allowed, online CPU in same node. */ |
| 1346 | for_each_cpu(dest_cpu, nodemask) { |
| 1347 | if (!cpu_online(dest_cpu)) |
| 1348 | continue; |
| 1349 | if (!cpu_active(dest_cpu)) |
| 1350 | continue; |
| 1351 | if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) |
| 1352 | return dest_cpu; |
| 1353 | } |
| 1354 | } |
| 1355 | |
| 1356 | for (;;) { |
| 1357 | /* Any allowed, online CPU? */ |
| 1358 | for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { |
| 1359 | if (!cpu_online(dest_cpu)) |
| 1360 | continue; |
| 1361 | if (!cpu_active(dest_cpu)) |
| 1362 | continue; |
| 1363 | goto out; |
| 1364 | } |
| 1365 | |
| 1366 | switch (state) { |
| 1367 | case cpuset: |
| 1368 | /* No more Mr. Nice Guy. */ |
| 1369 | cpuset_cpus_allowed_fallback(p); |
| 1370 | state = possible; |
| 1371 | break; |
| 1372 | |
| 1373 | case possible: |
| 1374 | do_set_cpus_allowed(p, cpu_possible_mask); |
| 1375 | state = fail; |
| 1376 | break; |
| 1377 | |
| 1378 | case fail: |
| 1379 | BUG(); |
| 1380 | break; |
| 1381 | } |
| 1382 | } |
| 1383 | |
| 1384 | out: |
| 1385 | if (state != cpuset) { |
| 1386 | /* |
| 1387 | * Don't tell them about moving exiting tasks or |
 * kernel threads (both have a NULL mm), since they never
 * leave the kernel.
| 1390 | */ |
| 1391 | if (p->mm && printk_ratelimit()) { |
| 1392 | printk_deferred("process %d (%s) no longer affine to cpu%d\n", |
| 1393 | task_pid_nr(p), p->comm, cpu); |
| 1394 | } |
| 1395 | } |
| 1396 | |
| 1397 | return dest_cpu; |
| 1398 | } |
| 1399 | |
| 1400 | /* |
| 1401 | * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. |
| 1402 | */ |
| 1403 | static inline |
| 1404 | int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) |
| 1405 | { |
| 1406 | if (p->nr_cpus_allowed > 1) |
| 1407 | cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); |
| 1408 | |
| 1409 | /* |
| 1410 | * In order not to call set_task_cpu() on a blocking task we need |
| 1411 | * to rely on ttwu() to place the task on a valid ->cpus_allowed |
| 1412 | * cpu. |
| 1413 | * |
| 1414 | * Since this is common to all placement strategies, this lives here. |
| 1415 | * |
| 1416 | * [ this allows ->select_task() to simply return task_cpu(p) and |
| 1417 | * not worry about this generic constraint ] |
| 1418 | */ |
| 1419 | if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || |
| 1420 | !cpu_online(cpu))) |
| 1421 | cpu = select_fallback_rq(task_cpu(p), p); |
| 1422 | |
| 1423 | return cpu; |
| 1424 | } |
| 1425 | |
| 1426 | static void update_avg(u64 *avg, u64 sample) |
| 1427 | { |
| 1428 | s64 diff = sample - *avg; |
| 1429 | *avg += diff >> 3; |
| 1430 | } |
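
/*
 * update_avg() is an exponentially weighted moving average with weight 1/8:
 * avg += (sample - avg) / 8. As a sketch, feeding a constant sample of 800
 * into an initial avg of 0 yields 100, 187, 263, ... converging on 800.
 * It is used below to track rq->avg_idle on wakeup.
 */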
| 1431 | #endif |
| 1432 | |
| 1433 | static void |
| 1434 | ttwu_stat(struct task_struct *p, int cpu, int wake_flags) |
| 1435 | { |
| 1436 | #ifdef CONFIG_SCHEDSTATS |
| 1437 | struct rq *rq = this_rq(); |
| 1438 | |
| 1439 | #ifdef CONFIG_SMP |
| 1440 | int this_cpu = smp_processor_id(); |
| 1441 | |
| 1442 | if (cpu == this_cpu) { |
| 1443 | schedstat_inc(rq, ttwu_local); |
| 1444 | schedstat_inc(p, se.statistics.nr_wakeups_local); |
| 1445 | } else { |
| 1446 | struct sched_domain *sd; |
| 1447 | |
| 1448 | schedstat_inc(p, se.statistics.nr_wakeups_remote); |
| 1449 | rcu_read_lock(); |
| 1450 | for_each_domain(this_cpu, sd) { |
| 1451 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 1452 | schedstat_inc(sd, ttwu_wake_remote); |
| 1453 | break; |
| 1454 | } |
| 1455 | } |
| 1456 | rcu_read_unlock(); |
| 1457 | } |
| 1458 | |
| 1459 | if (wake_flags & WF_MIGRATED) |
| 1460 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); |
| 1461 | |
| 1462 | #endif /* CONFIG_SMP */ |
| 1463 | |
| 1464 | schedstat_inc(rq, ttwu_count); |
| 1465 | schedstat_inc(p, se.statistics.nr_wakeups); |
| 1466 | |
| 1467 | if (wake_flags & WF_SYNC) |
| 1468 | schedstat_inc(p, se.statistics.nr_wakeups_sync); |
| 1469 | |
| 1470 | #endif /* CONFIG_SCHEDSTATS */ |
| 1471 | } |
| 1472 | |
| 1473 | static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) |
| 1474 | { |
| 1475 | activate_task(rq, p, en_flags); |
| 1476 | p->on_rq = TASK_ON_RQ_QUEUED; |
| 1477 | |
| 1478 | /* if a worker is waking up, notify workqueue */ |
| 1479 | if (p->flags & PF_WQ_WORKER) |
| 1480 | wq_worker_waking_up(p, cpu_of(rq)); |
| 1481 | } |
| 1482 | |
| 1483 | /* |
| 1484 | * Mark the task runnable and perform wakeup-preemption. |
| 1485 | */ |
| 1486 | static void |
| 1487 | ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) |
| 1488 | { |
| 1489 | check_preempt_curr(rq, p, wake_flags); |
| 1490 | trace_sched_wakeup(p, true); |
| 1491 | |
| 1492 | p->state = TASK_RUNNING; |
| 1493 | #ifdef CONFIG_SMP |
| 1494 | if (p->sched_class->task_woken) |
| 1495 | p->sched_class->task_woken(rq, p); |
| 1496 | |
| 1497 | if (rq->idle_stamp) { |
| 1498 | u64 delta = rq_clock(rq) - rq->idle_stamp; |
| 1499 | u64 max = 2*rq->max_idle_balance_cost; |
| 1500 | |
| 1501 | update_avg(&rq->avg_idle, delta); |
| 1502 | |
| 1503 | if (rq->avg_idle > max) |
| 1504 | rq->avg_idle = max; |
| 1505 | |
| 1506 | rq->idle_stamp = 0; |
| 1507 | } |
| 1508 | #endif |
| 1509 | } |
| 1510 | |
| 1511 | static void |
| 1512 | ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) |
| 1513 | { |
| 1514 | #ifdef CONFIG_SMP |
| 1515 | if (p->sched_contributes_to_load) |
| 1516 | rq->nr_uninterruptible--; |
| 1517 | #endif |
| 1518 | |
| 1519 | ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); |
| 1520 | ttwu_do_wakeup(rq, p, wake_flags); |
| 1521 | } |
| 1522 | |
| 1523 | /* |
| 1524 | * Called in case the task @p isn't fully descheduled from its runqueue, |
 * in this case we must do a remote wakeup. It's a 'light' wakeup though:
 * all we need to do is flip p->state to TASK_RUNNING, since
| 1527 | * the task is still ->on_rq. |
| 1528 | */ |
| 1529 | static int ttwu_remote(struct task_struct *p, int wake_flags) |
| 1530 | { |
| 1531 | struct rq *rq; |
| 1532 | int ret = 0; |
| 1533 | |
| 1534 | rq = __task_rq_lock(p); |
| 1535 | if (task_on_rq_queued(p)) { |
| 1536 | /* check_preempt_curr() may use rq clock */ |
| 1537 | update_rq_clock(rq); |
| 1538 | ttwu_do_wakeup(rq, p, wake_flags); |
| 1539 | ret = 1; |
| 1540 | } |
| 1541 | __task_rq_unlock(rq); |
| 1542 | |
| 1543 | return ret; |
| 1544 | } |
| 1545 | |
| 1546 | #ifdef CONFIG_SMP |
| 1547 | void sched_ttwu_pending(void) |
| 1548 | { |
| 1549 | struct rq *rq = this_rq(); |
| 1550 | struct llist_node *llist = llist_del_all(&rq->wake_list); |
| 1551 | struct task_struct *p; |
| 1552 | unsigned long flags; |
| 1553 | |
| 1554 | if (!llist) |
| 1555 | return; |
| 1556 | |
| 1557 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 1558 | |
| 1559 | while (llist) { |
| 1560 | p = llist_entry(llist, struct task_struct, wake_entry); |
| 1561 | llist = llist_next(llist); |
| 1562 | ttwu_do_activate(rq, p, 0); |
| 1563 | } |
| 1564 | |
| 1565 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 1566 | } |
| 1567 | |
| 1568 | void scheduler_ipi(void) |
| 1569 | { |
| 1570 | /* |
| 1571 | * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting |
| 1572 | * TIF_NEED_RESCHED remotely (for the first time) will also send |
| 1573 | * this IPI. |
| 1574 | */ |
| 1575 | preempt_fold_need_resched(); |
| 1576 | |
| 1577 | if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) |
| 1578 | return; |
| 1579 | |
| 1580 | /* |
| 1581 | * Not all reschedule IPI handlers call irq_enter/irq_exit, since |
| 1582 | * traditionally all their work was done from the interrupt return |
| 1583 | * path. Now that we actually do some work, we need to make sure |
| 1584 | * we do call them. |
| 1585 | * |
| 1586 | * Some archs already do call them, luckily irq_enter/exit nest |
| 1587 | * properly. |
| 1588 | * |
| 1589 | * Arguably we should visit all archs and update all handlers, |
| 1590 | * however a fair share of IPIs are still resched only so this would |
| 1591 | * somewhat pessimize the simple resched case. |
| 1592 | */ |
| 1593 | irq_enter(); |
| 1594 | sched_ttwu_pending(); |
| 1595 | |
| 1596 | /* |
| 1597 | * Check if someone kicked us for doing the nohz idle load balance. |
| 1598 | */ |
| 1599 | if (unlikely(got_nohz_idle_kick())) { |
| 1600 | this_rq()->idle_balance = 1; |
| 1601 | raise_softirq_irqoff(SCHED_SOFTIRQ); |
| 1602 | } |
| 1603 | irq_exit(); |
| 1604 | } |
| 1605 | |
| 1606 | static void ttwu_queue_remote(struct task_struct *p, int cpu) |
| 1607 | { |
| 1608 | struct rq *rq = cpu_rq(cpu); |
| 1609 | |
| 1610 | if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { |
| 1611 | if (!set_nr_if_polling(rq->idle)) |
| 1612 | smp_send_reschedule(cpu); |
| 1613 | else |
| 1614 | trace_sched_wake_idle_without_ipi(cpu); |
| 1615 | } |
| 1616 | } |
| 1617 | |
| 1618 | void wake_up_if_idle(int cpu) |
| 1619 | { |
| 1620 | struct rq *rq = cpu_rq(cpu); |
| 1621 | unsigned long flags; |
| 1622 | |
| 1623 | rcu_read_lock(); |
| 1624 | |
| 1625 | if (!is_idle_task(rcu_dereference(rq->curr))) |
| 1626 | goto out; |
| 1627 | |
| 1628 | if (set_nr_if_polling(rq->idle)) { |
| 1629 | trace_sched_wake_idle_without_ipi(cpu); |
| 1630 | } else { |
| 1631 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 1632 | if (is_idle_task(rq->curr)) |
| 1633 | smp_send_reschedule(cpu); |
| 1634 | /* Else cpu is not in idle, do nothing here */ |
| 1635 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 1636 | } |
| 1637 | |
| 1638 | out: |
| 1639 | rcu_read_unlock(); |
| 1640 | } |
| 1641 | |
| 1642 | bool cpus_share_cache(int this_cpu, int that_cpu) |
| 1643 | { |
| 1644 | return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); |
| 1645 | } |
| 1646 | #endif /* CONFIG_SMP */ |
| 1647 | |
| 1648 | static void ttwu_queue(struct task_struct *p, int cpu) |
| 1649 | { |
| 1650 | struct rq *rq = cpu_rq(cpu); |
| 1651 | |
| 1652 | #if defined(CONFIG_SMP) |
| 1653 | if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { |
| 1654 | sched_clock_cpu(cpu); /* sync clocks x-cpu */ |
| 1655 | ttwu_queue_remote(p, cpu); |
| 1656 | return; |
| 1657 | } |
| 1658 | #endif |
| 1659 | |
| 1660 | raw_spin_lock(&rq->lock); |
| 1661 | ttwu_do_activate(rq, p, 0); |
| 1662 | raw_spin_unlock(&rq->lock); |
| 1663 | } |
| 1664 | |
| 1665 | /** |
| 1666 | * try_to_wake_up - wake up a thread |
| 1667 | * @p: the thread to be awakened |
| 1668 | * @state: the mask of task states that can be woken |
| 1669 | * @wake_flags: wake modifier flags (WF_*) |
| 1670 | * |
| 1671 | * Put it on the run-queue if it's not already there. The "current" |
| 1672 | * thread is always on the run-queue (except when the actual |
| 1673 | * re-schedule is in progress), and as such you're allowed to do |
| 1674 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
| 1675 | * runnable without the overhead of this. |
| 1676 | * |
 * Return: %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
| 1679 | */ |
| 1680 | static int |
| 1681 | try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) |
| 1682 | { |
| 1683 | unsigned long flags; |
| 1684 | int cpu, success = 0; |
| 1685 | |
| 1686 | /* |
| 1687 | * If we are going to wake up a thread waiting for CONDITION we |
| 1688 | * need to ensure that CONDITION=1 done by the caller can not be |
| 1689 | * reordered with p->state check below. This pairs with mb() in |
| 1690 | * set_current_state() the waiting thread does. |
| 1691 | */ |
| 1692 | smp_mb__before_spinlock(); |
| 1693 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 1694 | if (!(p->state & state)) |
| 1695 | goto out; |
| 1696 | |
| 1697 | success = 1; /* we're going to change ->state */ |
| 1698 | cpu = task_cpu(p); |
| 1699 | |
| 1700 | if (p->on_rq && ttwu_remote(p, wake_flags)) |
| 1701 | goto stat; |
| 1702 | |
| 1703 | #ifdef CONFIG_SMP |
| 1704 | /* |
| 1705 | * If the owning (remote) cpu is still in the middle of schedule() with |
| 1706 | * this task as prev, wait until its done referencing the task. |
| 1707 | */ |
| 1708 | while (p->on_cpu) |
| 1709 | cpu_relax(); |
| 1710 | /* |
| 1711 | * Pairs with the smp_wmb() in finish_lock_switch(). |
| 1712 | */ |
| 1713 | smp_rmb(); |
| 1714 | |
| 1715 | p->sched_contributes_to_load = !!task_contributes_to_load(p); |
| 1716 | p->state = TASK_WAKING; |
| 1717 | |
| 1718 | if (p->sched_class->task_waking) |
| 1719 | p->sched_class->task_waking(p); |
| 1720 | |
| 1721 | cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); |
| 1722 | if (task_cpu(p) != cpu) { |
| 1723 | wake_flags |= WF_MIGRATED; |
| 1724 | set_task_cpu(p, cpu); |
| 1725 | } |
| 1726 | #endif /* CONFIG_SMP */ |
| 1727 | |
| 1728 | ttwu_queue(p, cpu); |
| 1729 | stat: |
| 1730 | ttwu_stat(p, cpu, wake_flags); |
| 1731 | out: |
| 1732 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 1733 | |
| 1734 | return success; |
| 1735 | } |
| 1736 | |
| 1737 | /** |
| 1738 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
| 1739 | * @p: the thread to be awakened |
| 1740 | * |
| 1741 | * Put @p on the run-queue if it's not already there. The caller must |
| 1742 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
| 1743 | * the current task. |
| 1744 | */ |
| 1745 | static void try_to_wake_up_local(struct task_struct *p) |
| 1746 | { |
| 1747 | struct rq *rq = task_rq(p); |
| 1748 | |
| 1749 | if (WARN_ON_ONCE(rq != this_rq()) || |
| 1750 | WARN_ON_ONCE(p == current)) |
| 1751 | return; |
| 1752 | |
| 1753 | lockdep_assert_held(&rq->lock); |
| 1754 | |
| 1755 | if (!raw_spin_trylock(&p->pi_lock)) { |
| 1756 | raw_spin_unlock(&rq->lock); |
| 1757 | raw_spin_lock(&p->pi_lock); |
| 1758 | raw_spin_lock(&rq->lock); |
| 1759 | } |
| 1760 | |
| 1761 | if (!(p->state & TASK_NORMAL)) |
| 1762 | goto out; |
| 1763 | |
| 1764 | if (!task_on_rq_queued(p)) |
| 1765 | ttwu_activate(rq, p, ENQUEUE_WAKEUP); |
| 1766 | |
| 1767 | ttwu_do_wakeup(rq, p, 0); |
| 1768 | ttwu_stat(p, smp_processor_id(), 0); |
| 1769 | out: |
| 1770 | raw_spin_unlock(&p->pi_lock); |
| 1771 | } |
| 1772 | |
| 1773 | /** |
| 1774 | * wake_up_process - Wake up a specific process |
| 1775 | * @p: The process to be woken up. |
| 1776 | * |
| 1777 | * Attempt to wake up the nominated process and move it to the set of runnable |
| 1778 | * processes. |
| 1779 | * |
| 1780 | * Return: 1 if the process was woken up, 0 if it was already running. |
| 1781 | * |
| 1782 | * It may be assumed that this function implies a write memory barrier before |
| 1783 | * changing the task state if and only if any tasks are woken up. |
| 1784 | */ |
| 1785 | int wake_up_process(struct task_struct *p) |
| 1786 | { |
| 1787 | WARN_ON(task_is_stopped_or_traced(p)); |
| 1788 | return try_to_wake_up(p, TASK_NORMAL, 0); |
| 1789 | } |
| 1790 | EXPORT_SYMBOL(wake_up_process); |
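| | |
| | /* |
| | * A minimal sketch of the sleep/wake pairing the above relies on; "cond" |
| | * and "waiter" are hypothetical names used only for illustration: |
| | * |
| | *   Sleeper: |
| | *     set_current_state(TASK_INTERRUPTIBLE); |
| | *     if (!cond) |
| | *             schedule(); |
| | *     __set_current_state(TASK_RUNNING); |
| | * |
| | *   Waker: |
| | *     cond = 1; |
| | *     wake_up_process(waiter); |
| | * |
| | * The barrier implied by set_current_state() pairs with the one in |
| | * try_to_wake_up(): either the sleeper sees cond != 0 and does not block, |
| | * or the waker sees the updated ->state and wakes it. |
| | */ |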
| 1791 | |
| 1792 | int wake_up_state(struct task_struct *p, unsigned int state) |
| 1793 | { |
| 1794 | return try_to_wake_up(p, state, 0); |
| 1795 | } |
| 1796 | |
| 1797 | /* |
| 1798 | * This function clears the sched_dl_entity static params. |
| 1799 | */ |
| 1800 | void __dl_clear_params(struct task_struct *p) |
| 1801 | { |
| 1802 | struct sched_dl_entity *dl_se = &p->dl; |
| 1803 | |
| 1804 | dl_se->dl_runtime = 0; |
| 1805 | dl_se->dl_deadline = 0; |
| 1806 | dl_se->dl_period = 0; |
| 1807 | dl_se->flags = 0; |
| 1808 | dl_se->dl_bw = 0; |
| 1809 | |
| 1810 | dl_se->dl_throttled = 0; |
| 1811 | dl_se->dl_new = 1; |
| 1812 | dl_se->dl_yielded = 0; |
| 1813 | } |
| 1814 | |
| 1815 | /* |
| 1816 | * Perform scheduler-related setup for a newly forked process p. |
| 1817 | * p is forked by current. |
| 1818 | * |
| 1819 | * __sched_fork() is basic setup used by init_idle() too: |
| 1820 | */ |
| 1821 | static void __sched_fork(unsigned long clone_flags, struct task_struct *p) |
| 1822 | { |
| 1823 | p->on_rq = 0; |
| 1824 | |
| 1825 | p->se.on_rq = 0; |
| 1826 | p->se.exec_start = 0; |
| 1827 | p->se.sum_exec_runtime = 0; |
| 1828 | p->se.prev_sum_exec_runtime = 0; |
| 1829 | p->se.nr_migrations = 0; |
| 1830 | p->se.vruntime = 0; |
| 1831 | #ifdef CONFIG_SMP |
| 1832 | p->se.avg.decay_count = 0; |
| 1833 | #endif |
| 1834 | INIT_LIST_HEAD(&p->se.group_node); |
| 1835 | |
| 1836 | #ifdef CONFIG_SCHEDSTATS |
| 1837 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
| 1838 | #endif |
| 1839 | |
| 1840 | RB_CLEAR_NODE(&p->dl.rb_node); |
| 1841 | init_dl_task_timer(&p->dl); |
| 1842 | __dl_clear_params(p); |
| 1843 | |
| 1844 | INIT_LIST_HEAD(&p->rt.run_list); |
| 1845 | |
| 1846 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 1847 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
| 1848 | #endif |
| 1849 | |
| 1850 | #ifdef CONFIG_NUMA_BALANCING |
| 1851 | if (p->mm && atomic_read(&p->mm->mm_users) == 1) { |
| 1852 | p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); |
| 1853 | p->mm->numa_scan_seq = 0; |
| 1854 | } |
| 1855 | |
| 1856 | if (clone_flags & CLONE_VM) |
| 1857 | p->numa_preferred_nid = current->numa_preferred_nid; |
| 1858 | else |
| 1859 | p->numa_preferred_nid = -1; |
| 1860 | |
| 1861 | p->node_stamp = 0ULL; |
| 1862 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; |
| 1863 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
| 1864 | p->numa_work.next = &p->numa_work; |
| 1865 | p->numa_faults = NULL; |
| 1866 | p->last_task_numa_placement = 0; |
| 1867 | p->last_sum_exec_runtime = 0; |
| 1868 | |
| 1869 | p->numa_group = NULL; |
| 1870 | #endif /* CONFIG_NUMA_BALANCING */ |
| 1871 | } |
| 1872 | |
| 1873 | #ifdef CONFIG_NUMA_BALANCING |
| 1874 | #ifdef CONFIG_SCHED_DEBUG |
| 1875 | void set_numabalancing_state(bool enabled) |
| 1876 | { |
| 1877 | if (enabled) |
| 1878 | sched_feat_set("NUMA"); |
| 1879 | else |
| 1880 | sched_feat_set("NO_NUMA"); |
| 1881 | } |
| 1882 | #else |
| 1883 | __read_mostly bool numabalancing_enabled; |
| 1884 | |
| 1885 | void set_numabalancing_state(bool enabled) |
| 1886 | { |
| 1887 | numabalancing_enabled = enabled; |
| 1888 | } |
| 1889 | #endif /* CONFIG_SCHED_DEBUG */ |
| 1890 | |
| 1891 | #ifdef CONFIG_PROC_SYSCTL |
| 1892 | int sysctl_numa_balancing(struct ctl_table *table, int write, |
| 1893 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 1894 | { |
| 1895 | struct ctl_table t; |
| 1896 | int err; |
| 1897 | int state = numabalancing_enabled; |
| 1898 | |
| 1899 | if (write && !capable(CAP_SYS_ADMIN)) |
| 1900 | return -EPERM; |
| 1901 | |
| 1902 | t = *table; |
| 1903 | t.data = &state; |
| 1904 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
| 1905 | if (err < 0) |
| 1906 | return err; |
| 1907 | if (write) |
| 1908 | set_numabalancing_state(state); |
| 1909 | return err; |
| 1910 | } |
| 1911 | #endif |
| 1912 | #endif |
| 1913 | |
| 1914 | /* |
| 1915 | * fork()/clone()-time setup: |
| 1916 | */ |
| 1917 | int sched_fork(unsigned long clone_flags, struct task_struct *p) |
| 1918 | { |
| 1919 | unsigned long flags; |
| 1920 | int cpu = get_cpu(); |
| 1921 | |
| 1922 | __sched_fork(clone_flags, p); |
| 1923 | /* |
| 1924 | * We mark the process as running here. This guarantees that |
| 1925 | * nobody will actually run it, and a signal or other external |
| 1926 | * event cannot wake it up and insert it on the runqueue either. |
| 1927 | */ |
| 1928 | p->state = TASK_RUNNING; |
| 1929 | |
| 1930 | /* |
| 1931 | * Make sure we do not leak PI boosting priority to the child. |
| 1932 | */ |
| 1933 | p->prio = current->normal_prio; |
| 1934 | |
| 1935 | /* |
| 1936 | * Revert to default priority/policy on fork if requested. |
| 1937 | */ |
| 1938 | if (unlikely(p->sched_reset_on_fork)) { |
| 1939 | if (task_has_dl_policy(p) || task_has_rt_policy(p)) { |
| 1940 | p->policy = SCHED_NORMAL; |
| 1941 | p->static_prio = NICE_TO_PRIO(0); |
| 1942 | p->rt_priority = 0; |
| 1943 | } else if (PRIO_TO_NICE(p->static_prio) < 0) |
| 1944 | p->static_prio = NICE_TO_PRIO(0); |
| 1945 | |
| 1946 | p->prio = p->normal_prio = __normal_prio(p); |
| 1947 | set_load_weight(p); |
| 1948 | |
| 1949 | /* |
| 1950 | * We don't need the reset flag anymore after the fork. It has |
| 1951 | * fulfilled its duty: |
| 1952 | */ |
| 1953 | p->sched_reset_on_fork = 0; |
| 1954 | } |
| 1955 | |
| 1956 | if (dl_prio(p->prio)) { |
| 1957 | put_cpu(); |
| 1958 | return -EAGAIN; |
| 1959 | } else if (rt_prio(p->prio)) { |
| 1960 | p->sched_class = &rt_sched_class; |
| 1961 | } else { |
| 1962 | p->sched_class = &fair_sched_class; |
| 1963 | } |
| 1964 | |
| 1965 | if (p->sched_class->task_fork) |
| 1966 | p->sched_class->task_fork(p); |
| 1967 | |
| 1968 | /* |
| 1969 | * The child is not yet in the pid-hash so no cgroup attach races, |
| 1970 | * and the cgroup is pinned to this child because cgroup_fork() |
| 1971 | * is run before sched_fork(). |
| 1972 | * |
| 1973 | * Silence PROVE_RCU. |
| 1974 | */ |
| 1975 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 1976 | set_task_cpu(p, cpu); |
| 1977 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 1978 | |
| 1979 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
| 1980 | if (likely(sched_info_on())) |
| 1981 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
| 1982 | #endif |
| 1983 | #if defined(CONFIG_SMP) |
| 1984 | p->on_cpu = 0; |
| 1985 | #endif |
| 1986 | init_task_preempt_count(p); |
| 1987 | #ifdef CONFIG_SMP |
| 1988 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
| 1989 | RB_CLEAR_NODE(&p->pushable_dl_tasks); |
| 1990 | #endif |
| 1991 | |
| 1992 | put_cpu(); |
| 1993 | return 0; |
| 1994 | } |
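| | |
| | /* |
| | * The sched_reset_on_fork handling above is driven from userspace; an |
| | * illustrative (not in-tree) caller would do: |
| | * |
| | *   struct sched_param param = { .sched_priority = 50 }; |
| | * |
| | *   // RT for this task, but children revert to SCHED_NORMAL / nice 0 |
| | *   sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &param); |
| | */ |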
| 1995 | |
| 1996 | unsigned long to_ratio(u64 period, u64 runtime) |
| 1997 | { |
| 1998 | if (runtime == RUNTIME_INF) |
| 1999 | return 1ULL << 20; |
| 2000 | |
| 2001 | /* |
| 2002 | * Doing this here saves a lot of checks in all |
| 2003 | * the calling paths, and returning zero seems |
| 2004 | * safe for them anyway. |
| 2005 | */ |
| 2006 | if (period == 0) |
| 2007 | return 0; |
| 2008 | |
| 2009 | return div64_u64(runtime << 20, period); |
| 2010 | } |
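| | |
| | /* |
| | * Worked example: to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC) |
| | * returns (10 << 20) / 100 ~= 104857, i.e. 10% in the <<20 fixed-point |
| | * format used for bandwidth accounting (1 << 20 == 100%). |
| | */ |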
| 2011 | |
| 2012 | #ifdef CONFIG_SMP |
| 2013 | inline struct dl_bw *dl_bw_of(int i) |
| 2014 | { |
| 2015 | rcu_lockdep_assert(rcu_read_lock_sched_held(), |
| 2016 | "sched RCU must be held"); |
| 2017 | return &cpu_rq(i)->rd->dl_bw; |
| 2018 | } |
| 2019 | |
| 2020 | static inline int dl_bw_cpus(int i) |
| 2021 | { |
| 2022 | struct root_domain *rd = cpu_rq(i)->rd; |
| 2023 | int cpus = 0; |
| 2024 | |
| 2025 | rcu_lockdep_assert(rcu_read_lock_sched_held(), |
| 2026 | "sched RCU must be held"); |
| 2027 | for_each_cpu_and(i, rd->span, cpu_active_mask) |
| 2028 | cpus++; |
| 2029 | |
| 2030 | return cpus; |
| 2031 | } |
| 2032 | #else |
| 2033 | inline struct dl_bw *dl_bw_of(int i) |
| 2034 | { |
| 2035 | return &cpu_rq(i)->dl.dl_bw; |
| 2036 | } |
| 2037 | |
| 2038 | static inline int dl_bw_cpus(int i) |
| 2039 | { |
| 2040 | return 1; |
| 2041 | } |
| 2042 | #endif |
| 2043 | |
| 2044 | /* |
| 2045 | * We must be sure that accepting a new task (or allowing changing the |
| 2046 | * parameters of an existing one) is consistent with the bandwidth |
| 2047 | * constraints. If so, this function also updates the currently |
| 2048 | * allocated bandwidth to reflect the new situation. |
| 2049 | * |
| 2050 | * This function is called while holding p's rq->lock. |
| 2051 | * |
| 2052 | * XXX we should delay bw change until the task's 0-lag point, see |
| 2053 | * __setparam_dl(). |
| 2054 | */ |
| 2055 | static int dl_overflow(struct task_struct *p, int policy, |
| 2056 | const struct sched_attr *attr) |
| 2057 | { |
| 2058 | |
| 2059 | struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); |
| 2060 | u64 period = attr->sched_period ?: attr->sched_deadline; |
| 2061 | u64 runtime = attr->sched_runtime; |
| 2062 | u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; |
| 2063 | int cpus, err = -1; |
| 2064 | |
| 2065 | if (new_bw == p->dl.dl_bw) |
| 2066 | return 0; |
| 2067 | |
| 2068 | /* |
| 2069 | * Whether a task enters, leaves, or stays -deadline but changes |
| 2070 | * its parameters, we may need to update the total allocated |
| 2071 | * bandwidth of the container accordingly. |
| 2072 | */ |
| 2073 | raw_spin_lock(&dl_b->lock); |
| 2074 | cpus = dl_bw_cpus(task_cpu(p)); |
| 2075 | if (dl_policy(policy) && !task_has_dl_policy(p) && |
| 2076 | !__dl_overflow(dl_b, cpus, 0, new_bw)) { |
| 2077 | __dl_add(dl_b, new_bw); |
| 2078 | err = 0; |
| 2079 | } else if (dl_policy(policy) && task_has_dl_policy(p) && |
| 2080 | !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { |
| 2081 | __dl_clear(dl_b, p->dl.dl_bw); |
| 2082 | __dl_add(dl_b, new_bw); |
| 2083 | err = 0; |
| 2084 | } else if (!dl_policy(policy) && task_has_dl_policy(p)) { |
| 2085 | __dl_clear(dl_b, p->dl.dl_bw); |
| 2086 | err = 0; |
| 2087 | } |
| 2088 | raw_spin_unlock(&dl_b->lock); |
| 2089 | |
| 2090 | return err; |
| 2091 | } |
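| | |
| | /* |
| | * Worked example (assuming the default admission limit of 95%, i.e. |
| | * sched_rt_runtime_us / sched_rt_period_us == 950000 / 1000000): a root |
| | * domain with 4 active CPUs admits up to 4 * to_ratio(1s, 950ms) of |
| | * -deadline bandwidth in total. A task asking for sched_runtime = 30ms |
| | * every sched_period = 100ms contributes to_ratio(100ms, 30ms) ~= 0.3 << 20 |
| | * and is admitted as long as the running total stays below that limit. |
| | */ |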
| 2092 | |
| 2093 | extern void init_dl_bw(struct dl_bw *dl_b); |
| 2094 | |
| 2095 | /* |
| 2096 | * wake_up_new_task - wake up a newly created task for the first time. |
| 2097 | * |
| 2098 | * This function will do some initial scheduler statistics housekeeping |
| 2099 | * that must be done for every newly created context, then puts the task |
| 2100 | * on the runqueue and wakes it. |
| 2101 | */ |
| 2102 | void wake_up_new_task(struct task_struct *p) |
| 2103 | { |
| 2104 | unsigned long flags; |
| 2105 | struct rq *rq; |
| 2106 | |
| 2107 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 2108 | #ifdef CONFIG_SMP |
| 2109 | /* |
| 2110 | * Fork balancing, do it here and not earlier because: |
| 2111 | * - cpus_allowed can change in the fork path |
| 2112 | * - any previously selected cpu might disappear through hotplug |
| 2113 | */ |
| 2114 | set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); |
| 2115 | #endif |
| 2116 | |
| 2117 | /* Initialize new task's runnable average */ |
| 2118 | init_task_runnable_average(p); |
| 2119 | rq = __task_rq_lock(p); |
| 2120 | activate_task(rq, p, 0); |
| 2121 | p->on_rq = TASK_ON_RQ_QUEUED; |
| 2122 | trace_sched_wakeup_new(p, true); |
| 2123 | check_preempt_curr(rq, p, WF_FORK); |
| 2124 | #ifdef CONFIG_SMP |
| 2125 | if (p->sched_class->task_woken) |
| 2126 | p->sched_class->task_woken(rq, p); |
| 2127 | #endif |
| 2128 | task_rq_unlock(rq, p, &flags); |
| 2129 | } |
| 2130 | |
| 2131 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2132 | |
| 2133 | /** |
| 2134 | * preempt_notifier_register - tell me when current is being preempted & rescheduled |
| 2135 | * @notifier: notifier struct to register |
| 2136 | */ |
| 2137 | void preempt_notifier_register(struct preempt_notifier *notifier) |
| 2138 | { |
| 2139 | hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); |
| 2140 | } |
| 2141 | EXPORT_SYMBOL_GPL(preempt_notifier_register); |
| 2142 | |
| 2143 | /** |
| 2144 | * preempt_notifier_unregister - no longer interested in preemption notifications |
| 2145 | * @notifier: notifier struct to unregister |
| 2146 | * |
| 2147 | * This is safe to call from within a preemption notifier. |
| 2148 | */ |
| 2149 | void preempt_notifier_unregister(struct preempt_notifier *notifier) |
| 2150 | { |
| 2151 | hlist_del(¬ifier->link); |
| 2152 | } |
| 2153 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); |
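| | |
| | /* |
| | * Minimal usage sketch; the ops and callbacks below are made-up names |
| | * (KVM's vcpu load/put is the typical in-tree user of this interface): |
| | * |
| | *   static void my_sched_in(struct preempt_notifier *pn, int cpu) |
| | *   { |
| | *           // current is being scheduled back in on @cpu |
| | *   } |
| | * |
| | *   static void my_sched_out(struct preempt_notifier *pn, |
| | *                            struct task_struct *next) |
| | *   { |
| | *           // current is being switched out in favour of @next |
| | *   } |
| | * |
| | *   static struct preempt_ops my_preempt_ops = { |
| | *           .sched_in  = my_sched_in, |
| | *           .sched_out = my_sched_out, |
| | *   }; |
| | * |
| | *   preempt_notifier_init(&my_notifier, &my_preempt_ops); |
| | *   preempt_notifier_register(&my_notifier);  // affects current only |
| | */ |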
| 2154 | |
| 2155 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2156 | { |
| 2157 | struct preempt_notifier *notifier; |
| 2158 | |
| 2159 | hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) |
| 2160 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); |
| 2161 | } |
| 2162 | |
| 2163 | static void |
| 2164 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2165 | struct task_struct *next) |
| 2166 | { |
| 2167 | struct preempt_notifier *notifier; |
| 2168 | |
| 2169 | hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) |
| 2170 | notifier->ops->sched_out(notifier, next); |
| 2171 | } |
| 2172 | |
| 2173 | #else /* !CONFIG_PREEMPT_NOTIFIERS */ |
| 2174 | |
| 2175 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2176 | { |
| 2177 | } |
| 2178 | |
| 2179 | static void |
| 2180 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2181 | struct task_struct *next) |
| 2182 | { |
| 2183 | } |
| 2184 | |
| 2185 | #endif /* CONFIG_PREEMPT_NOTIFIERS */ |
| 2186 | |
| 2187 | /** |
| 2188 | * prepare_task_switch - prepare to switch tasks |
| 2189 | * @rq: the runqueue preparing to switch |
| 2190 | * @prev: the current task that is being switched out |
| 2191 | * @next: the task we are going to switch to. |
| 2192 | * |
| 2193 | * This is called with the rq lock held and interrupts off. It must |
| 2194 | * be paired with a subsequent finish_task_switch after the context |
| 2195 | * switch. |
| 2196 | * |
| 2197 | * prepare_task_switch sets up locking and calls architecture specific |
| 2198 | * hooks. |
| 2199 | */ |
| 2200 | static inline void |
| 2201 | prepare_task_switch(struct rq *rq, struct task_struct *prev, |
| 2202 | struct task_struct *next) |
| 2203 | { |
| 2204 | trace_sched_switch(prev, next); |
| 2205 | sched_info_switch(rq, prev, next); |
| 2206 | perf_event_task_sched_out(prev, next); |
| 2207 | fire_sched_out_preempt_notifiers(prev, next); |
| 2208 | prepare_lock_switch(rq, next); |
| 2209 | prepare_arch_switch(next); |
| 2210 | } |
| 2211 | |
| 2212 | /** |
| 2213 | * finish_task_switch - clean up after a task-switch |
| 2214 | * @prev: the thread we just switched away from. |
| 2215 | * |
| 2216 | * finish_task_switch must be called after the context switch, paired |
| 2217 | * with a prepare_task_switch call before the context switch. |
| 2218 | * finish_task_switch will reconcile locking set up by prepare_task_switch, |
| 2219 | * and do any other architecture-specific cleanup actions. |
| 2220 | * |
| 2221 | * Note that we may have delayed dropping an mm in context_switch(). If |
| 2222 | * so, we finish that here outside of the runqueue lock. (Doing it |
| 2223 | * with the lock held can cause deadlocks; see schedule() for |
| 2224 | * details.) |
| 2225 | * |
| 2226 | * The context switch has flipped the stack from under us and restored the |
| 2227 | * local variables which were saved when this task called schedule() in the |
| 2228 | * past. prev == current is still correct but we need to recalculate this_rq |
| 2229 | * because prev may have moved to another CPU. |
| 2230 | */ |
| 2231 | static struct rq *finish_task_switch(struct task_struct *prev) |
| 2232 | __releases(rq->lock) |
| 2233 | { |
| 2234 | struct rq *rq = this_rq(); |
| 2235 | struct mm_struct *mm = rq->prev_mm; |
| 2236 | long prev_state; |
| 2237 | |
| 2238 | rq->prev_mm = NULL; |
| 2239 | |
| 2240 | /* |
| 2241 | * A task struct has one reference for the use as "current". |
| 2242 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
| 2243 | * schedule one last time. The schedule call will never return, and |
| 2244 | * the scheduled task must drop that reference. |
| 2245 | * The test for TASK_DEAD must occur while the runqueue locks are |
| 2246 | * still held, otherwise prev could be scheduled on another cpu, die |
| 2247 | * there before we look at prev->state, and then the reference would |
| 2248 | * be dropped twice. |
| 2249 | * Manfred Spraul <manfred@colorfullife.com> |
| 2250 | */ |
| 2251 | prev_state = prev->state; |
| 2252 | vtime_task_switch(prev); |
| 2253 | finish_arch_switch(prev); |
| 2254 | perf_event_task_sched_in(prev, current); |
| 2255 | finish_lock_switch(rq, prev); |
| 2256 | finish_arch_post_lock_switch(); |
| 2257 | |
| 2258 | fire_sched_in_preempt_notifiers(current); |
| 2259 | if (mm) |
| 2260 | mmdrop(mm); |
| 2261 | if (unlikely(prev_state == TASK_DEAD)) { |
| 2262 | if (prev->sched_class->task_dead) |
| 2263 | prev->sched_class->task_dead(prev); |
| 2264 | |
| 2265 | /* |
| 2266 | * Remove function-return probe instances associated with this |
| 2267 | * task and put them back on the free list. |
| 2268 | */ |
| 2269 | kprobe_flush_task(prev); |
| 2270 | put_task_struct(prev); |
| 2271 | } |
| 2272 | |
| 2273 | tick_nohz_task_switch(current); |
| 2274 | return rq; |
| 2275 | } |
| 2276 | |
| 2277 | #ifdef CONFIG_SMP |
| 2278 | |
| 2279 | /* rq->lock is NOT held, but preemption is disabled */ |
| 2280 | static void __balance_callback(struct rq *rq) |
| 2281 | { |
| 2282 | struct callback_head *head, *next; |
| 2283 | void (*func)(struct rq *rq); |
| 2284 | unsigned long flags; |
| 2285 | |
| 2286 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 2287 | head = rq->balance_callback; |
| 2288 | rq->balance_callback = NULL; |
| 2289 | while (head) { |
| 2290 | func = (void (*)(struct rq *))head->func; |
| 2291 | next = head->next; |
| 2292 | head->next = NULL; |
| 2293 | head = next; |
| 2294 | |
| 2295 | func(rq); |
| 2296 | } |
| 2297 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 2298 | } |
| 2299 | |
| 2300 | static inline void balance_callback(struct rq *rq) |
| 2301 | { |
| 2302 | if (unlikely(rq->balance_callback)) |
| 2303 | __balance_callback(rq); |
| 2304 | } |
| 2305 | |
| 2306 | #else |
| 2307 | |
| 2308 | static inline void balance_callback(struct rq *rq) |
| 2309 | { |
| 2310 | } |
| 2311 | |
| 2312 | #endif |
| 2313 | |
| 2314 | /** |
| 2315 | * schedule_tail - first thing a freshly forked thread must call. |
| 2316 | * @prev: the thread we just switched away from. |
| 2317 | */ |
| 2318 | asmlinkage __visible void schedule_tail(struct task_struct *prev) |
| 2319 | __releases(rq->lock) |
| 2320 | { |
| 2321 | struct rq *rq; |
| 2322 | |
| 2323 | /* finish_task_switch() drops rq->lock and enables preemption */ |
| 2324 | preempt_disable(); |
| 2325 | rq = finish_task_switch(prev); |
| 2326 | balance_callback(rq); |
| 2327 | preempt_enable(); |
| 2328 | |
| 2329 | if (current->set_child_tid) |
| 2330 | put_user(task_pid_vnr(current), current->set_child_tid); |
| 2331 | } |
| 2332 | |
| 2333 | /* |
| 2334 | * context_switch - switch to the new MM and the new thread's register state. |
| 2335 | */ |
| 2336 | static inline struct rq * |
| 2337 | context_switch(struct rq *rq, struct task_struct *prev, |
| 2338 | struct task_struct *next) |
| 2339 | { |
| 2340 | struct mm_struct *mm, *oldmm; |
| 2341 | |
| 2342 | prepare_task_switch(rq, prev, next); |
| 2343 | |
| 2344 | mm = next->mm; |
| 2345 | oldmm = prev->active_mm; |
| 2346 | /* |
| 2347 | * For paravirt, this is coupled with an exit in switch_to to |
| 2348 | * combine the page table reload and the switch backend into |
| 2349 | * one hypercall. |
| 2350 | */ |
| 2351 | arch_start_context_switch(prev); |
| 2352 | |
| 2353 | if (!mm) { |
| 2354 | next->active_mm = oldmm; |
| 2355 | atomic_inc(&oldmm->mm_count); |
| 2356 | enter_lazy_tlb(oldmm, next); |
| 2357 | } else |
| 2358 | switch_mm(oldmm, mm, next); |
| 2359 | |
| 2360 | if (!prev->mm) { |
| 2361 | prev->active_mm = NULL; |
| 2362 | rq->prev_mm = oldmm; |
| 2363 | } |
| 2364 | /* |
| 2365 | * The runqueue lock will be released by the next |
| 2366 | * task (which is an invalid locking op but in the case |
| 2367 | * of the scheduler it's an obvious special-case), so we |
| 2368 | * do an early lockdep release here: |
| 2369 | */ |
| 2370 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
| 2371 | |
| 2372 | context_tracking_task_switch(prev, next); |
| 2373 | /* Here we just switch the register state and the stack. */ |
| 2374 | switch_to(prev, next, prev); |
| 2375 | barrier(); |
| 2376 | |
| 2377 | return finish_task_switch(prev); |
| 2378 | } |
| 2379 | |
| 2380 | /* |
| 2381 | * nr_running and nr_context_switches: |
| 2382 | * |
| 2383 | * externally visible scheduler statistics: current number of runnable |
| 2384 | * threads, total number of context switches performed since bootup. |
| 2385 | */ |
| 2386 | unsigned long nr_running(void) |
| 2387 | { |
| 2388 | unsigned long i, sum = 0; |
| 2389 | |
| 2390 | for_each_online_cpu(i) |
| 2391 | sum += cpu_rq(i)->nr_running; |
| 2392 | |
| 2393 | return sum; |
| 2394 | } |
| 2395 | |
| 2396 | /* |
| 2397 | * Check if only the current task is running on the cpu. |
| 2398 | */ |
| 2399 | bool single_task_running(void) |
| 2400 | { |
| 2401 | return cpu_rq(smp_processor_id())->nr_running == 1; |
| 2405 | } |
| 2406 | EXPORT_SYMBOL(single_task_running); |
| 2407 | |
| 2408 | unsigned long long nr_context_switches(void) |
| 2409 | { |
| 2410 | int i; |
| 2411 | unsigned long long sum = 0; |
| 2412 | |
| 2413 | for_each_possible_cpu(i) |
| 2414 | sum += cpu_rq(i)->nr_switches; |
| 2415 | |
| 2416 | return sum; |
| 2417 | } |
| 2418 | |
| 2419 | unsigned long nr_iowait(void) |
| 2420 | { |
| 2421 | unsigned long i, sum = 0; |
| 2422 | |
| 2423 | for_each_possible_cpu(i) |
| 2424 | sum += atomic_read(&cpu_rq(i)->nr_iowait); |
| 2425 | |
| 2426 | return sum; |
| 2427 | } |
| 2428 | |
| 2429 | unsigned long nr_iowait_cpu(int cpu) |
| 2430 | { |
| 2431 | struct rq *this = cpu_rq(cpu); |
| 2432 | return atomic_read(&this->nr_iowait); |
| 2433 | } |
| 2434 | |
| 2435 | void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) |
| 2436 | { |
| 2437 | struct rq *rq = this_rq(); |
| 2438 | *nr_waiters = atomic_read(&rq->nr_iowait); |
| 2439 | *load = rq->load.weight; |
| 2440 | } |
| 2441 | |
| 2442 | #ifdef CONFIG_SMP |
| 2443 | |
| 2444 | /* |
| 2445 | * sched_exec - execve() is a valuable balancing opportunity, because at |
| 2446 | * this point the task has the smallest effective memory and cache footprint. |
| 2447 | */ |
| 2448 | void sched_exec(void) |
| 2449 | { |
| 2450 | struct task_struct *p = current; |
| 2451 | unsigned long flags; |
| 2452 | int dest_cpu; |
| 2453 | |
| 2454 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 2455 | dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); |
| 2456 | if (dest_cpu == smp_processor_id()) |
| 2457 | goto unlock; |
| 2458 | |
| 2459 | if (likely(cpu_active(dest_cpu))) { |
| 2460 | struct migration_arg arg = { p, dest_cpu }; |
| 2461 | |
| 2462 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 2463 | stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); |
| 2464 | return; |
| 2465 | } |
| 2466 | unlock: |
| 2467 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 2468 | } |
| 2469 | |
| 2470 | #endif |
| 2471 | |
| 2472 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
| 2473 | DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); |
| 2474 | |
| 2475 | EXPORT_PER_CPU_SYMBOL(kstat); |
| 2476 | EXPORT_PER_CPU_SYMBOL(kernel_cpustat); |
| 2477 | |
| 2478 | /* |
| 2479 | * Return accounted runtime for the task. |
| 2480 | * In case the task is currently running, return the runtime plus current's |
| 2481 | * pending runtime that has not been accounted yet. |
| 2482 | */ |
| 2483 | unsigned long long task_sched_runtime(struct task_struct *p) |
| 2484 | { |
| 2485 | unsigned long flags; |
| 2486 | struct rq *rq; |
| 2487 | u64 ns; |
| 2488 | |
| 2489 | #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) |
| 2490 | /* |
| 2491 | * 64-bit doesn't need locks to atomically read a 64-bit value. |
| 2492 | * So we have an optimization chance when the task's delta_exec is 0. |
| 2493 | * Reading ->on_cpu is racy, but this is ok. |
| 2494 | * |
| 2495 | * If we race with it leaving cpu, we'll take a lock. So we're correct. |
| 2496 | * If we race with it entering cpu, unaccounted time is 0. This is |
| 2497 | * indistinguishable from the read occurring a few cycles earlier. |
| 2498 | * If we see ->on_cpu without ->on_rq, the task is leaving, and has |
| 2499 | * been accounted, so we're correct here as well. |
| 2500 | */ |
| 2501 | if (!p->on_cpu || !task_on_rq_queued(p)) |
| 2502 | return p->se.sum_exec_runtime; |
| 2503 | #endif |
| 2504 | |
| 2505 | rq = task_rq_lock(p, &flags); |
| 2506 | /* |
| 2507 | * Must be ->curr _and_ ->on_rq. If dequeued, we would |
| 2508 | * project cycles that may never be accounted to this |
| 2509 | * thread, breaking clock_gettime(). |
| 2510 | */ |
| 2511 | if (task_current(rq, p) && task_on_rq_queued(p)) { |
| 2512 | update_rq_clock(rq); |
| 2513 | p->sched_class->update_curr(rq); |
| 2514 | } |
| 2515 | ns = p->se.sum_exec_runtime; |
| 2516 | task_rq_unlock(rq, p, &flags); |
| 2517 | |
| 2518 | return ns; |
| 2519 | } |
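| | |
| | /* |
| | * Illustrative userspace view (assuming the posix-cpu-timers path, which |
| | * uses this helper for the scheduler clock): |
| | * |
| | *   struct timespec ts; |
| | * |
| | *   clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts); |
| | *   // ts now roughly reflects this thread's se.sum_exec_runtime |
| | */ |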
| 2520 | |
| 2521 | /* |
| 2522 | * This function gets called by the timer code, with HZ frequency. |
| 2523 | * We call it with interrupts disabled. |
| 2524 | */ |
| 2525 | void scheduler_tick(void) |
| 2526 | { |
| 2527 | int cpu = smp_processor_id(); |
| 2528 | struct rq *rq = cpu_rq(cpu); |
| 2529 | struct task_struct *curr = rq->curr; |
| 2530 | |
| 2531 | sched_clock_tick(); |
| 2532 | |
| 2533 | raw_spin_lock(&rq->lock); |
| 2534 | update_rq_clock(rq); |
| 2535 | curr->sched_class->task_tick(rq, curr, 0); |
| 2536 | update_cpu_load_active(rq); |
| 2537 | calc_global_load_tick(rq); |
| 2538 | raw_spin_unlock(&rq->lock); |
| 2539 | |
| 2540 | perf_event_task_tick(); |
| 2541 | |
| 2542 | #ifdef CONFIG_SMP |
| 2543 | rq->idle_balance = idle_cpu(cpu); |
| 2544 | trigger_load_balance(rq); |
| 2545 | #endif |
| 2546 | rq_last_tick_reset(rq); |
| 2547 | } |
| 2548 | |
| 2549 | #ifdef CONFIG_NO_HZ_FULL |
| 2550 | /** |
| 2551 | * scheduler_tick_max_deferment |
| 2552 | * |
| 2553 | * Keep at least one tick per second when a single |
| 2554 | * active task is running because the scheduler doesn't |
| 2555 | * yet completely support a full dynticks environment. |
| 2556 | * |
| 2557 | * This makes sure that uptime, CFS vruntime, load |
| 2558 | * balancing, etc... continue to move forward, even |
| 2559 | * with a very low granularity. |
| 2560 | * |
| 2561 | * Return: Maximum deferment in nanoseconds. |
| 2562 | */ |
| 2563 | u64 scheduler_tick_max_deferment(void) |
| 2564 | { |
| 2565 | struct rq *rq = this_rq(); |
| 2566 | unsigned long next, now = READ_ONCE(jiffies); |
| 2567 | |
| 2568 | next = rq->last_sched_tick + HZ; |
| 2569 | |
| 2570 | if (time_before_eq(next, now)) |
| 2571 | return 0; |
| 2572 | |
| 2573 | return jiffies_to_nsecs(next - now); |
| 2574 | } |
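| | |
| | /* |
| | * Example: with HZ == 1000, if the last tick on this runqueue was 250 |
| | * jiffies ago, the tick can still be deferred for another 750 jiffies, |
| | * i.e. jiffies_to_nsecs(750) == 750ms. |
| | */ |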
| 2575 | #endif |
| 2576 | |
| 2577 | notrace unsigned long get_parent_ip(unsigned long addr) |
| 2578 | { |
| 2579 | if (in_lock_functions(addr)) { |
| 2580 | addr = CALLER_ADDR2; |
| 2581 | if (in_lock_functions(addr)) |
| 2582 | addr = CALLER_ADDR3; |
| 2583 | } |
| 2584 | return addr; |
| 2585 | } |
| 2586 | |
| 2587 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
| 2588 | defined(CONFIG_PREEMPT_TRACER)) |
| 2589 | |
| 2590 | void preempt_count_add(int val) |
| 2591 | { |
| 2592 | #ifdef CONFIG_DEBUG_PREEMPT |
| 2593 | /* |
| 2594 | * Underflow? |
| 2595 | */ |
| 2596 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
| 2597 | return; |
| 2598 | #endif |
| 2599 | __preempt_count_add(val); |
| 2600 | #ifdef CONFIG_DEBUG_PREEMPT |
| 2601 | /* |
| 2602 | * Spinlock count overflowing soon? |
| 2603 | */ |
| 2604 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
| 2605 | PREEMPT_MASK - 10); |
| 2606 | #endif |
| 2607 | if (preempt_count() == val) { |
| 2608 | unsigned long ip = get_parent_ip(CALLER_ADDR1); |
| 2609 | #ifdef CONFIG_DEBUG_PREEMPT |
| 2610 | current->preempt_disable_ip = ip; |
| 2611 | #endif |
| 2612 | trace_preempt_off(CALLER_ADDR0, ip); |
| 2613 | } |
| 2614 | } |
| 2615 | EXPORT_SYMBOL(preempt_count_add); |
| 2616 | NOKPROBE_SYMBOL(preempt_count_add); |
| 2617 | |
| 2618 | void preempt_count_sub(int val) |
| 2619 | { |
| 2620 | #ifdef CONFIG_DEBUG_PREEMPT |
| 2621 | /* |
| 2622 | * Underflow? |
| 2623 | */ |
| 2624 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
| 2625 | return; |
| 2626 | /* |
| 2627 | * Is the spinlock portion underflowing? |
| 2628 | */ |
| 2629 | if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && |
| 2630 | !(preempt_count() & PREEMPT_MASK))) |
| 2631 | return; |
| 2632 | #endif |
| 2633 | |
| 2634 | if (preempt_count() == val) |
| 2635 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
| 2636 | __preempt_count_sub(val); |
| 2637 | } |
| 2638 | EXPORT_SYMBOL(preempt_count_sub); |
| 2639 | NOKPROBE_SYMBOL(preempt_count_sub); |
| 2640 | |
| 2641 | #endif |
| 2642 | |
| 2643 | /* |
| 2644 | * Print scheduling while atomic bug: |
| 2645 | */ |
| 2646 | static noinline void __schedule_bug(struct task_struct *prev) |
| 2647 | { |
| 2648 | if (oops_in_progress) |
| 2649 | return; |
| 2650 | |
| 2651 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", |
| 2652 | prev->comm, prev->pid, preempt_count()); |
| 2653 | |
| 2654 | debug_show_held_locks(prev); |
| 2655 | print_modules(); |
| 2656 | if (irqs_disabled()) |
| 2657 | print_irqtrace_events(prev); |
| 2658 | #ifdef CONFIG_DEBUG_PREEMPT |
| 2659 | if (in_atomic_preempt_off()) { |
| 2660 | pr_err("Preemption disabled at:"); |
| 2661 | print_ip_sym(current->preempt_disable_ip); |
| 2662 | pr_cont("\n"); |
| 2663 | } |
| 2664 | #endif |
| 2665 | dump_stack(); |
| 2666 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); |
| 2667 | } |
| 2668 | |
| 2669 | /* |
| 2670 | * Various schedule()-time debugging checks and statistics: |
| 2671 | */ |
| 2672 | static inline void schedule_debug(struct task_struct *prev) |
| 2673 | { |
| 2674 | #ifdef CONFIG_SCHED_STACK_END_CHECK |
| 2675 | BUG_ON(unlikely(task_stack_end_corrupted(prev))); |
| 2676 | #endif |
| 2677 | /* |
| 2678 | * Test if we are atomic. Since do_exit() needs to call into |
| 2679 | * schedule() atomically, we ignore that path. Otherwise whine |
| 2680 | * if we are scheduling when we should not. |
| 2681 | */ |
| 2682 | if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD)) |
| 2683 | __schedule_bug(prev); |
| 2684 | rcu_sleep_check(); |
| 2685 | |
| 2686 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
| 2687 | |
| 2688 | schedstat_inc(this_rq(), sched_count); |
| 2689 | } |
| 2690 | |
| 2691 | /* |
| 2692 | * Pick up the highest-prio task: |
| 2693 | */ |
| 2694 | static inline struct task_struct * |
| 2695 | pick_next_task(struct rq *rq, struct task_struct *prev) |
| 2696 | { |
| 2697 | const struct sched_class *class = &fair_sched_class; |
| 2698 | struct task_struct *p; |
| 2699 | |
| 2700 | /* |
| 2701 | * Optimization: we know that if all tasks are in |
| 2702 | * the fair class we can call that function directly: |
| 2703 | */ |
| 2704 | if (likely(prev->sched_class == class && |
| 2705 | rq->nr_running == rq->cfs.h_nr_running)) { |
| 2706 | p = fair_sched_class.pick_next_task(rq, prev); |
| 2707 | if (unlikely(p == RETRY_TASK)) |
| 2708 | goto again; |
| 2709 | |
| 2710 | /* assumes fair_sched_class->next == idle_sched_class */ |
| 2711 | if (unlikely(!p)) |
| 2712 | p = idle_sched_class.pick_next_task(rq, prev); |
| 2713 | |
| 2714 | return p; |
| 2715 | } |
| 2716 | |
| 2717 | again: |
| 2718 | for_each_class(class) { |
| 2719 | p = class->pick_next_task(rq, prev); |
| 2720 | if (p) { |
| 2721 | if (unlikely(p == RETRY_TASK)) |
| 2722 | goto again; |
| 2723 | return p; |
| 2724 | } |
| 2725 | } |
| 2726 | |
| 2727 | BUG(); /* the idle class will always have a runnable task */ |
| 2728 | } |
| 2729 | |
| 2730 | /* |
| 2731 | * __schedule() is the main scheduler function. |
| 2732 | * |
| 2733 | * The main means of driving the scheduler and thus entering this function are: |
| 2734 | * |
| 2735 | * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. |
| 2736 | * |
| 2737 | * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return |
| 2738 | * paths. For example, see arch/x86/entry_64.S. |
| 2739 | * |
| 2740 | * To drive preemption between tasks, the scheduler sets the flag in timer |
| 2741 | * interrupt handler scheduler_tick(). |
| 2742 | * |
| 2743 | * 3. Wakeups don't really cause entry into schedule(). They add a |
| 2744 | * task to the run-queue and that's it. |
| 2745 | * |
| 2746 | * Now, if the new task added to the run-queue preempts the current |
| 2747 | * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets |
| 2748 | * called on the nearest possible occasion: |
| 2749 | * |
| 2750 | * - If the kernel is preemptible (CONFIG_PREEMPT=y): |
| 2751 | * |
| 2752 | * - in syscall or exception context, at the next outermost |
| 2753 | * preempt_enable(). (this might be as soon as the wake_up()'s |
| 2754 | * spin_unlock()!) |
| 2755 | * |
| 2756 | * - in IRQ context, return from interrupt-handler to |
| 2757 | * preemptible context |
| 2758 | * |
| 2759 | * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) |
| 2760 | * then at the next: |
| 2761 | * |
| 2762 | * - cond_resched() call |
| 2763 | * - explicit schedule() call |
| 2764 | * - return from syscall or exception to user-space |
| 2765 | * - return from interrupt-handler to user-space |
| 2766 | * |
| 2767 | * WARNING: must be called with preemption disabled! |
| 2768 | */ |
| 2769 | static void __sched __schedule(void) |
| 2770 | { |
| 2771 | struct task_struct *prev, *next; |
| 2772 | unsigned long *switch_count; |
| 2773 | struct rq *rq; |
| 2774 | int cpu; |
| 2775 | |
| 2776 | cpu = smp_processor_id(); |
| 2777 | rq = cpu_rq(cpu); |
| 2778 | rcu_note_context_switch(); |
| 2779 | prev = rq->curr; |
| 2780 | |
| 2781 | schedule_debug(prev); |
| 2782 | |
| 2783 | if (sched_feat(HRTICK)) |
| 2784 | hrtick_clear(rq); |
| 2785 | |
| 2786 | /* |
| 2787 | * Make sure that signal_pending_state()->signal_pending() below |
| 2788 | * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) |
| 2789 | * done by the caller to avoid the race with signal_wake_up(). |
| 2790 | */ |
| 2791 | smp_mb__before_spinlock(); |
| 2792 | raw_spin_lock_irq(&rq->lock); |
| 2793 | |
| 2794 | rq->clock_skip_update <<= 1; /* promote REQ to ACT */ |
| 2795 | |
| 2796 | switch_count = &prev->nivcsw; |
| 2797 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
| 2798 | if (unlikely(signal_pending_state(prev->state, prev))) { |
| 2799 | prev->state = TASK_RUNNING; |
| 2800 | } else { |
| 2801 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
| 2802 | prev->on_rq = 0; |
| 2803 | |
| 2804 | /* |
| 2805 | * If a worker went to sleep, notify and ask workqueue |
| 2806 | * whether it wants to wake up a task to maintain |
| 2807 | * concurrency. |
| 2808 | */ |
| 2809 | if (prev->flags & PF_WQ_WORKER) { |
| 2810 | struct task_struct *to_wakeup; |
| 2811 | |
| 2812 | to_wakeup = wq_worker_sleeping(prev, cpu); |
| 2813 | if (to_wakeup) |
| 2814 | try_to_wake_up_local(to_wakeup); |
| 2815 | } |
| 2816 | } |
| 2817 | switch_count = &prev->nvcsw; |
| 2818 | } |
| 2819 | |
| 2820 | if (task_on_rq_queued(prev)) |
| 2821 | update_rq_clock(rq); |
| 2822 | |
| 2823 | next = pick_next_task(rq, prev); |
| 2824 | clear_tsk_need_resched(prev); |
| 2825 | clear_preempt_need_resched(); |
| 2826 | rq->clock_skip_update = 0; |
| 2827 | |
| 2828 | if (likely(prev != next)) { |
| 2829 | rq->nr_switches++; |
| 2830 | rq->curr = next; |
| 2831 | ++*switch_count; |
| 2832 | |
| 2833 | rq = context_switch(rq, prev, next); /* unlocks the rq */ |
| 2834 | cpu = cpu_of(rq); |
| 2835 | } else |
| 2836 | raw_spin_unlock_irq(&rq->lock); |
| 2837 | |
| 2838 | balance_callback(rq); |
| 2839 | } |
| 2840 | |
| 2841 | static inline void sched_submit_work(struct task_struct *tsk) |
| 2842 | { |
| 2843 | if (!tsk->state || tsk_is_pi_blocked(tsk)) |
| 2844 | return; |
| 2845 | /* |
| 2846 | * If we are going to sleep and we have plugged IO queued, |
| 2847 | * make sure to submit it to avoid deadlocks. |
| 2848 | */ |
| 2849 | if (blk_needs_flush_plug(tsk)) |
| 2850 | blk_schedule_flush_plug(tsk); |
| 2851 | } |
| 2852 | |
| 2853 | asmlinkage __visible void __sched schedule(void) |
| 2854 | { |
| 2855 | struct task_struct *tsk = current; |
| 2856 | |
| 2857 | sched_submit_work(tsk); |
| 2858 | do { |
| 2859 | preempt_disable(); |
| 2860 | __schedule(); |
| 2861 | sched_preempt_enable_no_resched(); |
| 2862 | } while (need_resched()); |
| 2863 | } |
| 2864 | EXPORT_SYMBOL(schedule); |
| 2865 | |
| 2866 | #ifdef CONFIG_CONTEXT_TRACKING |
| 2867 | asmlinkage __visible void __sched schedule_user(void) |
| 2868 | { |
| 2869 | /* |
| 2870 | * If we come here after a random call to set_need_resched(), |
| 2871 | * or we have been woken up remotely but the IPI has not yet arrived, |
| 2872 | * we haven't yet exited the RCU idle mode. Do it here manually until |
| 2873 | * we find a better solution. |
| 2874 | * |
| 2875 | * NB: There are buggy callers of this function. Ideally we |
| 2876 | * should warn if prev_state != CONTEXT_USER, but that will trigger |
| 2877 | * too frequently to make sense yet. |
| 2878 | */ |
| 2879 | enum ctx_state prev_state = exception_enter(); |
| 2880 | schedule(); |
| 2881 | exception_exit(prev_state); |
| 2882 | } |
| 2883 | #endif |
| 2884 | |
| 2885 | /** |
| 2886 | * schedule_preempt_disabled - called with preemption disabled |
| 2887 | * |
| 2888 | * Returns with preemption disabled. Note: preempt_count must be 1 |
| 2889 | */ |
| 2890 | void __sched schedule_preempt_disabled(void) |
| 2891 | { |
| 2892 | sched_preempt_enable_no_resched(); |
| 2893 | schedule(); |
| 2894 | preempt_disable(); |
| 2895 | } |
| 2896 | |
| 2897 | static void __sched notrace preempt_schedule_common(void) |
| 2898 | { |
| 2899 | do { |
| 2900 | preempt_active_enter(); |
| 2901 | __schedule(); |
| 2902 | preempt_active_exit(); |
| 2903 | |
| 2904 | /* |
| 2905 | * Check again in case we missed a preemption opportunity |
| 2906 | * between schedule and now. |
| 2907 | */ |
| 2908 | } while (need_resched()); |
| 2909 | } |
| 2910 | |
| 2911 | #ifdef CONFIG_PREEMPT |
| 2912 | /* |
| 2913 | * This is the entry point to schedule() from in-kernel preemption |
| 2914 | * off of preempt_enable(). Kernel preemption off the return-from-interrupt |
| 2915 | * path is handled by preempt_schedule_irq(), which calls __schedule() directly. |
| 2916 | */ |
| 2917 | asmlinkage __visible void __sched notrace preempt_schedule(void) |
| 2918 | { |
| 2919 | /* |
| 2920 | * If there is a non-zero preempt_count or interrupts are disabled, |
| 2921 | * we do not want to preempt the current task. Just return. |
| 2922 | */ |
| 2923 | if (likely(!preemptible())) |
| 2924 | return; |
| 2925 | |
| 2926 | preempt_schedule_common(); |
| 2927 | } |
| 2928 | NOKPROBE_SYMBOL(preempt_schedule); |
| 2929 | EXPORT_SYMBOL(preempt_schedule); |
| 2930 | |
| 2931 | /** |
| 2932 | * preempt_schedule_notrace - preempt_schedule called by tracing |
| 2933 | * |
| 2934 | * The tracing infrastructure uses preempt_enable_notrace to prevent |
| 2935 | * recursion and tracing preempt enabling caused by the tracing |
| 2936 | * infrastructure itself. But as tracing can happen in areas coming |
| 2937 | * from userspace or just about to enter userspace, a preempt enable |
| 2938 | * can occur before user_exit() is called. This will cause the scheduler |
| 2939 | * to be called when the system is still in usermode. |
| 2940 | * |
| 2941 | * To prevent this, the preempt_enable_notrace will use this function |
| 2942 | * instead of preempt_schedule() to exit user context if needed before |
| 2943 | * calling the scheduler. |
| 2944 | */ |
| 2945 | asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) |
| 2946 | { |
| 2947 | enum ctx_state prev_ctx; |
| 2948 | |
| 2949 | if (likely(!preemptible())) |
| 2950 | return; |
| 2951 | |
| 2952 | do { |
| 2953 | /* |
| 2954 | * Use the raw __preempt_count() ops that don't call any functions. |
| 2955 | * We can't call functions before disabling preemption, which is |
| 2956 | * what disarms preemption tracing recursion. |
| 2957 | */ |
| 2958 | __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); |
| 2959 | barrier(); |
| 2960 | /* |
| 2961 | * Needs preempt disabled in case user_exit() is traced |
| 2962 | * and the tracer calls preempt_enable_notrace() causing |
| 2963 | * an infinite recursion. |
| 2964 | */ |
| 2965 | prev_ctx = exception_enter(); |
| 2966 | __schedule(); |
| 2967 | exception_exit(prev_ctx); |
| 2968 | |
| 2969 | barrier(); |
| 2970 | __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); |
| 2971 | } while (need_resched()); |
| 2972 | } |
| 2973 | EXPORT_SYMBOL_GPL(preempt_schedule_notrace); |
| 2974 | |
| 2975 | #endif /* CONFIG_PREEMPT */ |
| 2976 | |
| 2977 | /* |
| 2978 | * this is the entry point to schedule() from kernel preemption |
| 2979 | * off of irq context. |
| 2980 | * Note that this is called and returns with irqs disabled. This will |
| 2981 | * protect us against recursive calling from irq. |
| 2982 | */ |
| 2983 | asmlinkage __visible void __sched preempt_schedule_irq(void) |
| 2984 | { |
| 2985 | enum ctx_state prev_state; |
| 2986 | |
| 2987 | /* Catch callers which need to be fixed */ |
| 2988 | BUG_ON(preempt_count() || !irqs_disabled()); |
| 2989 | |
| 2990 | prev_state = exception_enter(); |
| 2991 | |
| 2992 | do { |
| 2993 | preempt_active_enter(); |
| 2994 | local_irq_enable(); |
| 2995 | __schedule(); |
| 2996 | local_irq_disable(); |
| 2997 | preempt_active_exit(); |
| 2998 | } while (need_resched()); |
| 2999 | |
| 3000 | exception_exit(prev_state); |
| 3001 | } |
| 3002 | |
| 3003 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
| 3004 | void *key) |
| 3005 | { |
| 3006 | return try_to_wake_up(curr->private, mode, wake_flags); |
| 3007 | } |
| 3008 | EXPORT_SYMBOL(default_wake_function); |
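| | |
| | /* |
| | * default_wake_function() is what generic waitqueue wakeups end up calling. |
| | * A minimal sketch of such a user (the waitqueue head and condition are |
| | * hypothetical): |
| | * |
| | *   static DECLARE_WAIT_QUEUE_HEAD(my_wq); |
| | *   static int my_cond; |
| | * |
| | *   // sleeper |
| | *   wait_event_interruptible(my_wq, my_cond); |
| | * |
| | *   // waker |
| | *   my_cond = 1; |
| | *   wake_up(&my_wq);  // reaches default_wake_function() via |
| | *                     // autoremove_wake_function() |
| | */ |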
| 3009 | |
| 3010 | #ifdef CONFIG_RT_MUTEXES |
| 3011 | |
| 3012 | /* |
| 3013 | * rt_mutex_setprio - set the current priority of a task |
| 3014 | * @p: task |
| 3015 | * @prio: prio value (kernel-internal form) |
| 3016 | * |
| 3017 | * This function changes the 'effective' priority of a task. It does |
| 3018 | * not touch ->normal_prio like __setscheduler(). |
| 3019 | * |
| 3020 | * Used by the rt_mutex code to implement priority inheritance |
| 3021 | * logic. Call site only calls if the priority of the task changed. |
| 3022 | */ |
| 3023 | void rt_mutex_setprio(struct task_struct *p, int prio) |
| 3024 | { |
| 3025 | int oldprio, queued, running, enqueue_flag = 0; |
| 3026 | struct rq *rq; |
| 3027 | const struct sched_class *prev_class; |
| 3028 | |
| 3029 | BUG_ON(prio > MAX_PRIO); |
| 3030 | |
| 3031 | rq = __task_rq_lock(p); |
| 3032 | |
| 3033 | /* |
| 3034 | * Idle task boosting is a nono in general. There is one |
| 3035 | * exception, when PREEMPT_RT and NOHZ is active: |
| 3036 | * |
| 3037 | * The idle task calls get_next_timer_interrupt() and holds |
| 3038 | * the timer wheel base->lock on the CPU and another CPU wants |
| 3039 | * to access the timer (probably to cancel it). We can safely |
| 3040 | * ignore the boosting request, as the idle CPU runs this code |
| 3041 | * with interrupts disabled and will complete the lock |
| 3042 | * protected section without being interrupted. So there is no |
| 3043 | * real need to boost. |
| 3044 | */ |
| 3045 | if (unlikely(p == rq->idle)) { |
| 3046 | WARN_ON(p != rq->curr); |
| 3047 | WARN_ON(p->pi_blocked_on); |
| 3048 | goto out_unlock; |
| 3049 | } |
| 3050 | |
| 3051 | trace_sched_pi_setprio(p, prio); |
| 3052 | oldprio = p->prio; |
| 3053 | prev_class = p->sched_class; |
| 3054 | queued = task_on_rq_queued(p); |
| 3055 | running = task_current(rq, p); |
| 3056 | if (queued) |
| 3057 | dequeue_task(rq, p, 0); |
| 3058 | if (running) |
| 3059 | put_prev_task(rq, p); |
| 3060 | |
| 3061 | /* |
| 3062 | * Boosting conditions are: |
| 3063 | * 1. -rt task is running and holds mutex A |
| 3064 | * --> -dl task blocks on mutex A |
| 3065 | * |
| 3066 | * 2. -dl task is running and holds mutex A |
| 3067 | * --> -dl task blocks on mutex A and could preempt the |
| 3068 | * running task |
| 3069 | */ |
| 3070 | if (dl_prio(prio)) { |
| 3071 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
| 3072 | if (!dl_prio(p->normal_prio) || |
| 3073 | (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { |
| 3074 | p->dl.dl_boosted = 1; |
| 3075 | p->dl.dl_throttled = 0; |
| 3076 | enqueue_flag = ENQUEUE_REPLENISH; |
| 3077 | } else |
| 3078 | p->dl.dl_boosted = 0; |
| 3079 | p->sched_class = &dl_sched_class; |
| 3080 | } else if (rt_prio(prio)) { |
| 3081 | if (dl_prio(oldprio)) |
| 3082 | p->dl.dl_boosted = 0; |
| 3083 | if (oldprio < prio) |
| 3084 | enqueue_flag = ENQUEUE_HEAD; |
| 3085 | p->sched_class = &rt_sched_class; |
| 3086 | } else { |
| 3087 | if (dl_prio(oldprio)) |
| 3088 | p->dl.dl_boosted = 0; |
| 3089 | if (rt_prio(oldprio)) |
| 3090 | p->rt.timeout = 0; |
| 3091 | p->sched_class = &fair_sched_class; |
| 3092 | } |
| 3093 | |
| 3094 | p->prio = prio; |
| 3095 | |
| 3096 | if (running) |
| 3097 | p->sched_class->set_curr_task(rq); |
| 3098 | if (queued) |
| 3099 | enqueue_task(rq, p, enqueue_flag); |
| 3100 | |
| 3101 | check_class_changed(rq, p, prev_class, oldprio); |
| 3102 | out_unlock: |
| 3103 | __task_rq_unlock(rq); |
| 3104 | } |
| 3105 | #endif |
| 3106 | |
| 3107 | void set_user_nice(struct task_struct *p, long nice) |
| 3108 | { |
| 3109 | int old_prio, delta, queued; |
| 3110 | unsigned long flags; |
| 3111 | struct rq *rq; |
| 3112 | |
| 3113 | if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) |
| 3114 | return; |
| 3115 | /* |
| 3116 | * We have to be careful: if called from sys_setpriority(), |
| 3117 | * the task might be in the middle of scheduling on another CPU. |
| 3118 | */ |
| 3119 | rq = task_rq_lock(p, &flags); |
| 3120 | /* |
| 3121 | * The RT priorities are set via sched_setscheduler(), but we still |
| 3122 | * allow the 'normal' nice value to be set - but as expected |
| 3123 | * it won't have any effect on scheduling while the task is |
| 3124 | * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: |
| 3125 | */ |
| 3126 | if (task_has_dl_policy(p) || task_has_rt_policy(p)) { |
| 3127 | p->static_prio = NICE_TO_PRIO(nice); |
| 3128 | goto out_unlock; |
| 3129 | } |
| 3130 | queued = task_on_rq_queued(p); |
| 3131 | if (queued) |
| 3132 | dequeue_task(rq, p, 0); |
| 3133 | |
| 3134 | p->static_prio = NICE_TO_PRIO(nice); |
| 3135 | set_load_weight(p); |
| 3136 | old_prio = p->prio; |
| 3137 | p->prio = effective_prio(p); |
| 3138 | delta = p->prio - old_prio; |
| 3139 | |
| 3140 | if (queued) { |
| 3141 | enqueue_task(rq, p, 0); |
| 3142 | /* |
| 3143 | * If the task increased its priority or is running and |
| 3144 | * lowered its priority, then reschedule its CPU: |
| 3145 | */ |
| 3146 | if (delta < 0 || (delta > 0 && task_running(rq, p))) |
| 3147 | resched_curr(rq); |
| 3148 | } |
| 3149 | out_unlock: |
| 3150 | task_rq_unlock(rq, p, &flags); |
| 3151 | } |
| 3152 | EXPORT_SYMBOL(set_user_nice); |
| 3153 | |
| 3154 | /* |
| 3155 | * can_nice - check if a task can reduce its nice value |
| 3156 | * @p: task |
| 3157 | * @nice: nice value |
| 3158 | */ |
| 3159 | int can_nice(const struct task_struct *p, const int nice) |
| 3160 | { |
| 3161 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
| 3162 | int nice_rlim = nice_to_rlimit(nice); |
| 3163 | |
| 3164 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || |
| 3165 | capable(CAP_SYS_NICE)); |
| 3166 | } |
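| | |
| | /* |
| | * Worked example: nice_to_rlimit() maps nice -5 to 25, so lowering a |
| | * task's nice value to -5 needs RLIMIT_NICE >= 25 (or CAP_SYS_NICE). |
| | * The extremes map as nice 19 -> 1 and nice -20 -> 40. |
| | */ |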
| 3167 | |
| 3168 | #ifdef __ARCH_WANT_SYS_NICE |
| 3169 | |
| 3170 | /* |
| 3171 | * sys_nice - change the priority of the current process. |
| 3172 | * @increment: priority increment |
| 3173 | * |
| 3174 | * sys_setpriority is a more generic, but much slower function that |
| 3175 | * does similar things. |
| 3176 | */ |
| 3177 | SYSCALL_DEFINE1(nice, int, increment) |
| 3178 | { |
| 3179 | long nice, retval; |
| 3180 | |
| 3181 | /* |
| 3182 | * Setpriority might change our priority at the same moment. |
| 3183 | * We don't have to worry. Conceptually one call occurs first |
| 3184 | * and we have a single winner. |
| 3185 | */ |
| 3186 | increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); |
| 3187 | nice = task_nice(current) + increment; |
| 3188 | |
| 3189 | nice = clamp_val(nice, MIN_NICE, MAX_NICE); |
| 3190 | if (increment < 0 && !can_nice(current, nice)) |
| 3191 | return -EPERM; |
| 3192 | |
| 3193 | retval = security_task_setnice(current, nice); |
| 3194 | if (retval) |
| 3195 | return retval; |
| 3196 | |
| 3197 | set_user_nice(current, nice); |
| 3198 | return 0; |
| 3199 | } |
| 3200 | |
| 3201 | #endif |
| 3202 | |
| 3203 | /** |
| 3204 | * task_prio - return the priority value of a given task. |
| 3205 | * @p: the task in question. |
| 3206 | * |
| 3207 | * Return: The priority value as seen by users in /proc: |
| 3208 | * normal tasks map to [0 ... 39] (nice -20 ... 19), RT tasks to |
| 3209 | * [-100 ... -2] and SCHED_DEADLINE tasks to -101. |
| 3210 | */ |
| 3211 | int task_prio(const struct task_struct *p) |
| 3212 | { |
| 3213 | return p->prio - MAX_RT_PRIO; |
| 3214 | } |
| 3215 | |
| 3216 | /** |
| 3217 | * idle_cpu - is a given cpu idle currently? |
| 3218 | * @cpu: the processor in question. |
| 3219 | * |
| 3220 | * Return: 1 if the CPU is currently idle. 0 otherwise. |
| 3221 | */ |
| 3222 | int idle_cpu(int cpu) |
| 3223 | { |
| 3224 | struct rq *rq = cpu_rq(cpu); |
| 3225 | |
| 3226 | if (rq->curr != rq->idle) |
| 3227 | return 0; |
| 3228 | |
| 3229 | if (rq->nr_running) |
| 3230 | return 0; |
| 3231 | |
| 3232 | #ifdef CONFIG_SMP |
| 3233 | if (!llist_empty(&rq->wake_list)) |
| 3234 | return 0; |
| 3235 | #endif |
| 3236 | |
| 3237 | return 1; |
| 3238 | } |
| 3239 | |
| 3240 | /** |
| 3241 | * idle_task - return the idle task for a given cpu. |
| 3242 | * @cpu: the processor in question. |
| 3243 | * |
| 3244 | * Return: The idle task for the cpu @cpu. |
| 3245 | */ |
| 3246 | struct task_struct *idle_task(int cpu) |
| 3247 | { |
| 3248 | return cpu_rq(cpu)->idle; |
| 3249 | } |
| 3250 | |
| 3251 | /** |
| 3252 | * find_process_by_pid - find a process with a matching PID value. |
| 3253 | * @pid: the pid in question. |
| 3254 | * |
| 3255 | * Return: The task of @pid, if found. %NULL otherwise. |
| 3256 | */ |
| 3257 | static struct task_struct *find_process_by_pid(pid_t pid) |
| 3258 | { |
| 3259 | return pid ? find_task_by_vpid(pid) : current; |
| 3260 | } |
| 3261 | |
| 3262 | /* |
| 3263 | * This function initializes the sched_dl_entity of a newly becoming |
| 3264 | * SCHED_DEADLINE task. |
| 3265 | * |
| 3266 | * Only the static values are considered here, the actual runtime and the |
| 3267 | * absolute deadline will be properly calculated when the task is enqueued |
| 3268 | * for the first time with its new policy. |
| 3269 | */ |
| 3270 | static void |
| 3271 | __setparam_dl(struct task_struct *p, const struct sched_attr *attr) |
| 3272 | { |
| 3273 | struct sched_dl_entity *dl_se = &p->dl; |
| 3274 | |
| 3275 | dl_se->dl_runtime = attr->sched_runtime; |
| 3276 | dl_se->dl_deadline = attr->sched_deadline; |
| 3277 | dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; |
| 3278 | dl_se->flags = attr->sched_flags; |
| 3279 | dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); |
| 3280 | |
| 3281 | /* |
| 3282 | * Changing the parameters of a task is 'tricky' and we're not doing |
| 3283 | * the correct thing -- also see task_dead_dl() and switched_from_dl(). |
| 3284 | * |
| 3285 | * What we SHOULD do is delay the bandwidth release until the 0-lag |
| 3286 | * point. This would include retaining the task_struct until that time |
| 3287 | * and change dl_overflow() to not immediately decrement the current |
| 3288 | * amount. |
| 3289 | * |
| 3290 | * Instead we retain the current runtime/deadline and let the new |
| 3291 | * parameters take effect after the current reservation period lapses. |
| 3292 | * This is safe (albeit pessimistic) because the 0-lag point is always |
| 3293 | * before the current scheduling deadline. |
| 3294 | * |
| 3295 | * We can still have temporary overloads because we do not delay the |
| 3296 | * change in bandwidth until that time; so admission control is |
| 3297 | * not on the safe side. It does however guarantee tasks will never |
| 3298 | * consume more than promised. |
| 3299 | */ |
| 3300 | } |
| 3301 | |
| 3302 | /* |
| 3303 | * sched_setparam() passes in -1 for its policy, to let the functions |
| 3304 | * it calls know not to change it. |
| 3305 | */ |
| 3306 | #define SETPARAM_POLICY -1 |
| 3307 | |
| 3308 | static void __setscheduler_params(struct task_struct *p, |
| 3309 | const struct sched_attr *attr) |
| 3310 | { |
| 3311 | int policy = attr->sched_policy; |
| 3312 | |
| 3313 | if (policy == SETPARAM_POLICY) |
| 3314 | policy = p->policy; |
| 3315 | |
| 3316 | p->policy = policy; |
| 3317 | |
| 3318 | if (dl_policy(policy)) |
| 3319 | __setparam_dl(p, attr); |
| 3320 | else if (fair_policy(policy)) |
| 3321 | p->static_prio = NICE_TO_PRIO(attr->sched_nice); |
| 3322 | |
| 3323 | /* |
| 3324 | * __sched_setscheduler() ensures attr->sched_priority == 0 when |
| 3325 | * !rt_policy. Always setting this ensures that things like |
| 3326 | * getparam()/getattr() don't report silly values for !rt tasks. |
| 3327 | */ |
| 3328 | p->rt_priority = attr->sched_priority; |
| 3329 | p->normal_prio = normal_prio(p); |
| 3330 | set_load_weight(p); |
| 3331 | } |
| 3332 | |
| 3333 | /* Actually do priority change: must hold pi & rq lock. */ |
| 3334 | static void __setscheduler(struct rq *rq, struct task_struct *p, |
| 3335 | const struct sched_attr *attr, bool keep_boost) |
| 3336 | { |
| 3337 | __setscheduler_params(p, attr); |
| 3338 | |
| 3339 | /* |
| 3340 | * Keep a potential priority boosting if called from |
| 3341 | * sched_setscheduler(). |
| 3342 | */ |
| 3343 | if (keep_boost) |
| 3344 | p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); |
| 3345 | else |
| 3346 | p->prio = normal_prio(p); |
| 3347 | |
| 3348 | if (dl_prio(p->prio)) |
| 3349 | p->sched_class = &dl_sched_class; |
| 3350 | else if (rt_prio(p->prio)) |
| 3351 | p->sched_class = &rt_sched_class; |
| 3352 | else |
| 3353 | p->sched_class = &fair_sched_class; |
| 3354 | } |
| 3355 | |
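| | /*
| | * Fill @attr with the -deadline parameters currently in effect for @p;
| | * the counterpart of __setparam_dl() above.
| | */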
| 3356 | static void |
| 3357 | __getparam_dl(struct task_struct *p, struct sched_attr *attr) |
| 3358 | { |
| 3359 | struct sched_dl_entity *dl_se = &p->dl; |
| 3360 | |
| 3361 | attr->sched_priority = p->rt_priority; |
| 3362 | attr->sched_runtime = dl_se->dl_runtime; |
| 3363 | attr->sched_deadline = dl_se->dl_deadline; |
| 3364 | attr->sched_period = dl_se->dl_period; |
| 3365 | attr->sched_flags = dl_se->flags; |
| 3366 | } |
| 3367 | |
| 3368 | /* |
| 3369 | * This function validates the new parameters of a -deadline task. |
| 3370 | * We ask for the deadline to be non-zero and greater than or equal
| 3371 | * to the runtime, and for the period to be either zero or greater
| 3372 | * than or equal to the deadline. Furthermore, we have to be sure that
| 3373 | * user parameters are above the internal resolution of 1us (we |
| 3374 | * check sched_runtime only since it is always the smaller one) and |
| 3375 | * below 2^63 ns (we have to check both sched_deadline and |
| 3376 | * sched_period, as the latter can be zero). |
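| | *
| | * For example (illustrative values only): runtime = 10ms, deadline = 30ms
| | * and period = 100ms, all expressed in nanoseconds, pass these checks,
| | * while runtime = 30ms with deadline = 10ms does not.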
| 3377 | */ |
| 3378 | static bool |
| 3379 | __checkparam_dl(const struct sched_attr *attr) |
| 3380 | { |
| 3381 | /* deadline != 0 */ |
| 3382 | if (attr->sched_deadline == 0) |
| 3383 | return false; |
| 3384 | |
| 3385 | /* |
| 3386 | * Since we truncate DL_SCALE bits, make sure we're at least |
| 3387 | * that big. |
| 3388 | */ |
| 3389 | if (attr->sched_runtime < (1ULL << DL_SCALE)) |
| 3390 | return false; |
| 3391 | |
| 3392 | /* |
| 3393 | * Since we use the MSB for wrap-around and sign issues, make |
| 3394 | * sure it's not set (mind that period can be equal to zero). |
| 3395 | */ |
| 3396 | if (attr->sched_deadline & (1ULL << 63) || |
| 3397 | attr->sched_period & (1ULL << 63)) |
| 3398 | return false; |
| 3399 | |
| 3400 | /* runtime <= deadline <= period (if period != 0) */ |
| 3401 | if ((attr->sched_period != 0 && |
| 3402 | attr->sched_period < attr->sched_deadline) || |
| 3403 | attr->sched_deadline < attr->sched_runtime) |
| 3404 | return false; |
| 3405 | |
| 3406 | return true; |
| 3407 | } |
| 3408 | |
| 3409 | /* |
| 3410 | * Check whether the target process has a UID that matches the current process's.
| 3411 | */
| 3412 | static bool check_same_owner(struct task_struct *p) |
| 3413 | { |
| 3414 | const struct cred *cred = current_cred(), *pcred; |
| 3415 | bool match; |
| 3416 | |
| 3417 | rcu_read_lock(); |
| 3418 | pcred = __task_cred(p); |
| 3419 | match = (uid_eq(cred->euid, pcred->euid) || |
| 3420 | uid_eq(cred->euid, pcred->uid)); |
| 3421 | rcu_read_unlock(); |
| 3422 | return match; |
| 3423 | } |
| 3424 | |
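| | /*
| | * Return true if the -deadline parameters in @attr differ from the ones
| | * @p is currently using, false otherwise.
| | */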
| 3425 | static bool dl_param_changed(struct task_struct *p, |
| 3426 | const struct sched_attr *attr) |
| 3427 | { |
| 3428 | struct sched_dl_entity *dl_se = &p->dl; |
| 3429 | |
| 3430 | if (dl_se->dl_runtime != attr->sched_runtime || |
| 3431 | dl_se->dl_deadline != attr->sched_deadline || |
| 3432 | dl_se->dl_period != attr->sched_period || |
| 3433 | dl_se->flags != attr->sched_flags) |
| 3434 | return true; |
| 3435 | |
| 3436 | return false; |
| 3437 | } |
| 3438 | |
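| | /*
| | * Common implementation behind sched_setscheduler() and sched_setattr():
| | * validates @attr, performs permission and bandwidth checks (permission
| | * checks only when @user is true) and switches @p to its new policy,
| | * class and priority. Returns 0 on success or a negative error code.
| | */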
| 3439 | static int __sched_setscheduler(struct task_struct *p, |
| 3440 | const struct sched_attr *attr, |
| 3441 | bool user) |
| 3442 | { |
| 3443 | int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : |
| 3444 | MAX_RT_PRIO - 1 - attr->sched_priority; |
| 3445 | int retval, oldprio, oldpolicy = -1, queued, running; |
| 3446 | int new_effective_prio, policy = attr->sched_policy; |
| 3447 | unsigned long flags; |
| 3448 | const struct sched_class *prev_class; |
| 3449 | struct rq *rq; |
| 3450 | int reset_on_fork; |
| 3451 | |
| 3452 | /* may grab non-irq protected spin_locks */ |
| 3453 | BUG_ON(in_interrupt()); |
| 3454 | recheck: |
| 3455 | /* double check policy once rq lock held */ |
| 3456 | if (policy < 0) { |
| 3457 | reset_on_fork = p->sched_reset_on_fork; |
| 3458 | policy = oldpolicy = p->policy; |
| 3459 | } else { |
| 3460 | reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); |
| 3461 | |
| 3462 | if (policy != SCHED_DEADLINE && |
| 3463 | policy != SCHED_FIFO && policy != SCHED_RR && |
| 3464 | policy != SCHED_NORMAL && policy != SCHED_BATCH && |
| 3465 | policy != SCHED_IDLE) |
| 3466 | return -EINVAL; |
| 3467 | } |
| 3468 | |
| 3469 | if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) |
| 3470 | return -EINVAL; |
| 3471 | |
| 3472 | /* |
| 3473 | * Valid priorities for SCHED_FIFO and SCHED_RR are |
| 3474 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, |
| 3475 | * SCHED_BATCH and SCHED_IDLE is 0. |
| 3476 | */ |
| 3477 | if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || |
| 3478 | (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) |
| 3479 | return -EINVAL; |
| 3480 | if ((dl_policy(policy) && !__checkparam_dl(attr)) || |
| 3481 | (rt_policy(policy) != (attr->sched_priority != 0))) |
| 3482 | return -EINVAL; |
| 3483 | |
| 3484 | /* |
| 3485 | * Allow unprivileged RT tasks to decrease priority: |
| 3486 | */ |
| 3487 | if (user && !capable(CAP_SYS_NICE)) { |
| 3488 | if (fair_policy(policy)) { |
| 3489 | if (attr->sched_nice < task_nice(p) && |
| 3490 | !can_nice(p, attr->sched_nice)) |
| 3491 | return -EPERM; |
| 3492 | } |
| 3493 | |
| 3494 | if (rt_policy(policy)) { |
| 3495 | unsigned long rlim_rtprio = |
| 3496 | task_rlimit(p, RLIMIT_RTPRIO); |
| 3497 | |
| 3498 | /* can't set/change the rt policy */ |
| 3499 | if (policy != p->policy && !rlim_rtprio) |
| 3500 | return -EPERM; |
| 3501 | |
| 3502 | /* can't increase priority */ |
| 3503 | if (attr->sched_priority > p->rt_priority && |
| 3504 | attr->sched_priority > rlim_rtprio) |
| 3505 | return -EPERM; |
| 3506 | } |
| 3507 | |
| 3508 | /* |
| 3509 | * Can't set/change SCHED_DEADLINE policy at all for now |
| 3510 | * (safest behavior); in the future we would like to allow |
| 3511 | * unprivileged DL tasks to increase their relative deadline |
| 3512 | * or reduce their runtime (both ways reducing utilization) |
| 3513 | */ |
| 3514 | if (dl_policy(policy)) |
| 3515 | return -EPERM; |
| 3516 | |
| 3517 | /* |
| 3518 | * Treat SCHED_IDLE as nice 20. Only allow a switch to |
| 3519 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. |
| 3520 | */ |
| 3521 | if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { |
| 3522 | if (!can_nice(p, task_nice(p))) |
| 3523 | return -EPERM; |
| 3524 | } |
| 3525 | |
| 3526 | /* can't change other user's priorities */ |
| 3527 | if (!check_same_owner(p)) |
| 3528 | return -EPERM; |
| 3529 | |
| 3530 | /* Normal users shall not reset the sched_reset_on_fork flag */ |
| 3531 | if (p->sched_reset_on_fork && !reset_on_fork) |
| 3532 | return -EPERM; |
| 3533 | } |
| 3534 | |
| 3535 | if (user) { |
| 3536 | retval = security_task_setscheduler(p); |
| 3537 | if (retval) |
| 3538 | return retval; |
| 3539 | } |
| 3540 | |
| 3541 | /* |
| 3542 | * make sure no PI-waiters arrive (or leave) while we are |
| 3543 | * changing the priority of the task: |
| 3544 | * |
| 3545 | * To be able to change p->policy safely, the appropriate |
| 3546 | * runqueue lock must be held. |
| 3547 | */ |
| 3548 | rq = task_rq_lock(p, &flags); |
| 3549 | |
| 3550 | /* |
| 3551 | * Changing the policy of the stop threads is a very bad idea
| 3552 | */ |
| 3553 | if (p == rq->stop) { |
| 3554 | task_rq_unlock(rq, p, &flags); |
| 3555 | return -EINVAL; |
| 3556 | } |
| 3557 | |
| 3558 | /* |
| 3559 | * If not changing anything there's no need to proceed further, |
| 3560 | * but store a possible modification of reset_on_fork. |
| 3561 | */ |
| 3562 | if (unlikely(policy == p->policy)) { |
| 3563 | if (fair_policy(policy) && attr->sched_nice != task_nice(p)) |
| 3564 | goto change; |
| 3565 | if (rt_policy(policy) && attr->sched_priority != p->rt_priority) |
| 3566 | goto change; |
| 3567 | if (dl_policy(policy) && dl_param_changed(p, attr)) |
| 3568 | goto change; |
| 3569 | |
| 3570 | p->sched_reset_on_fork = reset_on_fork; |
| 3571 | task_rq_unlock(rq, p, &flags); |
| 3572 | return 0; |
| 3573 | } |
| 3574 | change: |
| 3575 | |
| 3576 | if (user) { |
| 3577 | #ifdef CONFIG_RT_GROUP_SCHED |
| 3578 | /* |
| 3579 | * Do not allow realtime tasks into groups that have no runtime |
| 3580 | * assigned. |
| 3581 | */ |
| 3582 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
| 3583 | task_group(p)->rt_bandwidth.rt_runtime == 0 && |
| 3584 | !task_group_is_autogroup(task_group(p))) { |
| 3585 | task_rq_unlock(rq, p, &flags); |
| 3586 | return -EPERM; |
| 3587 | } |
| 3588 | #endif |
| 3589 | #ifdef CONFIG_SMP |
| 3590 | if (dl_bandwidth_enabled() && dl_policy(policy)) { |
| 3591 | cpumask_t *span = rq->rd->span; |
| 3592 | |
| 3593 | /* |
| 3594 | * Don't allow tasks with an affinity mask smaller than |
| 3595 | * the entire root_domain to become SCHED_DEADLINE. We |
| 3596 | * will also fail if there's no bandwidth available. |
| 3597 | */ |
| 3598 | if (!cpumask_subset(span, &p->cpus_allowed) || |
| 3599 | rq->rd->dl_bw.bw == 0) { |
| 3600 | task_rq_unlock(rq, p, &flags); |
| 3601 | return -EPERM; |
| 3602 | } |
| 3603 | } |
| 3604 | #endif |
| 3605 | } |
| 3606 | |
| 3607 | /* recheck policy now with rq lock held */ |
| 3608 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
| 3609 | policy = oldpolicy = -1; |
| 3610 | task_rq_unlock(rq, p, &flags); |
| 3611 | goto recheck; |
| 3612 | } |
| 3613 | |
| 3614 | /* |
| 3615 | * If setscheduling to SCHED_DEADLINE (or changing the parameters |
| 3616 | * of a SCHED_DEADLINE task) we need to check if enough bandwidth |
| 3617 | * is available. |
| 3618 | */ |
| 3619 | if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { |
| 3620 | task_rq_unlock(rq, p, &flags); |
| 3621 | return -EBUSY; |
| 3622 | } |
| 3623 | |
| 3624 | p->sched_reset_on_fork = reset_on_fork; |
| 3625 | oldprio = p->prio; |
| 3626 | |
| 3627 | /* |
| 3628 | * Take priority boosted tasks into account. If the new |
| 3629 | * effective priority is unchanged, we just store the new |
| 3630 | * normal parameters and do not touch the scheduler class and |
| 3631 | * the runqueue. This will be done when the task deboosts
| 3632 | * itself.
| 3633 | */ |
| 3634 | new_effective_prio = rt_mutex_get_effective_prio(p, newprio); |
| 3635 | if (new_effective_prio == oldprio) { |
| 3636 | __setscheduler_params(p, attr); |
| 3637 | task_rq_unlock(rq, p, &flags); |
| 3638 | return 0; |
| 3639 | } |
| 3640 | |
| 3641 | queued = task_on_rq_queued(p); |
| 3642 | running = task_current(rq, p); |
| 3643 | if (queued) |
| 3644 | dequeue_task(rq, p, 0); |
| 3645 | if (running) |
| 3646 | put_prev_task(rq, p); |
| 3647 | |
| 3648 | prev_class = p->sched_class; |
| 3649 | __setscheduler(rq, p, attr, true); |
| 3650 | |
| 3651 | if (running) |
| 3652 | p->sched_class->set_curr_task(rq); |
| 3653 | if (queued) { |
| 3654 | /* |
| 3655 | * We enqueue to tail when the priority of a task is |
| 3656 | * increased (user space view). |
| 3657 | */ |
| 3658 | enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); |
| 3659 | } |
| 3660 | |
| 3661 | check_class_changed(rq, p, prev_class, oldprio); |
| 3662 | task_rq_unlock(rq, p, &flags); |
| 3663 | |
| 3664 | rt_mutex_adjust_pi(p); |
| 3665 | |
| 3666 | return 0; |
| 3667 | } |
| 3668 | |
| 3669 | static int _sched_setscheduler(struct task_struct *p, int policy, |
| 3670 | const struct sched_param *param, bool check) |
| 3671 | { |
| 3672 | struct sched_attr attr = { |
| 3673 | .sched_policy = policy, |
| 3674 | .sched_priority = param->sched_priority, |
| 3675 | .sched_nice = PRIO_TO_NICE(p->static_prio), |
| 3676 | }; |
| 3677 | |
| 3678 | /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ |
| 3679 | if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { |
| 3680 | attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; |
| 3681 | policy &= ~SCHED_RESET_ON_FORK; |
| 3682 | attr.sched_policy = policy; |
| 3683 | } |
| 3684 | |
| 3685 | return __sched_setscheduler(p, &attr, check); |
| 3686 | } |
| 3687 | /** |
| 3688 | * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. |
| 3689 | * @p: the task in question. |
| 3690 | * @policy: new policy. |
| 3691 | * @param: structure containing the new RT priority. |
| 3692 | * |
| 3693 | * Return: 0 on success. An error code otherwise. |
| 3694 | * |
| 3695 | * NOTE that the task may be already dead. |
| 3696 | */ |
| 3697 | int sched_setscheduler(struct task_struct *p, int policy, |
| 3698 | const struct sched_param *param) |
| 3699 | { |
| 3700 | return _sched_setscheduler(p, policy, param, true); |
| 3701 | } |
| 3702 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
| 3703 | |
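| | /**
| | * sched_setattr - change the scheduling policy and extended parameters of a thread.
| | * @p: the task in question.
| | * @attr: structure containing the new extended parameters.
| | *
| | * Return: 0 on success. An error code otherwise.
| | */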
| 3704 | int sched_setattr(struct task_struct *p, const struct sched_attr *attr) |
| 3705 | { |
| 3706 | return __sched_setscheduler(p, attr, true); |
| 3707 | } |
| 3708 | EXPORT_SYMBOL_GPL(sched_setattr); |
| 3709 | |
| 3710 | /** |
| 3711 | * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. |
| 3712 | * @p: the task in question. |
| 3713 | * @policy: new policy. |
| 3714 | * @param: structure containing the new RT priority. |
| 3715 | * |
| 3716 | * Just like sched_setscheduler, only don't bother checking if the |
| 3717 | * current context has permission. For example, this is needed in |
| 3718 | * stop_machine(): we create temporary high priority worker threads, |
| 3719 | * but our caller might not have that capability. |
| 3720 | * |
| 3721 | * Return: 0 on success. An error code otherwise. |
| 3722 | */ |
| 3723 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
| 3724 | const struct sched_param *param) |
| 3725 | { |
| 3726 | return _sched_setscheduler(p, policy, param, false); |
| 3727 | } |
| 3728 | |
| 3729 | static int |
| 3730 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
| 3731 | { |
| 3732 | struct sched_param lparam; |
| 3733 | struct task_struct *p; |
| 3734 | int retval; |
| 3735 | |
| 3736 | if (!param || pid < 0) |
| 3737 | return -EINVAL; |
| 3738 | if (copy_from_user(&lparam, param, sizeof(struct sched_param))) |
| 3739 | return -EFAULT; |
| 3740 | |
| 3741 | rcu_read_lock(); |
| 3742 | retval = -ESRCH; |
| 3743 | p = find_process_by_pid(pid); |
| 3744 | if (p != NULL) |
| 3745 | retval = sched_setscheduler(p, policy, &lparam); |
| 3746 | rcu_read_unlock(); |
| 3747 | |
| 3748 | return retval; |
| 3749 | } |
| 3750 | |
| 3751 | /* |
| 3752 | * Mimics kernel/events/core.c perf_copy_attr(). |
| 3753 | */ |
| 3754 | static int sched_copy_attr(struct sched_attr __user *uattr, |
| 3755 | struct sched_attr *attr) |
| 3756 | { |
| 3757 | u32 size; |
| 3758 | int ret; |
| 3759 | |
| 3760 | if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) |
| 3761 | return -EFAULT; |
| 3762 | |
| 3763 | /* |
| 3764 | * Zero the full structure, so that a short copy leaves the rest zeroed.
| 3765 | */ |
| 3766 | memset(attr, 0, sizeof(*attr)); |
| 3767 | |
| 3768 | ret = get_user(size, &uattr->size); |
| 3769 | if (ret) |
| 3770 | return ret; |
| 3771 | |
| 3772 | if (size > PAGE_SIZE) /* silly large */ |
| 3773 | goto err_size; |
| 3774 | |
| 3775 | if (!size) /* abi compat */ |
| 3776 | size = SCHED_ATTR_SIZE_VER0; |
| 3777 | |
| 3778 | if (size < SCHED_ATTR_SIZE_VER0) |
| 3779 | goto err_size; |
| 3780 | |
| 3781 | /* |
| 3782 | * If we're handed a bigger struct than we know of, |
| 3783 | * ensure all the unknown bits are 0 - i.e. new |
| 3784 | * user-space does not rely on any kernel feature |
| 3785 | * extensions we don't know about yet.
| 3786 | */ |
| 3787 | if (size > sizeof(*attr)) { |
| 3788 | unsigned char __user *addr; |
| 3789 | unsigned char __user *end; |
| 3790 | unsigned char val; |
| 3791 | |
| 3792 | addr = (void __user *)uattr + sizeof(*attr); |
| 3793 | end = (void __user *)uattr + size; |
| 3794 | |
| 3795 | for (; addr < end; addr++) { |
| 3796 | ret = get_user(val, addr); |
| 3797 | if (ret) |
| 3798 | return ret; |
| 3799 | if (val) |
| 3800 | goto err_size; |
| 3801 | } |
| 3802 | size = sizeof(*attr); |
| 3803 | } |
| 3804 | |
| 3805 | ret = copy_from_user(attr, uattr, size); |
| 3806 | if (ret) |
| 3807 | return -EFAULT; |
| 3808 | |
| 3809 | /* |
| 3810 | * XXX: do we want to be lenient like existing syscalls; or do we want |
| 3811 | * to be strict and return an error on out-of-bounds values? |
| 3812 | */ |
| 3813 | attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); |
| 3814 | |
| 3815 | return 0; |
| 3816 | |
| 3817 | err_size: |
| 3818 | put_user(sizeof(*attr), &uattr->size); |
| 3819 | return -E2BIG; |
| 3820 | } |
| 3821 | |
| 3822 | /** |
| 3823 | * sys_sched_setscheduler - set/change the scheduler policy and RT priority |
| 3824 | * @pid: the pid in question. |
| 3825 | * @policy: new policy. |
| 3826 | * @param: structure containing the new RT priority. |
| 3827 | * |
| 3828 | * Return: 0 on success. An error code otherwise. |
| 3829 | */ |
| 3830 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
| 3831 | struct sched_param __user *, param) |
| 3832 | { |
| 3833 | /* negative values for policy are not valid */ |
| 3834 | if (policy < 0) |
| 3835 | return -EINVAL; |
| 3836 | |
| 3837 | return do_sched_setscheduler(pid, policy, param); |
| 3838 | } |
| 3839 | |
| 3840 | /** |
| 3841 | * sys_sched_setparam - set/change the RT priority of a thread |
| 3842 | * @pid: the pid in question. |
| 3843 | * @param: structure containing the new RT priority. |
| 3844 | * |
| 3845 | * Return: 0 on success. An error code otherwise. |
| 3846 | */ |
| 3847 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
| 3848 | { |
| 3849 | return do_sched_setscheduler(pid, SETPARAM_POLICY, param); |
| 3850 | } |
| 3851 | |
| 3852 | /** |
| 3853 | * sys_sched_setattr - same as above, but with extended sched_attr |
| 3854 | * @pid: the pid in question. |
| 3855 | * @uattr: structure containing the extended parameters. |
| 3856 | * @flags: for future extension. |
| 3857 | */ |
| 3858 | SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, |
| 3859 | unsigned int, flags) |
| 3860 | { |
| 3861 | struct sched_attr attr; |
| 3862 | struct task_struct *p; |
| 3863 | int retval; |
| 3864 | |
| 3865 | if (!uattr || pid < 0 || flags) |
| 3866 | return -EINVAL; |
| 3867 | |
| 3868 | retval = sched_copy_attr(uattr, &attr); |
| 3869 | if (retval) |
| 3870 | return retval; |
| 3871 | |
| 3872 | if ((int)attr.sched_policy < 0) |
| 3873 | return -EINVAL; |
| 3874 | |
| 3875 | rcu_read_lock(); |
| 3876 | retval = -ESRCH; |
| 3877 | p = find_process_by_pid(pid); |
| 3878 | if (p != NULL) |
| 3879 | retval = sched_setattr(p, &attr); |
| 3880 | rcu_read_unlock(); |
| 3881 | |
| 3882 | return retval; |
| 3883 | } |
| 3884 | |
| 3885 | /** |
| 3886 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
| 3887 | * @pid: the pid in question. |
| 3888 | * |
| 3889 | * Return: On success, the policy of the thread. Otherwise, a negative error |
| 3890 | * code. |
| 3891 | */ |
| 3892 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
| 3893 | { |
| 3894 | struct task_struct *p; |
| 3895 | int retval; |
| 3896 | |
| 3897 | if (pid < 0) |
| 3898 | return -EINVAL; |
| 3899 | |
| 3900 | retval = -ESRCH; |
| 3901 | rcu_read_lock(); |
| 3902 | p = find_process_by_pid(pid); |
| 3903 | if (p) { |
| 3904 | retval = security_task_getscheduler(p); |
| 3905 | if (!retval) |
| 3906 | retval = p->policy |
| 3907 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
| 3908 | } |
| 3909 | rcu_read_unlock(); |
| 3910 | return retval; |
| 3911 | } |
| 3912 | |
| 3913 | /** |
| 3914 | * sys_sched_getparam - get the RT priority of a thread |
| 3915 | * @pid: the pid in question. |
| 3916 | * @param: structure containing the RT priority. |
| 3917 | * |
| 3918 | * Return: On success, 0 and the RT priority is in @param. Otherwise, an error |
| 3919 | * code. |
| 3920 | */ |
| 3921 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
| 3922 | { |
| 3923 | struct sched_param lp = { .sched_priority = 0 }; |
| 3924 | struct task_struct *p; |
| 3925 | int retval; |
| 3926 | |
| 3927 | if (!param || pid < 0) |
| 3928 | return -EINVAL; |
| 3929 | |
| 3930 | rcu_read_lock(); |
| 3931 | p = find_process_by_pid(pid); |
| 3932 | retval = -ESRCH; |
| 3933 | if (!p) |
| 3934 | goto out_unlock; |
| 3935 | |
| 3936 | retval = security_task_getscheduler(p); |
| 3937 | if (retval) |
| 3938 | goto out_unlock; |
| 3939 | |
| 3940 | if (task_has_rt_policy(p)) |
| 3941 | lp.sched_priority = p->rt_priority; |
| 3942 | rcu_read_unlock(); |
| 3943 | |
| 3944 | /* |
| 3945 | * This one might sleep; we cannot do it with a spinlock held ...
| 3946 | */ |
| 3947 | retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; |
| 3948 | |
| 3949 | return retval; |
| 3950 | |
| 3951 | out_unlock: |
| 3952 | rcu_read_unlock(); |
| 3953 | return retval; |
| 3954 | } |
| 3955 | |
| 3956 | static int sched_read_attr(struct sched_attr __user *uattr, |
| 3957 | struct sched_attr *attr, |
| 3958 | unsigned int usize) |
| 3959 | { |
| 3960 | int ret; |
| 3961 | |
| 3962 | if (!access_ok(VERIFY_WRITE, uattr, usize)) |
| 3963 | return -EFAULT; |
| 3964 | |
| 3965 | /* |
| 3966 | * If we're handed a smaller struct than we know of, |
| 3967 | * ensure all the unknown bits are 0 - i.e. old |
| 3968 | * user-space does not get incomplete information.
| 3969 | */ |
| 3970 | if (usize < sizeof(*attr)) { |
| 3971 | unsigned char *addr; |
| 3972 | unsigned char *end; |
| 3973 | |
| 3974 | addr = (void *)attr + usize; |
| 3975 | end = (void *)attr + sizeof(*attr); |
| 3976 | |
| 3977 | for (; addr < end; addr++) { |
| 3978 | if (*addr) |
| 3979 | return -EFBIG; |
| 3980 | } |
| 3981 | |
| 3982 | attr->size = usize; |
| 3983 | } |
| 3984 | |
| 3985 | ret = copy_to_user(uattr, attr, attr->size); |
| 3986 | if (ret) |
| 3987 | return -EFAULT; |
| 3988 | |
| 3989 | return 0; |
| 3990 | } |
| 3991 | |
| 3992 | /** |
| 3993 | * sys_sched_getattr - similar to sched_getparam, but with sched_attr |
| 3994 | * @pid: the pid in question. |
| 3995 | * @uattr: structure containing the extended parameters. |
| 3996 | * @size: sizeof(attr) for fwd/bwd comp. |
| 3997 | * @flags: for future extension. |
| 3998 | */ |
| 3999 | SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, |
| 4000 | unsigned int, size, unsigned int, flags) |
| 4001 | { |
| 4002 | struct sched_attr attr = { |
| 4003 | .size = sizeof(struct sched_attr), |
| 4004 | }; |
| 4005 | struct task_struct *p; |
| 4006 | int retval; |
| 4007 | |
| 4008 | if (!uattr || pid < 0 || size > PAGE_SIZE || |
| 4009 | size < SCHED_ATTR_SIZE_VER0 || flags) |
| 4010 | return -EINVAL; |
| 4011 | |
| 4012 | rcu_read_lock(); |
| 4013 | p = find_process_by_pid(pid); |
| 4014 | retval = -ESRCH; |
| 4015 | if (!p) |
| 4016 | goto out_unlock; |
| 4017 | |
| 4018 | retval = security_task_getscheduler(p); |
| 4019 | if (retval) |
| 4020 | goto out_unlock; |
| 4021 | |
| 4022 | attr.sched_policy = p->policy; |
| 4023 | if (p->sched_reset_on_fork) |
| 4024 | attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; |
| 4025 | if (task_has_dl_policy(p)) |
| 4026 | __getparam_dl(p, &attr); |
| 4027 | else if (task_has_rt_policy(p)) |
| 4028 | attr.sched_priority = p->rt_priority; |
| 4029 | else |
| 4030 | attr.sched_nice = task_nice(p); |
| 4031 | |
| 4032 | rcu_read_unlock(); |
| 4033 | |
| 4034 | retval = sched_read_attr(uattr, &attr, size); |
| 4035 | return retval; |
| 4036 | |
| 4037 | out_unlock: |
| 4038 | rcu_read_unlock(); |
| 4039 | return retval; |
| 4040 | } |
| 4041 | |
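| | /*
| | * Change the CPU affinity mask of the task identified by @pid to @in_mask,
| | * subject to the usual permission checks and to the cpuset the task is in.
| | */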
| 4042 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
| 4043 | { |
| 4044 | cpumask_var_t cpus_allowed, new_mask; |
| 4045 | struct task_struct *p; |
| 4046 | int retval; |
| 4047 | |
| 4048 | rcu_read_lock(); |
| 4049 | |
| 4050 | p = find_process_by_pid(pid); |
| 4051 | if (!p) { |
| 4052 | rcu_read_unlock(); |
| 4053 | return -ESRCH; |
| 4054 | } |
| 4055 | |
| 4056 | /* Prevent p going away */ |
| 4057 | get_task_struct(p); |
| 4058 | rcu_read_unlock(); |
| 4059 | |
| 4060 | if (p->flags & PF_NO_SETAFFINITY) { |
| 4061 | retval = -EINVAL; |
| 4062 | goto out_put_task; |
| 4063 | } |
| 4064 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
| 4065 | retval = -ENOMEM; |
| 4066 | goto out_put_task; |
| 4067 | } |
| 4068 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { |
| 4069 | retval = -ENOMEM; |
| 4070 | goto out_free_cpus_allowed; |
| 4071 | } |
| 4072 | retval = -EPERM; |
| 4073 | if (!check_same_owner(p)) { |
| 4074 | rcu_read_lock(); |
| 4075 | if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { |
| 4076 | rcu_read_unlock(); |
| 4077 | goto out_free_new_mask; |
| 4078 | } |
| 4079 | rcu_read_unlock(); |
| 4080 | } |
| 4081 | |
| 4082 | retval = security_task_setscheduler(p); |
| 4083 | if (retval) |
| 4084 | goto out_free_new_mask; |
| 4085 | |
| 4086 | |
| 4087 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4088 | cpumask_and(new_mask, in_mask, cpus_allowed); |
| 4089 | |
| 4090 | /* |
| 4091 | * Since bandwidth control happens on a per-root_domain basis,
| 4092 | * if admission test is enabled, we only admit -deadline |
| 4093 | * tasks allowed to run on all the CPUs in the task's |
| 4094 | * root_domain. |
| 4095 | */ |
| 4096 | #ifdef CONFIG_SMP |
| 4097 | if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { |
| 4098 | rcu_read_lock(); |
| 4099 | if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { |
| 4100 | retval = -EBUSY; |
| 4101 | rcu_read_unlock(); |
| 4102 | goto out_free_new_mask; |
| 4103 | } |
| 4104 | rcu_read_unlock(); |
| 4105 | } |
| 4106 | #endif |
| 4107 | again: |
| 4108 | retval = set_cpus_allowed_ptr(p, new_mask); |
| 4109 | |
| 4110 | if (!retval) { |
| 4111 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4112 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
| 4113 | /* |
| 4114 | * We must have raced with a concurrent cpuset |
| 4115 | * update. Just reset the cpus_allowed to the |
| 4116 | * cpuset's cpus_allowed |
| 4117 | */ |
| 4118 | cpumask_copy(new_mask, cpus_allowed); |
| 4119 | goto again; |
| 4120 | } |
| 4121 | } |
| 4122 | out_free_new_mask: |
| 4123 | free_cpumask_var(new_mask); |
| 4124 | out_free_cpus_allowed: |
| 4125 | free_cpumask_var(cpus_allowed); |
| 4126 | out_put_task: |
| 4127 | put_task_struct(p); |
| 4128 | return retval; |
| 4129 | } |
| 4130 | |
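| | /*
| | * Copy a user-space CPU mask of @len bytes into @new_mask, zero-filling
| | * any bits beyond @len and truncating @len to cpumask_size() if needed.
| | */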
| 4131 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
| 4132 | struct cpumask *new_mask) |
| 4133 | { |
| 4134 | if (len < cpumask_size()) |
| 4135 | cpumask_clear(new_mask); |
| 4136 | else if (len > cpumask_size()) |
| 4137 | len = cpumask_size(); |
| 4138 | |
| 4139 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
| 4140 | } |
| 4141 | |
| 4142 | /** |
| 4143 | * sys_sched_setaffinity - set the cpu affinity of a process |
| 4144 | * @pid: pid of the process |
| 4145 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 4146 | * @user_mask_ptr: user-space pointer to the new cpu mask |
| 4147 | * |
| 4148 | * Return: 0 on success. An error code otherwise. |
| 4149 | */ |
| 4150 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
| 4151 | unsigned long __user *, user_mask_ptr) |
| 4152 | { |
| 4153 | cpumask_var_t new_mask; |
| 4154 | int retval; |
| 4155 | |
| 4156 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 4157 | return -ENOMEM; |
| 4158 | |
| 4159 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
| 4160 | if (retval == 0) |
| 4161 | retval = sched_setaffinity(pid, new_mask); |
| 4162 | free_cpumask_var(new_mask); |
| 4163 | return retval; |
| 4164 | } |
| 4165 | |
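| | /*
| | * Return, in @mask, the CPU affinity of the task identified by @pid,
| | * restricted to the currently active CPUs.
| | */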
| 4166 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
| 4167 | { |
| 4168 | struct task_struct *p; |
| 4169 | unsigned long flags; |
| 4170 | int retval; |
| 4171 | |
| 4172 | rcu_read_lock(); |
| 4173 | |
| 4174 | retval = -ESRCH; |
| 4175 | p = find_process_by_pid(pid); |
| 4176 | if (!p) |
| 4177 | goto out_unlock; |
| 4178 | |
| 4179 | retval = security_task_getscheduler(p); |
| 4180 | if (retval) |
| 4181 | goto out_unlock; |
| 4182 | |
| 4183 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 4184 | cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); |
| 4185 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4186 | |
| 4187 | out_unlock: |
| 4188 | rcu_read_unlock(); |
| 4189 | |
| 4190 | return retval; |
| 4191 | } |
| 4192 | |
| 4193 | /** |
| 4194 | * sys_sched_getaffinity - get the cpu affinity of a process |
| 4195 | * @pid: pid of the process |
| 4196 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 4197 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
| 4198 | * |
| 4199 | * Return: 0 on success. An error code otherwise. |
| 4200 | */ |
| 4201 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
| 4202 | unsigned long __user *, user_mask_ptr) |
| 4203 | { |
| 4204 | int ret; |
| 4205 | cpumask_var_t mask; |
| 4206 | |
| 4207 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
| 4208 | return -EINVAL; |
| 4209 | if (len & (sizeof(unsigned long)-1)) |
| 4210 | return -EINVAL; |
| 4211 | |
| 4212 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 4213 | return -ENOMEM; |
| 4214 | |
| 4215 | ret = sched_getaffinity(pid, mask); |
| 4216 | if (ret == 0) { |
| 4217 | size_t retlen = min_t(size_t, len, cpumask_size()); |
| 4218 | |
| 4219 | if (copy_to_user(user_mask_ptr, mask, retlen)) |
| 4220 | ret = -EFAULT; |
| 4221 | else |
| 4222 | ret = retlen; |
| 4223 | } |
| 4224 | free_cpumask_var(mask); |
| 4225 | |
| 4226 | return ret; |
| 4227 | } |
| 4228 | |
| 4229 | /** |
| 4230 | * sys_sched_yield - yield the current processor to other threads. |
| 4231 | * |
| 4232 | * This function yields the current CPU to other tasks. If there are no |
| 4233 | * other threads running on this CPU then this function will return. |
| 4234 | * |
| 4235 | * Return: 0. |
| 4236 | */ |
| 4237 | SYSCALL_DEFINE0(sched_yield) |
| 4238 | { |
| 4239 | struct rq *rq = this_rq_lock(); |
| 4240 | |
| 4241 | schedstat_inc(rq, yld_count); |
| 4242 | current->sched_class->yield_task(rq); |
| 4243 | |
| 4244 | /* |
| 4245 | * Since we are going to call schedule() anyway, there's |
| 4246 | * no need to preempt or enable interrupts: |
| 4247 | */ |
| 4248 | __release(rq->lock); |
| 4249 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
| 4250 | do_raw_spin_unlock(&rq->lock); |
| 4251 | sched_preempt_enable_no_resched(); |
| 4252 | |
| 4253 | schedule(); |
| 4254 | |
| 4255 | return 0; |
| 4256 | } |
| 4257 | |
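| | /*
| | * Voluntarily reschedule if a reschedule is pending. Returns 1 if we
| | * actually rescheduled, 0 otherwise.
| | */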
| 4258 | int __sched _cond_resched(void) |
| 4259 | { |
| 4260 | if (should_resched()) { |
| 4261 | preempt_schedule_common(); |
| 4262 | return 1; |
| 4263 | } |
| 4264 | return 0; |
| 4265 | } |
| 4266 | EXPORT_SYMBOL(_cond_resched); |
| 4267 | |
| 4268 | /* |
| 4269 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
| 4270 | * call schedule, and on return reacquire the lock. |
| 4271 | * |
| 4272 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
| 4273 | * operations here to prevent schedule() from being called twice (once via |
| 4274 | * spin_unlock(), once by hand). |
| 4275 | */ |
| 4276 | int __cond_resched_lock(spinlock_t *lock) |
| 4277 | { |
| 4278 | int resched = should_resched(); |
| 4279 | int ret = 0; |
| 4280 | |
| 4281 | lockdep_assert_held(lock); |
| 4282 | |
| 4283 | if (spin_needbreak(lock) || resched) { |
| 4284 | spin_unlock(lock); |
| 4285 | if (resched) |
| 4286 | preempt_schedule_common(); |
| 4287 | else |
| 4288 | cpu_relax(); |
| 4289 | ret = 1; |
| 4290 | spin_lock(lock); |
| 4291 | } |
| 4292 | return ret; |
| 4293 | } |
| 4294 | EXPORT_SYMBOL(__cond_resched_lock); |
| 4295 | |
| 4296 | int __sched __cond_resched_softirq(void) |
| 4297 | { |
| 4298 | BUG_ON(!in_softirq()); |
| 4299 | |
| 4300 | if (should_resched()) { |
| 4301 | local_bh_enable(); |
| 4302 | preempt_schedule_common(); |
| 4303 | local_bh_disable(); |
| 4304 | return 1; |
| 4305 | } |
| 4306 | return 0; |
| 4307 | } |
| 4308 | EXPORT_SYMBOL(__cond_resched_softirq); |
| 4309 | |
| 4310 | /** |
| 4311 | * yield - yield the current processor to other threads. |
| 4312 | * |
| 4313 | * Do not ever use this function, there's a 99% chance you're doing it wrong. |
| 4314 | * |
| 4315 | * The scheduler is at all times free to pick the calling task as the most |
| 4316 | * eligible task to run; if removing the yield() call from your code breaks
| 4317 | * it, it's already broken.
| 4318 | * |
| 4319 | * Typical broken usage is: |
| 4320 | * |
| 4321 | * while (!event) |
| 4322 | * yield(); |
| 4323 | * |
| 4324 | * where one assumes that yield() will let 'the other' process run that will |
| 4325 | * make event true. If the current task is a SCHED_FIFO task that will never |
| 4326 | * happen. Never use yield() as a progress guarantee!! |
| 4327 | * |
| 4328 | * If you want to use yield() to wait for something, use wait_event(). |
| 4329 | * If you want to use yield() to be 'nice' for others, use cond_resched(). |
| 4330 | * If you still want to use yield(), do not! |
| 4331 | */ |
| 4332 | void __sched yield(void) |
| 4333 | { |
| 4334 | set_current_state(TASK_RUNNING); |
| 4335 | sys_sched_yield(); |
| 4336 | } |
| 4337 | EXPORT_SYMBOL(yield); |
| 4338 | |
| 4339 | /** |
| 4340 | * yield_to - yield the current processor to another thread in |
| 4341 | * your thread group, or accelerate that thread toward the |
| 4342 | * processor it's on. |
| 4343 | * @p: target task |
| 4344 | * @preempt: whether task preemption is allowed or not |
| 4345 | * |
| 4346 | * It's the caller's job to ensure that the target task struct |
| 4347 | * can't go away on us before we can do any checks. |
| 4348 | * |
| 4349 | * Return: |
| 4350 | * true (>0) if we indeed boosted the target task. |
| 4351 | * false (0) if we failed to boost the target. |
| 4352 | * -ESRCH if there's no task to yield to. |
| 4353 | */ |
| 4354 | int __sched yield_to(struct task_struct *p, bool preempt) |
| 4355 | { |
| 4356 | struct task_struct *curr = current; |
| 4357 | struct rq *rq, *p_rq; |
| 4358 | unsigned long flags; |
| 4359 | int yielded = 0; |
| 4360 | |
| 4361 | local_irq_save(flags); |
| 4362 | rq = this_rq(); |
| 4363 | |
| 4364 | again: |
| 4365 | p_rq = task_rq(p); |
| 4366 | /* |
| 4367 | * If we're the only runnable task on the rq and target rq also |
| 4368 | * has only one task, there's absolutely no point in yielding. |
| 4369 | */ |
| 4370 | if (rq->nr_running == 1 && p_rq->nr_running == 1) { |
| 4371 | yielded = -ESRCH; |
| 4372 | goto out_irq; |
| 4373 | } |
| 4374 | |
| 4375 | double_rq_lock(rq, p_rq); |
| 4376 | if (task_rq(p) != p_rq) { |
| 4377 | double_rq_unlock(rq, p_rq); |
| 4378 | goto again; |
| 4379 | } |
| 4380 | |
| 4381 | if (!curr->sched_class->yield_to_task) |
| 4382 | goto out_unlock; |
| 4383 | |
| 4384 | if (curr->sched_class != p->sched_class) |
| 4385 | goto out_unlock; |
| 4386 | |
| 4387 | if (task_running(p_rq, p) || p->state) |
| 4388 | goto out_unlock; |
| 4389 | |
| 4390 | yielded = curr->sched_class->yield_to_task(rq, p, preempt); |
| 4391 | if (yielded) { |
| 4392 | schedstat_inc(rq, yld_count); |
| 4393 | /* |
| 4394 | * Make p's CPU reschedule; pick_next_entity takes care of |
| 4395 | * fairness. |
| 4396 | */ |
| 4397 | if (preempt && rq != p_rq) |
| 4398 | resched_curr(p_rq); |
| 4399 | } |
| 4400 | |
| 4401 | out_unlock: |
| 4402 | double_rq_unlock(rq, p_rq); |
| 4403 | out_irq: |
| 4404 | local_irq_restore(flags); |
| 4405 | |
| 4406 | if (yielded > 0) |
| 4407 | schedule(); |
| 4408 | |
| 4409 | return yielded; |
| 4410 | } |
| 4411 | EXPORT_SYMBOL_GPL(yield_to); |
| 4412 | |
| 4413 | /* |
| 4414 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
| 4415 | * that process accounting knows that this is a task in IO wait state. |
| 4416 | */ |
| 4417 | long __sched io_schedule_timeout(long timeout) |
| 4418 | { |
| 4419 | int old_iowait = current->in_iowait; |
| 4420 | struct rq *rq; |
| 4421 | long ret; |
| 4422 | |
| 4423 | current->in_iowait = 1; |
| 4424 | blk_schedule_flush_plug(current); |
| 4425 | |
| 4426 | delayacct_blkio_start(); |
| 4427 | rq = raw_rq(); |
| 4428 | atomic_inc(&rq->nr_iowait); |
| 4429 | ret = schedule_timeout(timeout); |
| 4430 | current->in_iowait = old_iowait; |
| 4431 | atomic_dec(&rq->nr_iowait); |
| 4432 | delayacct_blkio_end(); |
| 4433 | |
| 4434 | return ret; |
| 4435 | } |
| 4436 | EXPORT_SYMBOL(io_schedule_timeout); |
| 4437 | |
| 4438 | /** |
| 4439 | * sys_sched_get_priority_max - return maximum RT priority. |
| 4440 | * @policy: scheduling class. |
| 4441 | * |
| 4442 | * Return: On success, this syscall returns the maximum |
| 4443 | * rt_priority that can be used by a given scheduling class. |
| 4444 | * On failure, a negative error code is returned. |
| 4445 | */ |
| 4446 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
| 4447 | { |
| 4448 | int ret = -EINVAL; |
| 4449 | |
| 4450 | switch (policy) { |
| 4451 | case SCHED_FIFO: |
| 4452 | case SCHED_RR: |
| 4453 | ret = MAX_USER_RT_PRIO-1; |
| 4454 | break; |
| 4455 | case SCHED_DEADLINE: |
| 4456 | case SCHED_NORMAL: |
| 4457 | case SCHED_BATCH: |
| 4458 | case SCHED_IDLE: |
| 4459 | ret = 0; |
| 4460 | break; |
| 4461 | } |
| 4462 | return ret; |
| 4463 | } |
| 4464 | |
| 4465 | /** |
| 4466 | * sys_sched_get_priority_min - return minimum RT priority. |
| 4467 | * @policy: scheduling class. |
| 4468 | * |
| 4469 | * Return: On success, this syscall returns the minimum |
| 4470 | * rt_priority that can be used by a given scheduling class. |
| 4471 | * On failure, a negative error code is returned. |
| 4472 | */ |
| 4473 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
| 4474 | { |
| 4475 | int ret = -EINVAL; |
| 4476 | |
| 4477 | switch (policy) { |
| 4478 | case SCHED_FIFO: |
| 4479 | case SCHED_RR: |
| 4480 | ret = 1; |
| 4481 | break; |
| 4482 | case SCHED_DEADLINE: |
| 4483 | case SCHED_NORMAL: |
| 4484 | case SCHED_BATCH: |
| 4485 | case SCHED_IDLE: |
| 4486 | ret = 0; |
| 4487 | } |
| 4488 | return ret; |
| 4489 | } |
| 4490 | |
| 4491 | /** |
| 4492 | * sys_sched_rr_get_interval - return the default timeslice of a process. |
| 4493 | * @pid: pid of the process. |
| 4494 | * @interval: userspace pointer to the timeslice value. |
| 4495 | * |
| 4496 | * This syscall writes the default timeslice value of a given process
| 4497 | * into the user-space timespec buffer. A value of '0' means infinity. |
| 4498 | * |
| 4499 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, |
| 4500 | * an error code. |
| 4501 | */ |
| 4502 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
| 4503 | struct timespec __user *, interval) |
| 4504 | { |
| 4505 | struct task_struct *p; |
| 4506 | unsigned int time_slice; |
| 4507 | unsigned long flags; |
| 4508 | struct rq *rq; |
| 4509 | int retval; |
| 4510 | struct timespec t; |
| 4511 | |
| 4512 | if (pid < 0) |
| 4513 | return -EINVAL; |
| 4514 | |
| 4515 | retval = -ESRCH; |
| 4516 | rcu_read_lock(); |
| 4517 | p = find_process_by_pid(pid); |
| 4518 | if (!p) |
| 4519 | goto out_unlock; |
| 4520 | |
| 4521 | retval = security_task_getscheduler(p); |
| 4522 | if (retval) |
| 4523 | goto out_unlock; |
| 4524 | |
| 4525 | rq = task_rq_lock(p, &flags); |
| 4526 | time_slice = 0; |
| 4527 | if (p->sched_class->get_rr_interval) |
| 4528 | time_slice = p->sched_class->get_rr_interval(rq, p); |
| 4529 | task_rq_unlock(rq, p, &flags); |
| 4530 | |
| 4531 | rcu_read_unlock(); |
| 4532 | jiffies_to_timespec(time_slice, &t); |
| 4533 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
| 4534 | return retval; |
| 4535 | |
| 4536 | out_unlock: |
| 4537 | rcu_read_unlock(); |
| 4538 | return retval; |
| 4539 | } |
| 4540 | |
| 4541 | static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; |
| 4542 | |
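| | /*
| | * Print a one-line summary of @p (state, stack usage, pid, parent and
| | * thread flags) followed by its kernel stack trace.
| | */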
| 4543 | void sched_show_task(struct task_struct *p) |
| 4544 | { |
| 4545 | unsigned long free = 0; |
| 4546 | int ppid; |
| 4547 | unsigned long state = p->state; |
| 4548 | |
| 4549 | if (state) |
| 4550 | state = __ffs(state) + 1; |
| 4551 | printk(KERN_INFO "%-15.15s %c", p->comm, |
| 4552 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); |
| 4553 | #if BITS_PER_LONG == 32 |
| 4554 | if (state == TASK_RUNNING) |
| 4555 | printk(KERN_CONT " running "); |
| 4556 | else |
| 4557 | printk(KERN_CONT " %08lx ", thread_saved_pc(p)); |
| 4558 | #else |
| 4559 | if (state == TASK_RUNNING) |
| 4560 | printk(KERN_CONT " running task "); |
| 4561 | else |
| 4562 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
| 4563 | #endif |
| 4564 | #ifdef CONFIG_DEBUG_STACK_USAGE |
| 4565 | free = stack_not_used(p); |
| 4566 | #endif |
| 4567 | ppid = 0; |
| 4568 | rcu_read_lock(); |
| 4569 | if (pid_alive(p)) |
| 4570 | ppid = task_pid_nr(rcu_dereference(p->real_parent)); |
| 4571 | rcu_read_unlock(); |
| 4572 | printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, |
| 4573 | task_pid_nr(p), ppid, |
| 4574 | (unsigned long)task_thread_info(p)->flags); |
| 4575 | |
| 4576 | print_worker_info(KERN_INFO, p); |
| 4577 | show_stack(p, NULL); |
| 4578 | } |
| 4579 | |
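| | /*
| | * Dump the state of every task whose state matches @state_filter;
| | * a @state_filter of 0 dumps all tasks.
| | */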
| 4580 | void show_state_filter(unsigned long state_filter) |
| 4581 | { |
| 4582 | struct task_struct *g, *p; |
| 4583 | |
| 4584 | #if BITS_PER_LONG == 32 |
| 4585 | printk(KERN_INFO |
| 4586 | " task PC stack pid father\n"); |
| 4587 | #else |
| 4588 | printk(KERN_INFO |
| 4589 | " task PC stack pid father\n"); |
| 4590 | #endif |
| 4591 | rcu_read_lock(); |
| 4592 | for_each_process_thread(g, p) { |
| 4593 | /* |
| 4594 | * reset the NMI-timeout, listing all files on a slow |
| 4595 | * console might take a lot of time: |
| 4596 | */ |
| 4597 | touch_nmi_watchdog(); |
| 4598 | if (!state_filter || (p->state & state_filter)) |
| 4599 | sched_show_task(p); |
| 4600 | } |
| 4601 | |
| 4602 | touch_all_softlockup_watchdogs(); |
| 4603 | |
| 4604 | #ifdef CONFIG_SCHED_DEBUG |
| 4605 | sysrq_sched_debug_show(); |
| 4606 | #endif |
| 4607 | rcu_read_unlock(); |
| 4608 | /* |
| 4609 | * Only show locks if all tasks are dumped: |
| 4610 | */ |
| 4611 | if (!state_filter) |
| 4612 | debug_show_all_locks(); |
| 4613 | } |
| 4614 | |
| 4615 | void init_idle_bootup_task(struct task_struct *idle) |
| 4616 | { |
| 4617 | idle->sched_class = &idle_sched_class; |
| 4618 | } |
| 4619 | |
| 4620 | /** |
| 4621 | * init_idle - set up an idle thread for a given CPU |
| 4622 | * @idle: task in question |
| 4623 | * @cpu: cpu the idle task belongs to |
| 4624 | * |
| 4625 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
| 4626 | * flag, to make booting more robust. |
| 4627 | */ |
| 4628 | void init_idle(struct task_struct *idle, int cpu) |
| 4629 | { |
| 4630 | struct rq *rq = cpu_rq(cpu); |
| 4631 | unsigned long flags; |
| 4632 | |
| 4633 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 4634 | |
| 4635 | __sched_fork(0, idle); |
| 4636 | idle->state = TASK_RUNNING; |
| 4637 | idle->se.exec_start = sched_clock(); |
| 4638 | |
| 4639 | do_set_cpus_allowed(idle, cpumask_of(cpu)); |
| 4640 | /* |
| 4641 | * We have a chicken-and-egg problem here: even though we are
| 4642 | * holding rq->lock, the task's cpu isn't set to this cpu yet, so
| 4643 | * the lockdep check in task_group() will fail.
| 4644 | *
| 4645 | * This is a similar case to sched_fork(); alternatively we could
| 4646 | * use task_rq_lock() here and obtain the other rq->lock.
| 4647 | * |
| 4648 | * Silence PROVE_RCU |
| 4649 | */ |
| 4650 | rcu_read_lock(); |
| 4651 | __set_task_cpu(idle, cpu); |
| 4652 | rcu_read_unlock(); |
| 4653 | |
| 4654 | rq->curr = rq->idle = idle; |
| 4655 | idle->on_rq = TASK_ON_RQ_QUEUED; |
| 4656 | #if defined(CONFIG_SMP) |
| 4657 | idle->on_cpu = 1; |
| 4658 | #endif |
| 4659 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 4660 | |
| 4661 | /* Set the preempt count _outside_ the spinlocks! */ |
| 4662 | init_idle_preempt_count(idle, cpu); |
| 4663 | |
| 4664 | /* |
| 4665 | * The idle tasks have their own, simple scheduling class: |
| 4666 | */ |
| 4667 | idle->sched_class = &idle_sched_class; |
| 4668 | ftrace_graph_init_idle_task(idle, cpu); |
| 4669 | vtime_init_idle(idle, cpu); |
| 4670 | #if defined(CONFIG_SMP) |
| 4671 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
| 4672 | #endif |
| 4673 | } |
| 4674 | |
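| | /*
| | * Check whether the -deadline bandwidth currently admitted on @cur still
| | * fits when the cpuset is shrunk to @trial. Returns 1 if it fits (or if
| | * there is nothing to check), 0 otherwise.
| | */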
| 4675 | int cpuset_cpumask_can_shrink(const struct cpumask *cur, |
| 4676 | const struct cpumask *trial) |
| 4677 | { |
| 4678 | int ret = 1, trial_cpus; |
| 4679 | struct dl_bw *cur_dl_b; |
| 4680 | unsigned long flags; |
| 4681 | |
| 4682 | if (!cpumask_weight(cur)) |
| 4683 | return ret; |
| 4684 | |
| 4685 | rcu_read_lock_sched(); |
| 4686 | cur_dl_b = dl_bw_of(cpumask_any(cur)); |
| 4687 | trial_cpus = cpumask_weight(trial); |
| 4688 | |
| 4689 | raw_spin_lock_irqsave(&cur_dl_b->lock, flags); |
| 4690 | if (cur_dl_b->bw != -1 && |
| 4691 | cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) |
| 4692 | ret = 0; |
| 4693 | raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); |
| 4694 | rcu_read_unlock_sched(); |
| 4695 | |
| 4696 | return ret; |
| 4697 | } |
| 4698 | |
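| | /*
| | * Check whether @p may be attached to a cpuset whose effective CPUs are
| | * @cs_cpus_allowed, reserving -deadline bandwidth in the destination
| | * root_domain if needed. Returns 0 on success or a negative error code.
| | */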
| 4699 | int task_can_attach(struct task_struct *p, |
| 4700 | const struct cpumask *cs_cpus_allowed) |
| 4701 | { |
| 4702 | int ret = 0; |
| 4703 | |
| 4704 | /* |
| 4705 | * Kthreads which disallow setaffinity shouldn't be moved |
| 4706 | * to a new cpuset; we don't want to change their cpu |
| 4707 | * affinity and isolating such threads by their set of |
| 4708 | * allowed nodes is unnecessary. Thus, cpusets are not |
| 4709 | * applicable for such threads. This prevents checking for |
| 4710 | * success of set_cpus_allowed_ptr() on all attached tasks |
| 4711 | * before cpus_allowed may be changed. |
| 4712 | */ |
| 4713 | if (p->flags & PF_NO_SETAFFINITY) { |
| 4714 | ret = -EINVAL; |
| 4715 | goto out; |
| 4716 | } |
| 4717 | |
| 4718 | #ifdef CONFIG_SMP |
| 4719 | if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, |
| 4720 | cs_cpus_allowed)) { |
| 4721 | unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, |
| 4722 | cs_cpus_allowed); |
| 4723 | struct dl_bw *dl_b; |
| 4724 | bool overflow; |
| 4725 | int cpus; |
| 4726 | unsigned long flags; |
| 4727 | |
| 4728 | rcu_read_lock_sched(); |
| 4729 | dl_b = dl_bw_of(dest_cpu); |
| 4730 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 4731 | cpus = dl_bw_cpus(dest_cpu); |
| 4732 | overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); |
| 4733 | if (overflow) |
| 4734 | ret = -EBUSY; |
| 4735 | else { |
| 4736 | /* |
| 4737 | * We reserve space for this task in the destination |
| 4738 | * root_domain, as we can't fail after this point. |
| 4739 | * We will free resources in the source root_domain |
| 4740 | * later on (see set_cpus_allowed_dl()). |
| 4741 | */ |
| 4742 | __dl_add(dl_b, p->dl.dl_bw); |
| 4743 | } |
| 4744 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 4745 | rcu_read_unlock_sched(); |
| 4746 | |
| 4747 | } |
| 4748 | #endif |
| 4749 | out: |
| 4750 | return ret; |
| 4751 | } |
| 4752 | |
| 4753 | #ifdef CONFIG_SMP |
| 4754 | /* |
| 4755 | * move_queued_task - move a queued task to new rq. |
| 4756 | * |
| 4757 | * Returns (locked) new rq. Old rq's lock is released. |
| 4758 | */ |
| 4759 | static struct rq *move_queued_task(struct task_struct *p, int new_cpu) |
| 4760 | { |
| 4761 | struct rq *rq = task_rq(p); |
| 4762 | |
| 4763 | lockdep_assert_held(&rq->lock); |
| 4764 | |
| 4765 | dequeue_task(rq, p, 0); |
| 4766 | p->on_rq = TASK_ON_RQ_MIGRATING; |
| 4767 | set_task_cpu(p, new_cpu); |
| 4768 | raw_spin_unlock(&rq->lock); |
| 4769 | |
| 4770 | rq = cpu_rq(new_cpu); |
| 4771 | |
| 4772 | raw_spin_lock(&rq->lock); |
| 4773 | BUG_ON(task_cpu(p) != new_cpu); |
| 4774 | p->on_rq = TASK_ON_RQ_QUEUED; |
| 4775 | enqueue_task(rq, p, 0); |
| 4776 | check_preempt_curr(rq, p, 0); |
| 4777 | |
| 4778 | return rq; |
| 4779 | } |
| 4780 | |
| 4781 | void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
| 4782 | { |
| 4783 | if (p->sched_class->set_cpus_allowed) |
| 4784 | p->sched_class->set_cpus_allowed(p, new_mask); |
| 4785 | |
| 4786 | cpumask_copy(&p->cpus_allowed, new_mask); |
| 4787 | p->nr_cpus_allowed = cpumask_weight(new_mask); |
| 4788 | } |
| 4789 | |
| 4790 | /* |
| 4791 | * This is how migration works: |
| 4792 | * |
| 4793 | * 1) we invoke migration_cpu_stop() on the target CPU using |
| 4794 | * stop_one_cpu(). |
| 4795 | * 2) stopper starts to run (implicitly forcing the migrated thread |
| 4796 | * off the CPU) |
| 4797 | * 3) it checks whether the migrated task is still in the wrong runqueue. |
| 4798 | * 4) if it's in the wrong runqueue then the migration thread removes |
| 4799 | * it and puts it into the right queue. |
| 4800 | * 5) stopper completes and stop_one_cpu() returns and the migration |
| 4801 | * is done. |
| 4802 | */ |
| 4803 | |
| 4804 | /* |
| 4805 | * Change a given task's CPU affinity. Migrate the thread to a |
| 4806 | * proper CPU and schedule it away if the CPU it's executing on |
| 4807 | * is removed from the allowed bitmask. |
| 4808 | * |
| 4809 | * NOTE: the caller must have a valid reference to the task, the |
| 4810 | * task must not exit() & deallocate itself prematurely. The |
| 4811 | * call is not atomic; no spinlocks may be held. |
| 4812 | */ |
| 4813 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
| 4814 | { |
| 4815 | unsigned long flags; |
| 4816 | struct rq *rq; |
| 4817 | unsigned int dest_cpu; |
| 4818 | int ret = 0; |
| 4819 | |
| 4820 | rq = task_rq_lock(p, &flags); |
| 4821 | |
| 4822 | if (cpumask_equal(&p->cpus_allowed, new_mask)) |
| 4823 | goto out; |
| 4824 | |
| 4825 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { |
| 4826 | ret = -EINVAL; |
| 4827 | goto out; |
| 4828 | } |
| 4829 | |
| 4830 | do_set_cpus_allowed(p, new_mask); |
| 4831 | |
| 4832 | /* Can the task run on the task's current CPU? If so, we're done */ |
| 4833 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
| 4834 | goto out; |
| 4835 | |
| 4836 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); |
| 4837 | if (task_running(rq, p) || p->state == TASK_WAKING) { |
| 4838 | struct migration_arg arg = { p, dest_cpu }; |
| 4839 | /* Need help from migration thread: drop lock and wait. */ |
| 4840 | task_rq_unlock(rq, p, &flags); |
| 4841 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
| 4842 | tlb_migrate_finish(p->mm); |
| 4843 | return 0; |
| 4844 | } else if (task_on_rq_queued(p)) |
| 4845 | rq = move_queued_task(p, dest_cpu); |
| 4846 | out: |
| 4847 | task_rq_unlock(rq, p, &flags); |
| 4848 | |
| 4849 | return ret; |
| 4850 | } |
| 4851 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
| 4852 | |
| 4853 | /* |
| 4854 | * Move (not current) task off this cpu, onto dest cpu. We're doing |
| 4855 | * this because either it can't run here any more (set_cpus_allowed() |
| 4856 | * moved it away from this CPU, or the CPU is going down), or because we're
| 4857 | * attempting to rebalance this task on exec (sched_exec). |
| 4858 | * |
| 4859 | * So we race with normal scheduler movements, but that's OK, as long |
| 4860 | * as the task is no longer on this CPU. |
| 4861 | * |
| 4862 | * Returns non-zero if task was successfully migrated. |
| 4863 | */ |
| 4864 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
| 4865 | { |
| 4866 | struct rq *rq; |
| 4867 | int ret = 0; |
| 4868 | |
| 4869 | if (unlikely(!cpu_active(dest_cpu))) |
| 4870 | return ret; |
| 4871 | |
| 4872 | rq = cpu_rq(src_cpu); |
| 4873 | |
| 4874 | raw_spin_lock(&p->pi_lock); |
| 4875 | raw_spin_lock(&rq->lock); |
| 4876 | /* Already moved. */ |
| 4877 | if (task_cpu(p) != src_cpu) |
| 4878 | goto done; |
| 4879 | |
| 4880 | /* Affinity changed (again). */ |
| 4881 | if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) |
| 4882 | goto fail; |
| 4883 | |
| 4884 | /* |
| 4885 | * If we're not on a rq, the next wake-up will ensure we're |
| 4886 | * placed properly. |
| 4887 | */ |
| 4888 | if (task_on_rq_queued(p)) |
| 4889 | rq = move_queued_task(p, dest_cpu); |
| 4890 | done: |
| 4891 | ret = 1; |
| 4892 | fail: |
| 4893 | raw_spin_unlock(&rq->lock); |
| 4894 | raw_spin_unlock(&p->pi_lock); |
| 4895 | return ret; |
| 4896 | } |
| 4897 | |
| 4898 | #ifdef CONFIG_NUMA_BALANCING |
| 4899 | /* Migrate current task p to target_cpu */ |
| 4900 | int migrate_task_to(struct task_struct *p, int target_cpu) |
| 4901 | { |
| 4902 | struct migration_arg arg = { p, target_cpu }; |
| 4903 | int curr_cpu = task_cpu(p); |
| 4904 | |
| 4905 | if (curr_cpu == target_cpu) |
| 4906 | return 0; |
| 4907 | |
| 4908 | if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) |
| 4909 | return -EINVAL; |
| 4910 | |
| 4911 | /* TODO: This is not properly updating schedstats */ |
| 4912 | |
| 4913 | trace_sched_move_numa(p, curr_cpu, target_cpu); |
| 4914 | return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); |
| 4915 | } |
| 4916 | |
| 4917 | /* |
| 4918 | * Requeue a task on a given node and accurately track the number of NUMA |
| 4919 | * tasks on the runqueues |
| 4920 | */ |
| 4921 | void sched_setnuma(struct task_struct *p, int nid) |
| 4922 | { |
| 4923 | struct rq *rq; |
| 4924 | unsigned long flags; |
| 4925 | bool queued, running; |
| 4926 | |
| 4927 | rq = task_rq_lock(p, &flags); |
| 4928 | queued = task_on_rq_queued(p); |
| 4929 | running = task_current(rq, p); |
| 4930 | |
| 4931 | if (queued) |
| 4932 | dequeue_task(rq, p, 0); |
| 4933 | if (running) |
| 4934 | put_prev_task(rq, p); |
| 4935 | |
| 4936 | p->numa_preferred_nid = nid; |
| 4937 | |
| 4938 | if (running) |
| 4939 | p->sched_class->set_curr_task(rq); |
| 4940 | if (queued) |
| 4941 | enqueue_task(rq, p, 0); |
| 4942 | task_rq_unlock(rq, p, &flags); |
| 4943 | } |
| 4944 | #endif |
| 4945 | |
| 4946 | /* |
| 4947 | * migration_cpu_stop - this will be executed by a highprio stopper thread |
| 4948 | * and performs thread migration by bumping thread off CPU then |
| 4949 | * 'pushing' onto another runqueue. |
| 4950 | */ |
| 4951 | static int migration_cpu_stop(void *data) |
| 4952 | { |
| 4953 | struct migration_arg *arg = data; |
| 4954 | |
| 4955 | /* |
| 4956 | * The original target cpu might have gone down and we might |
| 4957 | * be on another cpu but it doesn't matter. |
| 4958 | */ |
| 4959 | local_irq_disable(); |
| 4960 | /* |
| 4961 | * We need to explicitly wake pending tasks before running |
| 4962 | * __migrate_task() such that we will not miss enforcing cpus_allowed |
| 4963 | * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. |
| 4964 | */ |
| 4965 | sched_ttwu_pending(); |
| 4966 | __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); |
| 4967 | local_irq_enable(); |
| 4968 | return 0; |
| 4969 | } |
| 4970 | |
| 4971 | #ifdef CONFIG_HOTPLUG_CPU |
| 4972 | |
| 4973 | /* |
| 4974 | * Ensures that the idle task is using init_mm right before its cpu goes |
| 4975 | * offline. |
| 4976 | */ |
| 4977 | void idle_task_exit(void) |
| 4978 | { |
| 4979 | struct mm_struct *mm = current->active_mm; |
| 4980 | |
| 4981 | BUG_ON(cpu_online(smp_processor_id())); |
| 4982 | |
| 4983 | if (mm != &init_mm) { |
| 4984 | switch_mm(mm, &init_mm, current); |
| 4985 | finish_arch_post_lock_switch(); |
| 4986 | } |
| 4987 | mmdrop(mm); |
| 4988 | } |
| 4989 | |
| 4990 | /* |
| 4991 | * Since this CPU is going 'away' for a while, fold any nr_active delta |
| 4992 | * we might have. Assumes we're called after migrate_tasks() so that the |
| 4993 | * nr_active count is stable. |
| 4994 | * |
| 4995 | * Also see the comment "Global load-average calculations". |
| 4996 | */ |
| 4997 | static void calc_load_migrate(struct rq *rq) |
| 4998 | { |
| 4999 | long delta = calc_load_fold_active(rq); |
| 5000 | if (delta) |
| 5001 | atomic_long_add(delta, &calc_load_tasks); |
| 5002 | } |
| 5003 | |
| 5004 | static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) |
| 5005 | { |
| 5006 | } |
| 5007 | |
| 5008 | static const struct sched_class fake_sched_class = { |
| 5009 | .put_prev_task = put_prev_task_fake, |
| 5010 | }; |
| 5011 | |
| 5012 | static struct task_struct fake_task = { |
| 5013 | /* |
| 5014 | * Avoid pull_{rt,dl}_task() |
| 5015 | */ |
| 5016 | .prio = MAX_PRIO + 1, |
| 5017 | .sched_class = &fake_sched_class, |
| 5018 | }; |
| 5019 | |
| 5020 | /* |
| 5021 | * Migrate all tasks from the rq, sleeping tasks will be migrated by |
| 5022 | * try_to_wake_up()->select_task_rq(). |
| 5023 | * |
| 5024 | * Called with rq->lock held even though we're in stop_machine() and
| 5025 | * there's no concurrency possible; we hold the required locks anyway
| 5026 | * because of lock validation efforts. |
| 5027 | */ |
| 5028 | static void migrate_tasks(unsigned int dead_cpu) |
| 5029 | { |
| 5030 | struct rq *rq = cpu_rq(dead_cpu); |
| 5031 | struct task_struct *next, *stop = rq->stop; |
| 5032 | int dest_cpu; |
| 5033 | |
| 5034 | /* |
| 5035 | * Fudge the rq selection such that the below task selection loop |
| 5036 | * doesn't get stuck on the currently eligible stop task. |
| 5037 | * |
| 5038 | * We're currently inside stop_machine() and the rq is either stuck |
| 5039 | * in the stop_machine_cpu_stop() loop, or we're executing this code, |
| 5040 | * either way we should never end up calling schedule() until we're |
| 5041 | * done here. |
| 5042 | */ |
| 5043 | rq->stop = NULL; |
| 5044 | |
/*
 * The put_prev_task() and pick_next_task() sched class methods
 * both need an up-to-date value of rq->clock[_task].
 */
| 5050 | update_rq_clock(rq); |
| 5051 | |
| 5052 | for ( ; ; ) { |
| 5053 | /* |
| 5054 | * There's this thread running, bail when that's the only |
| 5055 | * remaining thread. |
| 5056 | */ |
| 5057 | if (rq->nr_running == 1) |
| 5058 | break; |
| 5059 | |
| 5060 | next = pick_next_task(rq, &fake_task); |
| 5061 | BUG_ON(!next); |
| 5062 | next->sched_class->put_prev_task(rq, next); |
| 5063 | |
| 5064 | /* Find suitable destination for @next, with force if needed. */ |
| 5065 | dest_cpu = select_fallback_rq(dead_cpu, next); |
| 5066 | raw_spin_unlock(&rq->lock); |
| 5067 | |
| 5068 | __migrate_task(next, dead_cpu, dest_cpu); |
| 5069 | |
| 5070 | raw_spin_lock(&rq->lock); |
| 5071 | } |
| 5072 | |
| 5073 | rq->stop = stop; |
| 5074 | } |
| 5075 | |
| 5076 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 5077 | |
| 5078 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) |
| 5079 | |
| 5080 | static struct ctl_table sd_ctl_dir[] = { |
| 5081 | { |
| 5082 | .procname = "sched_domain", |
| 5083 | .mode = 0555, |
| 5084 | }, |
| 5085 | {} |
| 5086 | }; |
| 5087 | |
| 5088 | static struct ctl_table sd_ctl_root[] = { |
| 5089 | { |
| 5090 | .procname = "kernel", |
| 5091 | .mode = 0555, |
| 5092 | .child = sd_ctl_dir, |
| 5093 | }, |
| 5094 | {} |
| 5095 | }; |
| 5096 | |
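/*
 * Allocate a zeroed array of @n ctl_table entries; the trailing
 * all-zero entry serves as the table terminator.
 */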
| 5097 | static struct ctl_table *sd_alloc_ctl_entry(int n) |
| 5098 | { |
| 5099 | struct ctl_table *entry = |
| 5100 | kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); |
| 5101 | |
| 5102 | return entry; |
| 5103 | } |
| 5104 | |
| 5105 | static void sd_free_ctl_entry(struct ctl_table **tablep) |
| 5106 | { |
| 5107 | struct ctl_table *entry; |
| 5108 | |
/*
 * In the intermediate directories, both the child directory and
 * procname are dynamically allocated and the allocation could have
 * failed, but the mode will always be set.  In the lowest directory
 * the names are static strings and all have proc handlers.
 */
| 5115 | for (entry = *tablep; entry->mode; entry++) { |
| 5116 | if (entry->child) |
| 5117 | sd_free_ctl_entry(&entry->child); |
| 5118 | if (entry->proc_handler == NULL) |
| 5119 | kfree(entry->procname); |
| 5120 | } |
| 5121 | |
| 5122 | kfree(*tablep); |
| 5123 | *tablep = NULL; |
| 5124 | } |
| 5125 | |
| 5126 | static int min_load_idx = 0; |
| 5127 | static int max_load_idx = CPU_LOAD_IDX_MAX-1; |
| 5128 | |
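/*
 * Fill in a single sysctl table entry.  For load-index entries the
 * extra1/extra2 bounds restrict writes to the valid cpu_load index
 * range [min_load_idx, max_load_idx].
 */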
| 5129 | static void |
| 5130 | set_table_entry(struct ctl_table *entry, |
| 5131 | const char *procname, void *data, int maxlen, |
| 5132 | umode_t mode, proc_handler *proc_handler, |
| 5133 | bool load_idx) |
| 5134 | { |
| 5135 | entry->procname = procname; |
| 5136 | entry->data = data; |
| 5137 | entry->maxlen = maxlen; |
| 5138 | entry->mode = mode; |
| 5139 | entry->proc_handler = proc_handler; |
| 5140 | |
| 5141 | if (load_idx) { |
| 5142 | entry->extra1 = &min_load_idx; |
| 5143 | entry->extra2 = &max_load_idx; |
| 5144 | } |
| 5145 | } |
| 5146 | |
| 5147 | static struct ctl_table * |
| 5148 | sd_alloc_ctl_domain_table(struct sched_domain *sd) |
| 5149 | { |
| 5150 | struct ctl_table *table = sd_alloc_ctl_entry(14); |
| 5151 | |
| 5152 | if (table == NULL) |
| 5153 | return NULL; |
| 5154 | |
| 5155 | set_table_entry(&table[0], "min_interval", &sd->min_interval, |
| 5156 | sizeof(long), 0644, proc_doulongvec_minmax, false); |
| 5157 | set_table_entry(&table[1], "max_interval", &sd->max_interval, |
| 5158 | sizeof(long), 0644, proc_doulongvec_minmax, false); |
| 5159 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, |
| 5160 | sizeof(int), 0644, proc_dointvec_minmax, true); |
| 5161 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, |
| 5162 | sizeof(int), 0644, proc_dointvec_minmax, true); |
| 5163 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, |
| 5164 | sizeof(int), 0644, proc_dointvec_minmax, true); |
| 5165 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, |
| 5166 | sizeof(int), 0644, proc_dointvec_minmax, true); |
| 5167 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, |
| 5168 | sizeof(int), 0644, proc_dointvec_minmax, true); |
| 5169 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, |
| 5170 | sizeof(int), 0644, proc_dointvec_minmax, false); |
| 5171 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, |
| 5172 | sizeof(int), 0644, proc_dointvec_minmax, false); |
| 5173 | set_table_entry(&table[9], "cache_nice_tries", |
| 5174 | &sd->cache_nice_tries, |
| 5175 | sizeof(int), 0644, proc_dointvec_minmax, false); |
| 5176 | set_table_entry(&table[10], "flags", &sd->flags, |
| 5177 | sizeof(int), 0644, proc_dointvec_minmax, false); |
| 5178 | set_table_entry(&table[11], "max_newidle_lb_cost", |
| 5179 | &sd->max_newidle_lb_cost, |
| 5180 | sizeof(long), 0644, proc_doulongvec_minmax, false); |
| 5181 | set_table_entry(&table[12], "name", sd->name, |
| 5182 | CORENAME_MAX_SIZE, 0444, proc_dostring, false); |
| 5183 | /* &table[13] is terminator */ |
| 5184 | |
| 5185 | return table; |
| 5186 | } |
| 5187 | |
| 5188 | static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) |
| 5189 | { |
| 5190 | struct ctl_table *entry, *table; |
| 5191 | struct sched_domain *sd; |
| 5192 | int domain_num = 0, i; |
| 5193 | char buf[32]; |
| 5194 | |
| 5195 | for_each_domain(cpu, sd) |
| 5196 | domain_num++; |
| 5197 | entry = table = sd_alloc_ctl_entry(domain_num + 1); |
| 5198 | if (table == NULL) |
| 5199 | return NULL; |
| 5200 | |
| 5201 | i = 0; |
| 5202 | for_each_domain(cpu, sd) { |
| 5203 | snprintf(buf, 32, "domain%d", i); |
| 5204 | entry->procname = kstrdup(buf, GFP_KERNEL); |
| 5205 | entry->mode = 0555; |
| 5206 | entry->child = sd_alloc_ctl_domain_table(sd); |
| 5207 | entry++; |
| 5208 | i++; |
| 5209 | } |
| 5210 | return table; |
| 5211 | } |
| 5212 | |
| 5213 | static struct ctl_table_header *sd_sysctl_header; |
| 5214 | static void register_sched_domain_sysctl(void) |
| 5215 | { |
| 5216 | int i, cpu_num = num_possible_cpus(); |
| 5217 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
| 5218 | char buf[32]; |
| 5219 | |
| 5220 | WARN_ON(sd_ctl_dir[0].child); |
| 5221 | sd_ctl_dir[0].child = entry; |
| 5222 | |
| 5223 | if (entry == NULL) |
| 5224 | return; |
| 5225 | |
| 5226 | for_each_possible_cpu(i) { |
| 5227 | snprintf(buf, 32, "cpu%d", i); |
| 5228 | entry->procname = kstrdup(buf, GFP_KERNEL); |
| 5229 | entry->mode = 0555; |
| 5230 | entry->child = sd_alloc_ctl_cpu_table(i); |
| 5231 | entry++; |
| 5232 | } |
| 5233 | |
| 5234 | WARN_ON(sd_sysctl_header); |
| 5235 | sd_sysctl_header = register_sysctl_table(sd_ctl_root); |
| 5236 | } |
| 5237 | |
/* May be called multiple times for each register_sched_domain_sysctl() call */
| 5239 | static void unregister_sched_domain_sysctl(void) |
| 5240 | { |
| 5241 | if (sd_sysctl_header) |
| 5242 | unregister_sysctl_table(sd_sysctl_header); |
| 5243 | sd_sysctl_header = NULL; |
| 5244 | if (sd_ctl_dir[0].child) |
| 5245 | sd_free_ctl_entry(&sd_ctl_dir[0].child); |
| 5246 | } |
| 5247 | #else |
| 5248 | static void register_sched_domain_sysctl(void) |
| 5249 | { |
| 5250 | } |
| 5251 | static void unregister_sched_domain_sysctl(void) |
| 5252 | { |
| 5253 | } |
| 5254 | #endif |
| 5255 | |
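/*
 * Mark @rq's CPU as online in its root domain and give every sched
 * class a chance to update its per-runqueue online state.
 */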
| 5256 | static void set_rq_online(struct rq *rq) |
| 5257 | { |
| 5258 | if (!rq->online) { |
| 5259 | const struct sched_class *class; |
| 5260 | |
| 5261 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
| 5262 | rq->online = 1; |
| 5263 | |
| 5264 | for_each_class(class) { |
| 5265 | if (class->rq_online) |
| 5266 | class->rq_online(rq); |
| 5267 | } |
| 5268 | } |
| 5269 | } |
| 5270 | |
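/*
 * The inverse of set_rq_online(): notify the sched classes first, then
 * clear the CPU from the root domain's online mask.
 */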
| 5271 | static void set_rq_offline(struct rq *rq) |
| 5272 | { |
| 5273 | if (rq->online) { |
| 5274 | const struct sched_class *class; |
| 5275 | |
| 5276 | for_each_class(class) { |
| 5277 | if (class->rq_offline) |
| 5278 | class->rq_offline(rq); |
| 5279 | } |
| 5280 | |
| 5281 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
| 5282 | rq->online = 0; |
| 5283 | } |
| 5284 | } |
| 5285 | |
/*
 * migration_call - callback triggered on CPU hotplug events.  It brings
 * the CPU's runqueue online or offline and, on CPU_DYING, migrates the
 * remaining tasks off the dying CPU.
 */
| 5290 | static int |
| 5291 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| 5292 | { |
| 5293 | int cpu = (long)hcpu; |
| 5294 | unsigned long flags; |
| 5295 | struct rq *rq = cpu_rq(cpu); |
| 5296 | |
| 5297 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5298 | |
| 5299 | case CPU_UP_PREPARE: |
| 5300 | rq->calc_load_update = calc_load_update; |
| 5301 | break; |
| 5302 | |
| 5303 | case CPU_ONLINE: |
| 5304 | /* Update our root-domain */ |
| 5305 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 5306 | if (rq->rd) { |
| 5307 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
| 5308 | |
| 5309 | set_rq_online(rq); |
| 5310 | } |
| 5311 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 5312 | break; |
| 5313 | |
| 5314 | #ifdef CONFIG_HOTPLUG_CPU |
| 5315 | case CPU_DYING: |
| 5316 | sched_ttwu_pending(); |
| 5317 | /* Update our root-domain */ |
| 5318 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 5319 | if (rq->rd) { |
| 5320 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
| 5321 | set_rq_offline(rq); |
| 5322 | } |
| 5323 | migrate_tasks(cpu); |
| 5324 | BUG_ON(rq->nr_running != 1); /* the migration thread */ |
| 5325 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 5326 | break; |
| 5327 | |
| 5328 | case CPU_DEAD: |
| 5329 | calc_load_migrate(rq); |
| 5330 | break; |
| 5331 | #endif |
| 5332 | } |
| 5333 | |
| 5334 | update_max_interval(); |
| 5335 | |
| 5336 | return NOTIFY_OK; |
| 5337 | } |
| 5338 | |
| 5339 | /* |
| 5340 | * Register at high priority so that task migration (migrate_all_tasks) |
| 5341 | * happens before everything else. This has to be lower priority than |
| 5342 | * the notifier in the perf_event subsystem, though. |
| 5343 | */ |
| 5344 | static struct notifier_block migration_notifier = { |
| 5345 | .notifier_call = migration_call, |
| 5346 | .priority = CPU_PRI_MIGRATION, |
| 5347 | }; |
| 5348 | |
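/*
 * Record, in rq->age_stamp, the sched clock time at which this CPU's
 * runqueue comes up.
 */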
| 5349 | static void set_cpu_rq_start_time(void) |
| 5350 | { |
| 5351 | int cpu = smp_processor_id(); |
| 5352 | struct rq *rq = cpu_rq(cpu); |
| 5353 | rq->age_stamp = sched_clock_cpu(cpu); |
| 5354 | } |
| 5355 | |
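/*
 * Hotplug notifier: stamp the runqueue of a starting CPU and mark a CPU
 * active again when taking it down has failed.
 */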
| 5356 | static int sched_cpu_active(struct notifier_block *nfb, |
| 5357 | unsigned long action, void *hcpu) |
| 5358 | { |
| 5359 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5360 | case CPU_STARTING: |
| 5361 | set_cpu_rq_start_time(); |
| 5362 | return NOTIFY_OK; |
| 5363 | case CPU_DOWN_FAILED: |
| 5364 | set_cpu_active((long)hcpu, true); |
| 5365 | return NOTIFY_OK; |
| 5366 | default: |
| 5367 | return NOTIFY_DONE; |
| 5368 | } |
| 5369 | } |
| 5370 | |
| 5371 | static int sched_cpu_inactive(struct notifier_block *nfb, |
| 5372 | unsigned long action, void *hcpu) |
| 5373 | { |
| 5374 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5375 | case CPU_DOWN_PREPARE: |
| 5376 | set_cpu_active((long)hcpu, false); |
| 5377 | return NOTIFY_OK; |
| 5378 | default: |
| 5379 | return NOTIFY_DONE; |
| 5380 | } |
| 5381 | } |
| 5382 | |
| 5383 | static int __init migration_init(void) |
| 5384 | { |
| 5385 | void *cpu = (void *)(long)smp_processor_id(); |
| 5386 | int err; |
| 5387 | |
| 5388 | /* Initialize migration for the boot CPU */ |
| 5389 | err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); |
| 5390 | BUG_ON(err == NOTIFY_BAD); |
| 5391 | migration_call(&migration_notifier, CPU_ONLINE, cpu); |
| 5392 | register_cpu_notifier(&migration_notifier); |
| 5393 | |
| 5394 | /* Register cpu active notifiers */ |
| 5395 | cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); |
| 5396 | cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); |
| 5397 | |
| 5398 | return 0; |
| 5399 | } |
| 5400 | early_initcall(migration_init); |
| 5401 | #endif |
| 5402 | |
| 5403 | #ifdef CONFIG_SMP |
| 5404 | |
| 5405 | static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ |
| 5406 | |
| 5407 | #ifdef CONFIG_SCHED_DEBUG |
| 5408 | |
| 5409 | static __read_mostly int sched_debug_enabled; |
| 5410 | |
| 5411 | static int __init sched_debug_setup(char *str) |
| 5412 | { |
| 5413 | sched_debug_enabled = 1; |
| 5414 | |
| 5415 | return 0; |
| 5416 | } |
| 5417 | early_param("sched_debug", sched_debug_setup); |
| 5418 | |
| 5419 | static inline bool sched_debug(void) |
| 5420 | { |
| 5421 | return sched_debug_enabled; |
| 5422 | } |
| 5423 | |
| 5424 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
| 5425 | struct cpumask *groupmask) |
| 5426 | { |
| 5427 | struct sched_group *group = sd->groups; |
| 5428 | |
| 5429 | cpumask_clear(groupmask); |
| 5430 | |
| 5431 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
| 5432 | |
| 5433 | if (!(sd->flags & SD_LOAD_BALANCE)) { |
| 5434 | printk("does not load-balance\n"); |
| 5435 | if (sd->parent) |
| 5436 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" |
| 5437 | " has parent"); |
| 5438 | return -1; |
| 5439 | } |
| 5440 | |
| 5441 | printk(KERN_CONT "span %*pbl level %s\n", |
| 5442 | cpumask_pr_args(sched_domain_span(sd)), sd->name); |
| 5443 | |
| 5444 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 5445 | printk(KERN_ERR "ERROR: domain->span does not contain " |
| 5446 | "CPU%d\n", cpu); |
| 5447 | } |
| 5448 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
| 5449 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
| 5450 | " CPU%d\n", cpu); |
| 5451 | } |
| 5452 | |
| 5453 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); |
| 5454 | do { |
| 5455 | if (!group) { |
| 5456 | printk("\n"); |
| 5457 | printk(KERN_ERR "ERROR: group is NULL\n"); |
| 5458 | break; |
| 5459 | } |
| 5460 | |
| 5461 | if (!cpumask_weight(sched_group_cpus(group))) { |
| 5462 | printk(KERN_CONT "\n"); |
| 5463 | printk(KERN_ERR "ERROR: empty group\n"); |
| 5464 | break; |
| 5465 | } |
| 5466 | |
| 5467 | if (!(sd->flags & SD_OVERLAP) && |
| 5468 | cpumask_intersects(groupmask, sched_group_cpus(group))) { |
| 5469 | printk(KERN_CONT "\n"); |
| 5470 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
| 5471 | break; |
| 5472 | } |
| 5473 | |
| 5474 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
| 5475 | |
| 5476 | printk(KERN_CONT " %*pbl", |
| 5477 | cpumask_pr_args(sched_group_cpus(group))); |
| 5478 | if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { |
| 5479 | printk(KERN_CONT " (cpu_capacity = %d)", |
| 5480 | group->sgc->capacity); |
| 5481 | } |
| 5482 | |
| 5483 | group = group->next; |
| 5484 | } while (group != sd->groups); |
| 5485 | printk(KERN_CONT "\n"); |
| 5486 | |
| 5487 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
| 5488 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
| 5489 | |
| 5490 | if (sd->parent && |
| 5491 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) |
| 5492 | printk(KERN_ERR "ERROR: parent span is not a superset " |
| 5493 | "of domain->span\n"); |
| 5494 | return 0; |
| 5495 | } |
| 5496 | |
| 5497 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
| 5498 | { |
| 5499 | int level = 0; |
| 5500 | |
| 5501 | if (!sched_debug_enabled) |
| 5502 | return; |
| 5503 | |
| 5504 | if (!sd) { |
| 5505 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
| 5506 | return; |
| 5507 | } |
| 5508 | |
| 5509 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
| 5510 | |
| 5511 | for (;;) { |
| 5512 | if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) |
| 5513 | break; |
| 5514 | level++; |
| 5515 | sd = sd->parent; |
| 5516 | if (!sd) |
| 5517 | break; |
| 5518 | } |
| 5519 | } |
| 5520 | #else /* !CONFIG_SCHED_DEBUG */ |
| 5521 | # define sched_domain_debug(sd, cpu) do { } while (0) |
| 5522 | static inline bool sched_debug(void) |
| 5523 | { |
| 5524 | return false; |
| 5525 | } |
| 5526 | #endif /* CONFIG_SCHED_DEBUG */ |
| 5527 | |
| 5528 | static int sd_degenerate(struct sched_domain *sd) |
| 5529 | { |
| 5530 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
| 5531 | return 1; |
| 5532 | |
| 5533 | /* Following flags need at least 2 groups */ |
| 5534 | if (sd->flags & (SD_LOAD_BALANCE | |
| 5535 | SD_BALANCE_NEWIDLE | |
| 5536 | SD_BALANCE_FORK | |
| 5537 | SD_BALANCE_EXEC | |
| 5538 | SD_SHARE_CPUCAPACITY | |
| 5539 | SD_SHARE_PKG_RESOURCES | |
| 5540 | SD_SHARE_POWERDOMAIN)) { |
| 5541 | if (sd->groups != sd->groups->next) |
| 5542 | return 0; |
| 5543 | } |
| 5544 | |
| 5545 | /* Following flags don't use groups */ |
| 5546 | if (sd->flags & (SD_WAKE_AFFINE)) |
| 5547 | return 0; |
| 5548 | |
| 5549 | return 1; |
| 5550 | } |
| 5551 | |
| 5552 | static int |
| 5553 | sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) |
| 5554 | { |
| 5555 | unsigned long cflags = sd->flags, pflags = parent->flags; |
| 5556 | |
| 5557 | if (sd_degenerate(parent)) |
| 5558 | return 1; |
| 5559 | |
| 5560 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
| 5561 | return 0; |
| 5562 | |
| 5563 | /* Flags needing groups don't count if only 1 group in parent */ |
| 5564 | if (parent->groups == parent->groups->next) { |
| 5565 | pflags &= ~(SD_LOAD_BALANCE | |
| 5566 | SD_BALANCE_NEWIDLE | |
| 5567 | SD_BALANCE_FORK | |
| 5568 | SD_BALANCE_EXEC | |
| 5569 | SD_SHARE_CPUCAPACITY | |
| 5570 | SD_SHARE_PKG_RESOURCES | |
| 5571 | SD_PREFER_SIBLING | |
| 5572 | SD_SHARE_POWERDOMAIN); |
| 5573 | if (nr_node_ids == 1) |
| 5574 | pflags &= ~SD_SERIALIZE; |
| 5575 | } |
| 5576 | if (~cflags & pflags) |
| 5577 | return 0; |
| 5578 | |
| 5579 | return 1; |
| 5580 | } |
| 5581 | |
| 5582 | static void free_rootdomain(struct rcu_head *rcu) |
| 5583 | { |
| 5584 | struct root_domain *rd = container_of(rcu, struct root_domain, rcu); |
| 5585 | |
| 5586 | cpupri_cleanup(&rd->cpupri); |
| 5587 | cpudl_cleanup(&rd->cpudl); |
| 5588 | free_cpumask_var(rd->dlo_mask); |
| 5589 | free_cpumask_var(rd->rto_mask); |
| 5590 | free_cpumask_var(rd->online); |
| 5591 | free_cpumask_var(rd->span); |
| 5592 | kfree(rd); |
| 5593 | } |
| 5594 | |
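/*
 * Attach @rq to root domain @rd, detaching it from its previous root
 * domain first.  The old root domain is freed (via RCU) once its last
 * reference is dropped.
 */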
| 5595 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 5596 | { |
| 5597 | struct root_domain *old_rd = NULL; |
| 5598 | unsigned long flags; |
| 5599 | |
| 5600 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 5601 | |
| 5602 | if (rq->rd) { |
| 5603 | old_rd = rq->rd; |
| 5604 | |
| 5605 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
| 5606 | set_rq_offline(rq); |
| 5607 | |
| 5608 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
| 5609 | |
/*
 * If we don't want to free the old_rd yet then
 * set old_rd to NULL to skip the freeing later
 * in this function:
 */
| 5615 | if (!atomic_dec_and_test(&old_rd->refcount)) |
| 5616 | old_rd = NULL; |
| 5617 | } |
| 5618 | |
| 5619 | atomic_inc(&rd->refcount); |
| 5620 | rq->rd = rd; |
| 5621 | |
| 5622 | cpumask_set_cpu(rq->cpu, rd->span); |
| 5623 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
| 5624 | set_rq_online(rq); |
| 5625 | |
| 5626 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 5627 | |
| 5628 | if (old_rd) |
| 5629 | call_rcu_sched(&old_rd->rcu, free_rootdomain); |
| 5630 | } |
| 5631 | |
| 5632 | static int init_rootdomain(struct root_domain *rd) |
| 5633 | { |
| 5634 | memset(rd, 0, sizeof(*rd)); |
| 5635 | |
| 5636 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) |
| 5637 | goto out; |
| 5638 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) |
| 5639 | goto free_span; |
| 5640 | if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) |
| 5641 | goto free_online; |
| 5642 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
| 5643 | goto free_dlo_mask; |
| 5644 | |
| 5645 | init_dl_bw(&rd->dl_bw); |
| 5646 | if (cpudl_init(&rd->cpudl) != 0) |
| 5647 | goto free_dlo_mask; |
| 5648 | |
| 5649 | if (cpupri_init(&rd->cpupri) != 0) |
| 5650 | goto free_rto_mask; |
| 5651 | return 0; |
| 5652 | |
| 5653 | free_rto_mask: |
| 5654 | free_cpumask_var(rd->rto_mask); |
| 5655 | free_dlo_mask: |
| 5656 | free_cpumask_var(rd->dlo_mask); |
| 5657 | free_online: |
| 5658 | free_cpumask_var(rd->online); |
| 5659 | free_span: |
| 5660 | free_cpumask_var(rd->span); |
| 5661 | out: |
| 5662 | return -ENOMEM; |
| 5663 | } |
| 5664 | |
| 5665 | /* |
| 5666 | * By default the system creates a single root-domain with all cpus as |
| 5667 | * members (mimicking the global state we have today). |
| 5668 | */ |
| 5669 | struct root_domain def_root_domain; |
| 5670 | |
| 5671 | static void init_defrootdomain(void) |
| 5672 | { |
| 5673 | init_rootdomain(&def_root_domain); |
| 5674 | |
| 5675 | atomic_set(&def_root_domain.refcount, 1); |
| 5676 | } |
| 5677 | |
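/* Allocate and initialize a new root_domain; returns NULL on failure. */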
| 5678 | static struct root_domain *alloc_rootdomain(void) |
| 5679 | { |
| 5680 | struct root_domain *rd; |
| 5681 | |
| 5682 | rd = kmalloc(sizeof(*rd), GFP_KERNEL); |
| 5683 | if (!rd) |
| 5684 | return NULL; |
| 5685 | |
| 5686 | if (init_rootdomain(rd) != 0) { |
| 5687 | kfree(rd); |
| 5688 | return NULL; |
| 5689 | } |
| 5690 | |
| 5691 | return rd; |
| 5692 | } |
| 5693 | |
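/*
 * Walk the circular list starting at @sg and free every group in it,
 * dropping the shared sched_group_capacity too when @free_sgc is set
 * and the last reference goes away.
 */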
| 5694 | static void free_sched_groups(struct sched_group *sg, int free_sgc) |
| 5695 | { |
| 5696 | struct sched_group *tmp, *first; |
| 5697 | |
| 5698 | if (!sg) |
| 5699 | return; |
| 5700 | |
| 5701 | first = sg; |
| 5702 | do { |
| 5703 | tmp = sg->next; |
| 5704 | |
| 5705 | if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) |
| 5706 | kfree(sg->sgc); |
| 5707 | |
| 5708 | kfree(sg); |
| 5709 | sg = tmp; |
| 5710 | } while (sg != first); |
| 5711 | } |
| 5712 | |
| 5713 | static void free_sched_domain(struct rcu_head *rcu) |
| 5714 | { |
| 5715 | struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); |
| 5716 | |
/*
 * If it's an overlapping domain it has private groups; iterate and
 * nuke them all.
 */
| 5721 | if (sd->flags & SD_OVERLAP) { |
| 5722 | free_sched_groups(sd->groups, 1); |
| 5723 | } else if (atomic_dec_and_test(&sd->groups->ref)) { |
| 5724 | kfree(sd->groups->sgc); |
| 5725 | kfree(sd->groups); |
| 5726 | } |
| 5727 | kfree(sd); |
| 5728 | } |
| 5729 | |
| 5730 | static void destroy_sched_domain(struct sched_domain *sd, int cpu) |
| 5731 | { |
| 5732 | call_rcu(&sd->rcu, free_sched_domain); |
| 5733 | } |
| 5734 | |
| 5735 | static void destroy_sched_domains(struct sched_domain *sd, int cpu) |
| 5736 | { |
| 5737 | for (; sd; sd = sd->parent) |
| 5738 | destroy_sched_domain(sd, cpu); |
| 5739 | } |
| 5740 | |
/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this allows us
 * to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain); this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
| 5750 | DEFINE_PER_CPU(struct sched_domain *, sd_llc); |
| 5751 | DEFINE_PER_CPU(int, sd_llc_size); |
| 5752 | DEFINE_PER_CPU(int, sd_llc_id); |
| 5753 | DEFINE_PER_CPU(struct sched_domain *, sd_numa); |
| 5754 | DEFINE_PER_CPU(struct sched_domain *, sd_busy); |
| 5755 | DEFINE_PER_CPU(struct sched_domain *, sd_asym); |
| 5756 | |
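/*
 * Recompute the cached sd_llc/sd_busy/sd_numa/sd_asym pointers (and the
 * LLC id/size) for @cpu after its sched domain hierarchy has changed.
 */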
| 5757 | static void update_top_cache_domain(int cpu) |
| 5758 | { |
| 5759 | struct sched_domain *sd; |
| 5760 | struct sched_domain *busy_sd = NULL; |
| 5761 | int id = cpu; |
| 5762 | int size = 1; |
| 5763 | |
| 5764 | sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); |
| 5765 | if (sd) { |
| 5766 | id = cpumask_first(sched_domain_span(sd)); |
| 5767 | size = cpumask_weight(sched_domain_span(sd)); |
| 5768 | busy_sd = sd->parent; /* sd_busy */ |
| 5769 | } |
| 5770 | rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); |
| 5771 | |
| 5772 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
| 5773 | per_cpu(sd_llc_size, cpu) = size; |
| 5774 | per_cpu(sd_llc_id, cpu) = id; |
| 5775 | |
| 5776 | sd = lowest_flag_domain(cpu, SD_NUMA); |
| 5777 | rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); |
| 5778 | |
| 5779 | sd = highest_flag_domain(cpu, SD_ASYM_PACKING); |
| 5780 | rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); |
| 5781 | } |
| 5782 | |
| 5783 | /* |
| 5784 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must |
| 5785 | * hold the hotplug lock. |
| 5786 | */ |
| 5787 | static void |
| 5788 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) |
| 5789 | { |
| 5790 | struct rq *rq = cpu_rq(cpu); |
| 5791 | struct sched_domain *tmp; |
| 5792 | |
| 5793 | /* Remove the sched domains which do not contribute to scheduling. */ |
| 5794 | for (tmp = sd; tmp; ) { |
| 5795 | struct sched_domain *parent = tmp->parent; |
| 5796 | if (!parent) |
| 5797 | break; |
| 5798 | |
| 5799 | if (sd_parent_degenerate(tmp, parent)) { |
| 5800 | tmp->parent = parent->parent; |
| 5801 | if (parent->parent) |
| 5802 | parent->parent->child = tmp; |
| 5803 | /* |
| 5804 | * Transfer SD_PREFER_SIBLING down in case of a |
| 5805 | * degenerate parent; the spans match for this |
| 5806 | * so the property transfers. |
| 5807 | */ |
| 5808 | if (parent->flags & SD_PREFER_SIBLING) |
| 5809 | tmp->flags |= SD_PREFER_SIBLING; |
| 5810 | destroy_sched_domain(parent, cpu); |
| 5811 | } else |
| 5812 | tmp = tmp->parent; |
| 5813 | } |
| 5814 | |
| 5815 | if (sd && sd_degenerate(sd)) { |
| 5816 | tmp = sd; |
| 5817 | sd = sd->parent; |
| 5818 | destroy_sched_domain(tmp, cpu); |
| 5819 | if (sd) |
| 5820 | sd->child = NULL; |
| 5821 | } |
| 5822 | |
| 5823 | sched_domain_debug(sd, cpu); |
| 5824 | |
| 5825 | rq_attach_root(rq, rd); |
| 5826 | tmp = rq->sd; |
| 5827 | rcu_assign_pointer(rq->sd, sd); |
| 5828 | destroy_sched_domains(tmp, cpu); |
| 5829 | |
| 5830 | update_top_cache_domain(cpu); |
| 5831 | } |
| 5832 | |
/* Set up the mask of cpus configured for isolated domains */
| 5834 | static int __init isolated_cpu_setup(char *str) |
| 5835 | { |
| 5836 | alloc_bootmem_cpumask_var(&cpu_isolated_map); |
| 5837 | cpulist_parse(str, cpu_isolated_map); |
| 5838 | return 1; |
| 5839 | } |
| 5840 | |
| 5841 | __setup("isolcpus=", isolated_cpu_setup); |
| 5842 | |
| 5843 | struct s_data { |
| 5844 | struct sched_domain ** __percpu sd; |
| 5845 | struct root_domain *rd; |
| 5846 | }; |
| 5847 | |
| 5848 | enum s_alloc { |
| 5849 | sa_rootdomain, |
| 5850 | sa_sd, |
| 5851 | sa_sd_storage, |
| 5852 | sa_none, |
| 5853 | }; |
| 5854 | |
/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree
 * is of unequal depth; make sure to skip domains that already cover the
 * entire range.
 *
 * In that case build_sched_domains() will have terminated the iteration
 * early and our sibling sd spans will be empty.  Domains should always
 * include the cpu they're built on, so check that.
 */
| 5868 | static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) |
| 5869 | { |
| 5870 | const struct cpumask *span = sched_domain_span(sd); |
| 5871 | struct sd_data *sdd = sd->private; |
| 5872 | struct sched_domain *sibling; |
| 5873 | int i; |
| 5874 | |
| 5875 | for_each_cpu(i, span) { |
| 5876 | sibling = *per_cpu_ptr(sdd->sd, i); |
| 5877 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) |
| 5878 | continue; |
| 5879 | |
| 5880 | cpumask_set_cpu(i, sched_group_mask(sg)); |
| 5881 | } |
| 5882 | } |
| 5883 | |
/*
 * Return the canonical balance cpu for this group: the first cpu of the
 * group that is also in its iteration mask.
 */
| 5888 | int group_balance_cpu(struct sched_group *sg) |
| 5889 | { |
| 5890 | return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); |
| 5891 | } |
| 5892 | |
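/*
 * Build the (possibly overlapping) group list for an SD_OVERLAP domain:
 * one group per distinct sibling domain span, each carrying its own
 * iteration mask and capacity structure.
 */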
| 5893 | static int |
| 5894 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) |
| 5895 | { |
| 5896 | struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; |
| 5897 | const struct cpumask *span = sched_domain_span(sd); |
| 5898 | struct cpumask *covered = sched_domains_tmpmask; |
| 5899 | struct sd_data *sdd = sd->private; |
| 5900 | struct sched_domain *sibling; |
| 5901 | int i; |
| 5902 | |
| 5903 | cpumask_clear(covered); |
| 5904 | |
| 5905 | for_each_cpu(i, span) { |
| 5906 | struct cpumask *sg_span; |
| 5907 | |
| 5908 | if (cpumask_test_cpu(i, covered)) |
| 5909 | continue; |
| 5910 | |
| 5911 | sibling = *per_cpu_ptr(sdd->sd, i); |
| 5912 | |
| 5913 | /* See the comment near build_group_mask(). */ |
| 5914 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) |
| 5915 | continue; |
| 5916 | |
| 5917 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 5918 | GFP_KERNEL, cpu_to_node(cpu)); |
| 5919 | |
| 5920 | if (!sg) |
| 5921 | goto fail; |
| 5922 | |
| 5923 | sg_span = sched_group_cpus(sg); |
| 5924 | if (sibling->child) |
| 5925 | cpumask_copy(sg_span, sched_domain_span(sibling->child)); |
| 5926 | else |
| 5927 | cpumask_set_cpu(i, sg_span); |
| 5928 | |
| 5929 | cpumask_or(covered, covered, sg_span); |
| 5930 | |
| 5931 | sg->sgc = *per_cpu_ptr(sdd->sgc, i); |
| 5932 | if (atomic_inc_return(&sg->sgc->ref) == 1) |
| 5933 | build_group_mask(sd, sg); |
| 5934 | |
| 5935 | /* |
| 5936 | * Initialize sgc->capacity such that even if we mess up the |
| 5937 | * domains and no possible iteration will get us here, we won't |
| 5938 | * die on a /0 trap. |
| 5939 | */ |
| 5940 | sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); |
| 5941 | |
| 5942 | /* |
| 5943 | * Make sure the first group of this domain contains the |
| 5944 | * canonical balance cpu. Otherwise the sched_domain iteration |
| 5945 | * breaks. See update_sg_lb_stats(). |
| 5946 | */ |
| 5947 | if ((!groups && cpumask_test_cpu(cpu, sg_span)) || |
| 5948 | group_balance_cpu(sg) == cpu) |
| 5949 | groups = sg; |
| 5950 | |
| 5951 | if (!first) |
| 5952 | first = sg; |
| 5953 | if (last) |
| 5954 | last->next = sg; |
| 5955 | last = sg; |
| 5956 | last->next = first; |
| 5957 | } |
| 5958 | sd->groups = groups; |
| 5959 | |
| 5960 | return 0; |
| 5961 | |
| 5962 | fail: |
| 5963 | free_sched_groups(first, 0); |
| 5964 | |
| 5965 | return -ENOMEM; |
| 5966 | } |
| 5967 | |
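/*
 * Return the group a CPU belongs to at this level -- identified by the
 * first CPU of the child domain's span -- and optionally hand back the
 * corresponding sched_group and its capacity structure.
 */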
| 5968 | static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) |
| 5969 | { |
| 5970 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); |
| 5971 | struct sched_domain *child = sd->child; |
| 5972 | |
| 5973 | if (child) |
| 5974 | cpu = cpumask_first(sched_domain_span(child)); |
| 5975 | |
| 5976 | if (sg) { |
| 5977 | *sg = *per_cpu_ptr(sdd->sg, cpu); |
| 5978 | (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); |
| 5979 | atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ |
| 5980 | } |
| 5981 | |
| 5982 | return cpu; |
| 5983 | } |
| 5984 | |
| 5985 | /* |
| 5986 | * build_sched_groups will build a circular linked list of the groups |
| 5987 | * covered by the given span, and will set each group's ->cpumask correctly, |
| 5988 | * and ->cpu_capacity to 0. |
| 5989 | * |
| 5990 | * Assumes the sched_domain tree is fully constructed |
| 5991 | */ |
| 5992 | static int |
| 5993 | build_sched_groups(struct sched_domain *sd, int cpu) |
| 5994 | { |
| 5995 | struct sched_group *first = NULL, *last = NULL; |
| 5996 | struct sd_data *sdd = sd->private; |
| 5997 | const struct cpumask *span = sched_domain_span(sd); |
| 5998 | struct cpumask *covered; |
| 5999 | int i; |
| 6000 | |
| 6001 | get_group(cpu, sdd, &sd->groups); |
| 6002 | atomic_inc(&sd->groups->ref); |
| 6003 | |
| 6004 | if (cpu != cpumask_first(span)) |
| 6005 | return 0; |
| 6006 | |
| 6007 | lockdep_assert_held(&sched_domains_mutex); |
| 6008 | covered = sched_domains_tmpmask; |
| 6009 | |
| 6010 | cpumask_clear(covered); |
| 6011 | |
| 6012 | for_each_cpu(i, span) { |
| 6013 | struct sched_group *sg; |
| 6014 | int group, j; |
| 6015 | |
| 6016 | if (cpumask_test_cpu(i, covered)) |
| 6017 | continue; |
| 6018 | |
| 6019 | group = get_group(i, sdd, &sg); |
| 6020 | cpumask_setall(sched_group_mask(sg)); |
| 6021 | |
| 6022 | for_each_cpu(j, span) { |
| 6023 | if (get_group(j, sdd, NULL) != group) |
| 6024 | continue; |
| 6025 | |
| 6026 | cpumask_set_cpu(j, covered); |
| 6027 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
| 6028 | } |
| 6029 | |
| 6030 | if (!first) |
| 6031 | first = sg; |
| 6032 | if (last) |
| 6033 | last->next = sg; |
| 6034 | last = sg; |
| 6035 | } |
| 6036 | last->next = first; |
| 6037 | |
| 6038 | return 0; |
| 6039 | } |
| 6040 | |
/*
 * Initialize the sched groups' cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing load between the sched groups in a sched domain.
 * Typically cpu_capacity will be the same for all groups in a sched domain
 * unless there are asymmetries in the topology.  If there are asymmetries,
 * a group with more cpu_capacity will pick up more load than a group with
 * less cpu_capacity.
 */
| 6051 | static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) |
| 6052 | { |
| 6053 | struct sched_group *sg = sd->groups; |
| 6054 | |
| 6055 | WARN_ON(!sg); |
| 6056 | |
| 6057 | do { |
| 6058 | sg->group_weight = cpumask_weight(sched_group_cpus(sg)); |
| 6059 | sg = sg->next; |
| 6060 | } while (sg != sd->groups); |
| 6061 | |
| 6062 | if (cpu != group_balance_cpu(sg)) |
| 6063 | return; |
| 6064 | |
| 6065 | update_group_capacity(sd, cpu); |
| 6066 | atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); |
| 6067 | } |
| 6068 | |
/*
 * Initializers for sched domains.
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */
| 6073 | |
| 6074 | static int default_relax_domain_level = -1; |
| 6075 | int sched_domain_level_max; |
| 6076 | |
| 6077 | static int __init setup_relax_domain_level(char *str) |
| 6078 | { |
| 6079 | if (kstrtoint(str, 0, &default_relax_domain_level)) |
| 6080 | pr_warn("Unable to set relax_domain_level\n"); |
| 6081 | |
| 6082 | return 1; |
| 6083 | } |
| 6084 | __setup("relax_domain_level=", setup_relax_domain_level); |
| 6085 | |
| 6086 | static void set_domain_attribute(struct sched_domain *sd, |
| 6087 | struct sched_domain_attr *attr) |
| 6088 | { |
| 6089 | int request; |
| 6090 | |
| 6091 | if (!attr || attr->relax_domain_level < 0) { |
| 6092 | if (default_relax_domain_level < 0) |
| 6093 | return; |
| 6094 | else |
| 6095 | request = default_relax_domain_level; |
| 6096 | } else |
| 6097 | request = attr->relax_domain_level; |
| 6098 | if (request < sd->level) { |
| 6099 | /* turn off idle balance on this domain */ |
| 6100 | sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
| 6101 | } else { |
| 6102 | /* turn on idle balance on this domain */ |
| 6103 | sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
| 6104 | } |
| 6105 | } |
| 6106 | |
| 6107 | static void __sdt_free(const struct cpumask *cpu_map); |
| 6108 | static int __sdt_alloc(const struct cpumask *cpu_map); |
| 6109 | |
| 6110 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, |
| 6111 | const struct cpumask *cpu_map) |
| 6112 | { |
| 6113 | switch (what) { |
| 6114 | case sa_rootdomain: |
| 6115 | if (!atomic_read(&d->rd->refcount)) |
| 6116 | free_rootdomain(&d->rd->rcu); /* fall through */ |
| 6117 | case sa_sd: |
| 6118 | free_percpu(d->sd); /* fall through */ |
| 6119 | case sa_sd_storage: |
| 6120 | __sdt_free(cpu_map); /* fall through */ |
| 6121 | case sa_none: |
| 6122 | break; |
| 6123 | } |
| 6124 | } |
| 6125 | |
| 6126 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, |
| 6127 | const struct cpumask *cpu_map) |
| 6128 | { |
| 6129 | memset(d, 0, sizeof(*d)); |
| 6130 | |
| 6131 | if (__sdt_alloc(cpu_map)) |
| 6132 | return sa_sd_storage; |
| 6133 | d->sd = alloc_percpu(struct sched_domain *); |
| 6134 | if (!d->sd) |
| 6135 | return sa_sd_storage; |
| 6136 | d->rd = alloc_rootdomain(); |
| 6137 | if (!d->rd) |
| 6138 | return sa_sd; |
| 6139 | return sa_rootdomain; |
| 6140 | } |
| 6141 | |
| 6142 | /* |
| 6143 | * NULL the sd_data elements we've used to build the sched_domain and |
| 6144 | * sched_group structure so that the subsequent __free_domain_allocs() |
| 6145 | * will not free the data we're using. |
| 6146 | */ |
| 6147 | static void claim_allocations(int cpu, struct sched_domain *sd) |
| 6148 | { |
| 6149 | struct sd_data *sdd = sd->private; |
| 6150 | |
| 6151 | WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); |
| 6152 | *per_cpu_ptr(sdd->sd, cpu) = NULL; |
| 6153 | |
| 6154 | if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) |
| 6155 | *per_cpu_ptr(sdd->sg, cpu) = NULL; |
| 6156 | |
| 6157 | if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) |
| 6158 | *per_cpu_ptr(sdd->sgc, cpu) = NULL; |
| 6159 | } |
| 6160 | |
| 6161 | #ifdef CONFIG_NUMA |
| 6162 | static int sched_domains_numa_levels; |
| 6163 | enum numa_topology_type sched_numa_topology_type; |
| 6164 | static int *sched_domains_numa_distance; |
| 6165 | int sched_max_numa_distance; |
| 6166 | static struct cpumask ***sched_domains_numa_masks; |
| 6167 | static int sched_domains_curr_level; |
| 6168 | #endif |
| 6169 | |
| 6170 | /* |
| 6171 | * SD_flags allowed in topology descriptions. |
| 6172 | * |
| 6173 | * SD_SHARE_CPUCAPACITY - describes SMT topologies |
| 6174 | * SD_SHARE_PKG_RESOURCES - describes shared caches |
| 6175 | * SD_NUMA - describes NUMA topologies |
| 6176 | * SD_SHARE_POWERDOMAIN - describes shared power domain |
| 6177 | * |
| 6178 | * Odd one out: |
| 6179 | * SD_ASYM_PACKING - describes SMT quirks |
| 6180 | */ |
| 6181 | #define TOPOLOGY_SD_FLAGS \ |
| 6182 | (SD_SHARE_CPUCAPACITY | \ |
| 6183 | SD_SHARE_PKG_RESOURCES | \ |
| 6184 | SD_NUMA | \ |
| 6185 | SD_ASYM_PACKING | \ |
| 6186 | SD_SHARE_POWERDOMAIN) |
| 6187 | |
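/*
 * Construct the sched_domain for topology level @tl on @cpu and
 * translate the level's topology flags into balancing behaviour.
 */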
| 6188 | static struct sched_domain * |
| 6189 | sd_init(struct sched_domain_topology_level *tl, int cpu) |
| 6190 | { |
| 6191 | struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); |
| 6192 | int sd_weight, sd_flags = 0; |
| 6193 | |
| 6194 | #ifdef CONFIG_NUMA |
| 6195 | /* |
| 6196 | * Ugly hack to pass state to sd_numa_mask()... |
| 6197 | */ |
| 6198 | sched_domains_curr_level = tl->numa_level; |
| 6199 | #endif |
| 6200 | |
| 6201 | sd_weight = cpumask_weight(tl->mask(cpu)); |
| 6202 | |
| 6203 | if (tl->sd_flags) |
| 6204 | sd_flags = (*tl->sd_flags)(); |
| 6205 | if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, |
| 6206 | "wrong sd_flags in topology description\n")) |
| 6207 | sd_flags &= ~TOPOLOGY_SD_FLAGS; |
| 6208 | |
| 6209 | *sd = (struct sched_domain){ |
| 6210 | .min_interval = sd_weight, |
| 6211 | .max_interval = 2*sd_weight, |
| 6212 | .busy_factor = 32, |
| 6213 | .imbalance_pct = 125, |
| 6214 | |
| 6215 | .cache_nice_tries = 0, |
| 6216 | .busy_idx = 0, |
| 6217 | .idle_idx = 0, |
| 6218 | .newidle_idx = 0, |
| 6219 | .wake_idx = 0, |
| 6220 | .forkexec_idx = 0, |
| 6221 | |
| 6222 | .flags = 1*SD_LOAD_BALANCE |
| 6223 | | 1*SD_BALANCE_NEWIDLE |
| 6224 | | 1*SD_BALANCE_EXEC |
| 6225 | | 1*SD_BALANCE_FORK |
| 6226 | | 0*SD_BALANCE_WAKE |
| 6227 | | 1*SD_WAKE_AFFINE |
| 6228 | | 0*SD_SHARE_CPUCAPACITY |
| 6229 | | 0*SD_SHARE_PKG_RESOURCES |
| 6230 | | 0*SD_SERIALIZE |
| 6231 | | 0*SD_PREFER_SIBLING |
| 6232 | | 0*SD_NUMA |
| 6233 | | sd_flags |
| 6234 | , |
| 6235 | |
| 6236 | .last_balance = jiffies, |
| 6237 | .balance_interval = sd_weight, |
| 6238 | .smt_gain = 0, |
| 6239 | .max_newidle_lb_cost = 0, |
| 6240 | .next_decay_max_lb_cost = jiffies, |
| 6241 | #ifdef CONFIG_SCHED_DEBUG |
| 6242 | .name = tl->name, |
| 6243 | #endif |
| 6244 | }; |
| 6245 | |
| 6246 | /* |
| 6247 | * Convert topological properties into behaviour. |
| 6248 | */ |
| 6249 | |
| 6250 | if (sd->flags & SD_SHARE_CPUCAPACITY) { |
| 6251 | sd->flags |= SD_PREFER_SIBLING; |
| 6252 | sd->imbalance_pct = 110; |
| 6253 | sd->smt_gain = 1178; /* ~15% */ |
| 6254 | |
| 6255 | } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { |
| 6256 | sd->imbalance_pct = 117; |
| 6257 | sd->cache_nice_tries = 1; |
| 6258 | sd->busy_idx = 2; |
| 6259 | |
| 6260 | #ifdef CONFIG_NUMA |
| 6261 | } else if (sd->flags & SD_NUMA) { |
| 6262 | sd->cache_nice_tries = 2; |
| 6263 | sd->busy_idx = 3; |
| 6264 | sd->idle_idx = 2; |
| 6265 | |
| 6266 | sd->flags |= SD_SERIALIZE; |
| 6267 | if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { |
| 6268 | sd->flags &= ~(SD_BALANCE_EXEC | |
| 6269 | SD_BALANCE_FORK | |
| 6270 | SD_WAKE_AFFINE); |
| 6271 | } |
| 6272 | |
| 6273 | #endif |
| 6274 | } else { |
| 6275 | sd->flags |= SD_PREFER_SIBLING; |
| 6276 | sd->cache_nice_tries = 1; |
| 6277 | sd->busy_idx = 2; |
| 6278 | sd->idle_idx = 1; |
| 6279 | } |
| 6280 | |
| 6281 | sd->private = &tl->data; |
| 6282 | |
| 6283 | return sd; |
| 6284 | } |
| 6285 | |
| 6286 | /* |
| 6287 | * Topology list, bottom-up. |
| 6288 | */ |
| 6289 | static struct sched_domain_topology_level default_topology[] = { |
| 6290 | #ifdef CONFIG_SCHED_SMT |
| 6291 | { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, |
| 6292 | #endif |
| 6293 | #ifdef CONFIG_SCHED_MC |
| 6294 | { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, |
| 6295 | #endif |
| 6296 | { cpu_cpu_mask, SD_INIT_NAME(DIE) }, |
| 6297 | { NULL, }, |
| 6298 | }; |
| 6299 | |
| 6300 | struct sched_domain_topology_level *sched_domain_topology = default_topology; |
| 6301 | |
| 6302 | #define for_each_sd_topology(tl) \ |
| 6303 | for (tl = sched_domain_topology; tl->mask; tl++) |
| 6304 | |
| 6305 | void set_sched_topology(struct sched_domain_topology_level *tl) |
| 6306 | { |
| 6307 | sched_domain_topology = tl; |
| 6308 | } |
| 6309 | |
| 6310 | #ifdef CONFIG_NUMA |
| 6311 | |
| 6312 | static const struct cpumask *sd_numa_mask(int cpu) |
| 6313 | { |
| 6314 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; |
| 6315 | } |
| 6316 | |
| 6317 | static void sched_numa_warn(const char *str) |
| 6318 | { |
| 6319 | static int done = false; |
| 6320 | int i,j; |
| 6321 | |
| 6322 | if (done) |
| 6323 | return; |
| 6324 | |
| 6325 | done = true; |
| 6326 | |
| 6327 | printk(KERN_WARNING "ERROR: %s\n\n", str); |
| 6328 | |
| 6329 | for (i = 0; i < nr_node_ids; i++) { |
| 6330 | printk(KERN_WARNING " "); |
| 6331 | for (j = 0; j < nr_node_ids; j++) |
| 6332 | printk(KERN_CONT "%02d ", node_distance(i,j)); |
| 6333 | printk(KERN_CONT "\n"); |
| 6334 | } |
| 6335 | printk(KERN_WARNING "\n"); |
| 6336 | } |
| 6337 | |
| 6338 | bool find_numa_distance(int distance) |
| 6339 | { |
| 6340 | int i; |
| 6341 | |
| 6342 | if (distance == node_distance(0, 0)) |
| 6343 | return true; |
| 6344 | |
| 6345 | for (i = 0; i < sched_domains_numa_levels; i++) { |
| 6346 | if (sched_domains_numa_distance[i] == distance) |
| 6347 | return true; |
| 6348 | } |
| 6349 | |
| 6350 | return false; |
| 6351 | } |
| 6352 | |
| 6353 | /* |
| 6354 | * A system can have three types of NUMA topology: |
| 6355 | * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system |
| 6356 | * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes |
| 6357 | * NUMA_BACKPLANE: nodes can reach other nodes through a backplane |
| 6358 | * |
| 6359 | * The difference between a glueless mesh topology and a backplane |
| 6360 | * topology lies in whether communication between not directly |
| 6361 | * connected nodes goes through intermediary nodes (where programs |
| 6362 | * could run), or through backplane controllers. This affects |
| 6363 | * placement of programs. |
| 6364 | * |
| 6365 | * The type of topology can be discerned with the following tests: |
| 6366 | * - If the maximum distance between any nodes is 1 hop, the system |
| 6367 | * is directly connected. |
| 6368 | * - If for two nodes A and B, located N > 1 hops away from each other, |
| 6369 | * there is an intermediary node C, which is < N hops away from both |
| 6370 | * nodes A and B, the system is a glueless mesh. |
| 6371 | */ |
| 6372 | static void init_numa_topology_type(void) |
| 6373 | { |
| 6374 | int a, b, c, n; |
| 6375 | |
| 6376 | n = sched_max_numa_distance; |
| 6377 | |
| 6378 | if (n <= 1) |
| 6379 | sched_numa_topology_type = NUMA_DIRECT; |
| 6380 | |
| 6381 | for_each_online_node(a) { |
| 6382 | for_each_online_node(b) { |
| 6383 | /* Find two nodes furthest removed from each other. */ |
| 6384 | if (node_distance(a, b) < n) |
| 6385 | continue; |
| 6386 | |
| 6387 | /* Is there an intermediary node between a and b? */ |
| 6388 | for_each_online_node(c) { |
| 6389 | if (node_distance(a, c) < n && |
| 6390 | node_distance(b, c) < n) { |
| 6391 | sched_numa_topology_type = |
| 6392 | NUMA_GLUELESS_MESH; |
| 6393 | return; |
| 6394 | } |
| 6395 | } |
| 6396 | |
| 6397 | sched_numa_topology_type = NUMA_BACKPLANE; |
| 6398 | return; |
| 6399 | } |
| 6400 | } |
| 6401 | } |
| 6402 | |
| 6403 | static void sched_init_numa(void) |
| 6404 | { |
| 6405 | int next_distance, curr_distance = node_distance(0, 0); |
| 6406 | struct sched_domain_topology_level *tl; |
| 6407 | int level = 0; |
| 6408 | int i, j, k; |
| 6409 | |
| 6410 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); |
| 6411 | if (!sched_domains_numa_distance) |
| 6412 | return; |
| 6413 | |
| 6414 | /* |
| 6415 | * O(nr_nodes^2) deduplicating selection sort -- in order to find the |
| 6416 | * unique distances in the node_distance() table. |
| 6417 | * |
| 6418 | * Assumes node_distance(0,j) includes all distances in |
| 6419 | * node_distance(i,j) in order to avoid cubic time. |
| 6420 | */ |
| 6421 | next_distance = curr_distance; |
| 6422 | for (i = 0; i < nr_node_ids; i++) { |
| 6423 | for (j = 0; j < nr_node_ids; j++) { |
| 6424 | for (k = 0; k < nr_node_ids; k++) { |
| 6425 | int distance = node_distance(i, k); |
| 6426 | |
| 6427 | if (distance > curr_distance && |
| 6428 | (distance < next_distance || |
| 6429 | next_distance == curr_distance)) |
| 6430 | next_distance = distance; |
| 6431 | |
/*
 * While not a strong assumption, it would be nice to know
 * about cases where node A is connected to node B but B is
 * not equally connected back to A.
 */
| 6437 | if (sched_debug() && node_distance(k, i) != distance) |
| 6438 | sched_numa_warn("Node-distance not symmetric"); |
| 6439 | |
| 6440 | if (sched_debug() && i && !find_numa_distance(distance)) |
| 6441 | sched_numa_warn("Node-0 not representative"); |
| 6442 | } |
| 6443 | if (next_distance != curr_distance) { |
| 6444 | sched_domains_numa_distance[level++] = next_distance; |
| 6445 | sched_domains_numa_levels = level; |
| 6446 | curr_distance = next_distance; |
| 6447 | } else break; |
| 6448 | } |
| 6449 | |
/*
 * Only verify the above assumption when sched_debug() is enabled.
 */
| 6453 | if (!sched_debug()) |
| 6454 | break; |
| 6455 | } |
| 6456 | |
| 6457 | if (!level) |
| 6458 | return; |
| 6459 | |
| 6460 | /* |
| 6461 | * 'level' contains the number of unique distances, excluding the |
| 6462 | * identity distance node_distance(i,i). |
| 6463 | * |
| 6464 | * The sched_domains_numa_distance[] array includes the actual distance |
| 6465 | * numbers. |
| 6466 | */ |
| 6467 | |
/*
 * Here, we temporarily reset sched_domains_numa_levels to 0.
 * If allocating memory for the sched_domains_numa_masks[][] array
 * fails, the array will contain fewer than 'level' members.  That
 * would be dangerous when other functions use it to iterate over
 * sched_domains_numa_masks[][].
 *
 * We reset it to 'level' at the end of this function.
 */
| 6477 | sched_domains_numa_levels = 0; |
| 6478 | |
| 6479 | sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); |
| 6480 | if (!sched_domains_numa_masks) |
| 6481 | return; |
| 6482 | |
| 6483 | /* |
| 6484 | * Now for each level, construct a mask per node which contains all |
| 6485 | * cpus of nodes that are that many hops away from us. |
| 6486 | */ |
| 6487 | for (i = 0; i < level; i++) { |
| 6488 | sched_domains_numa_masks[i] = |
| 6489 | kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); |
| 6490 | if (!sched_domains_numa_masks[i]) |
| 6491 | return; |
| 6492 | |
| 6493 | for (j = 0; j < nr_node_ids; j++) { |
| 6494 | struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); |
| 6495 | if (!mask) |
| 6496 | return; |
| 6497 | |
| 6498 | sched_domains_numa_masks[i][j] = mask; |
| 6499 | |
| 6500 | for (k = 0; k < nr_node_ids; k++) { |
| 6501 | if (node_distance(j, k) > sched_domains_numa_distance[i]) |
| 6502 | continue; |
| 6503 | |
| 6504 | cpumask_or(mask, mask, cpumask_of_node(k)); |
| 6505 | } |
| 6506 | } |
| 6507 | } |
| 6508 | |
| 6509 | /* Compute default topology size */ |
| 6510 | for (i = 0; sched_domain_topology[i].mask; i++); |
| 6511 | |
| 6512 | tl = kzalloc((i + level + 1) * |
| 6513 | sizeof(struct sched_domain_topology_level), GFP_KERNEL); |
| 6514 | if (!tl) |
| 6515 | return; |
| 6516 | |
| 6517 | /* |
| 6518 | * Copy the default topology bits.. |
| 6519 | */ |
| 6520 | for (i = 0; sched_domain_topology[i].mask; i++) |
| 6521 | tl[i] = sched_domain_topology[i]; |
| 6522 | |
| 6523 | /* |
| 6524 | * .. and append 'j' levels of NUMA goodness. |
| 6525 | */ |
| 6526 | for (j = 0; j < level; i++, j++) { |
| 6527 | tl[i] = (struct sched_domain_topology_level){ |
| 6528 | .mask = sd_numa_mask, |
| 6529 | .sd_flags = cpu_numa_flags, |
| 6530 | .flags = SDTL_OVERLAP, |
| 6531 | .numa_level = j, |
| 6532 | SD_INIT_NAME(NUMA) |
| 6533 | }; |
| 6534 | } |
| 6535 | |
| 6536 | sched_domain_topology = tl; |
| 6537 | |
| 6538 | sched_domains_numa_levels = level; |
| 6539 | sched_max_numa_distance = sched_domains_numa_distance[level - 1]; |
| 6540 | |
| 6541 | init_numa_topology_type(); |
| 6542 | } |
| 6543 | |
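/*
 * Add an onlining @cpu to every NUMA-level mask whose node lies within
 * that level's distance of the cpu's node.
 */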
| 6544 | static void sched_domains_numa_masks_set(int cpu) |
| 6545 | { |
| 6546 | int i, j; |
| 6547 | int node = cpu_to_node(cpu); |
| 6548 | |
| 6549 | for (i = 0; i < sched_domains_numa_levels; i++) { |
| 6550 | for (j = 0; j < nr_node_ids; j++) { |
| 6551 | if (node_distance(j, node) <= sched_domains_numa_distance[i]) |
| 6552 | cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); |
| 6553 | } |
| 6554 | } |
| 6555 | } |
| 6556 | |
| 6557 | static void sched_domains_numa_masks_clear(int cpu) |
| 6558 | { |
| 6559 | int i, j; |
| 6560 | for (i = 0; i < sched_domains_numa_levels; i++) { |
| 6561 | for (j = 0; j < nr_node_ids; j++) |
| 6562 | cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); |
| 6563 | } |
| 6564 | } |
| 6565 | |
| 6566 | /* |
| 6567 | * Update sched_domains_numa_masks[level][node] array when new cpus |
| 6568 | * are onlined. |
| 6569 | */ |
| 6570 | static int sched_domains_numa_masks_update(struct notifier_block *nfb, |
| 6571 | unsigned long action, |
| 6572 | void *hcpu) |
| 6573 | { |
| 6574 | int cpu = (long)hcpu; |
| 6575 | |
| 6576 | switch (action & ~CPU_TASKS_FROZEN) { |
| 6577 | case CPU_ONLINE: |
| 6578 | sched_domains_numa_masks_set(cpu); |
| 6579 | break; |
| 6580 | |
| 6581 | case CPU_DEAD: |
| 6582 | sched_domains_numa_masks_clear(cpu); |
| 6583 | break; |
| 6584 | |
| 6585 | default: |
| 6586 | return NOTIFY_DONE; |
| 6587 | } |
| 6588 | |
| 6589 | return NOTIFY_OK; |
| 6590 | } |
| 6591 | #else |
| 6592 | static inline void sched_init_numa(void) |
| 6593 | { |
| 6594 | } |
| 6595 | |
| 6596 | static int sched_domains_numa_masks_update(struct notifier_block *nfb, |
| 6597 | unsigned long action, |
| 6598 | void *hcpu) |
| 6599 | { |
| 6600 | return 0; |
| 6601 | } |
| 6602 | #endif /* CONFIG_NUMA */ |
| 6603 | |
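/*
 * Allocate the per-cpu sched_domain, sched_group and
 * sched_group_capacity storage for every topology level, covering all
 * cpus in @cpu_map.
 */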
| 6604 | static int __sdt_alloc(const struct cpumask *cpu_map) |
| 6605 | { |
| 6606 | struct sched_domain_topology_level *tl; |
| 6607 | int j; |
| 6608 | |
| 6609 | for_each_sd_topology(tl) { |
| 6610 | struct sd_data *sdd = &tl->data; |
| 6611 | |
| 6612 | sdd->sd = alloc_percpu(struct sched_domain *); |
| 6613 | if (!sdd->sd) |
| 6614 | return -ENOMEM; |
| 6615 | |
| 6616 | sdd->sg = alloc_percpu(struct sched_group *); |
| 6617 | if (!sdd->sg) |
| 6618 | return -ENOMEM; |
| 6619 | |
| 6620 | sdd->sgc = alloc_percpu(struct sched_group_capacity *); |
| 6621 | if (!sdd->sgc) |
| 6622 | return -ENOMEM; |
| 6623 | |
| 6624 | for_each_cpu(j, cpu_map) { |
| 6625 | struct sched_domain *sd; |
| 6626 | struct sched_group *sg; |
| 6627 | struct sched_group_capacity *sgc; |
| 6628 | |
| 6629 | sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), |
| 6630 | GFP_KERNEL, cpu_to_node(j)); |
| 6631 | if (!sd) |
| 6632 | return -ENOMEM; |
| 6633 | |
| 6634 | *per_cpu_ptr(sdd->sd, j) = sd; |
| 6635 | |
| 6636 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 6637 | GFP_KERNEL, cpu_to_node(j)); |
| 6638 | if (!sg) |
| 6639 | return -ENOMEM; |
| 6640 | |
| 6641 | sg->next = sg; |
| 6642 | |
| 6643 | *per_cpu_ptr(sdd->sg, j) = sg; |
| 6644 | |
| 6645 | sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), |
| 6646 | GFP_KERNEL, cpu_to_node(j)); |
| 6647 | if (!sgc) |
| 6648 | return -ENOMEM; |
| 6649 | |
| 6650 | *per_cpu_ptr(sdd->sgc, j) = sgc; |
| 6651 | } |
| 6652 | } |
| 6653 | |
| 6654 | return 0; |
| 6655 | } |
| 6656 | |
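/*
 * Undo __sdt_alloc(): free whatever per-cpu domain/group/capacity
 * storage was not claimed by claim_allocations().
 */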
| 6657 | static void __sdt_free(const struct cpumask *cpu_map) |
| 6658 | { |
| 6659 | struct sched_domain_topology_level *tl; |
| 6660 | int j; |
| 6661 | |
| 6662 | for_each_sd_topology(tl) { |
| 6663 | struct sd_data *sdd = &tl->data; |
| 6664 | |
| 6665 | for_each_cpu(j, cpu_map) { |
| 6666 | struct sched_domain *sd; |
| 6667 | |
| 6668 | if (sdd->sd) { |
| 6669 | sd = *per_cpu_ptr(sdd->sd, j); |
| 6670 | if (sd && (sd->flags & SD_OVERLAP)) |
| 6671 | free_sched_groups(sd->groups, 0); |
| 6672 | kfree(*per_cpu_ptr(sdd->sd, j)); |
| 6673 | } |
| 6674 | |
| 6675 | if (sdd->sg) |
| 6676 | kfree(*per_cpu_ptr(sdd->sg, j)); |
| 6677 | if (sdd->sgc) |
| 6678 | kfree(*per_cpu_ptr(sdd->sgc, j)); |
| 6679 | } |
| 6680 | free_percpu(sdd->sd); |
| 6681 | sdd->sd = NULL; |
| 6682 | free_percpu(sdd->sg); |
| 6683 | sdd->sg = NULL; |
| 6684 | free_percpu(sdd->sgc); |
| 6685 | sdd->sgc = NULL; |
| 6686 | } |
| 6687 | } |
| 6688 | |
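/*
 * Build one sched_domain for topology level @tl on @cpu and link it on
 * top of @child, growing the span if the child isn't fully contained.
 */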
| 6689 | struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, |
| 6690 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 6691 | struct sched_domain *child, int cpu) |
| 6692 | { |
| 6693 | struct sched_domain *sd = sd_init(tl, cpu); |
| 6694 | if (!sd) |
| 6695 | return child; |
| 6696 | |
| 6697 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); |
| 6698 | if (child) { |
| 6699 | sd->level = child->level + 1; |
| 6700 | sched_domain_level_max = max(sched_domain_level_max, sd->level); |
| 6701 | child->parent = sd; |
| 6702 | sd->child = child; |
| 6703 | |
| 6704 | if (!cpumask_subset(sched_domain_span(child), |
| 6705 | sched_domain_span(sd))) { |
| 6706 | pr_err("BUG: arch topology borken\n"); |
| 6707 | #ifdef CONFIG_SCHED_DEBUG |
| 6708 | pr_err(" the %s domain not a subset of the %s domain\n", |
| 6709 | child->name, sd->name); |
| 6710 | #endif |
| 6711 | /* Fixup, ensure @sd has at least @child cpus. */ |
| 6712 | cpumask_or(sched_domain_span(sd), |
| 6713 | sched_domain_span(sd), |
| 6714 | sched_domain_span(child)); |
| 6715 | } |
| 6716 | |
| 6717 | } |
| 6718 | set_domain_attribute(sd, attr); |
| 6719 | |
| 6720 | return sd; |
| 6721 | } |
| 6722 | |
| 6723 | /* |
| 6724 | * Build sched domains for a given set of cpus and attach the sched domains |
| 6725 | * to the individual cpus |
| 6726 | */ |
| 6727 | static int build_sched_domains(const struct cpumask *cpu_map, |
| 6728 | struct sched_domain_attr *attr) |
| 6729 | { |
| 6730 | enum s_alloc alloc_state; |
| 6731 | struct sched_domain *sd; |
| 6732 | struct s_data d; |
| 6733 | int i, ret = -ENOMEM; |
| 6734 | |
| 6735 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); |
| 6736 | if (alloc_state != sa_rootdomain) |
| 6737 | goto error; |
| 6738 | |
| 6739 | /* Set up domains for cpus specified by the cpu_map. */ |
| 6740 | for_each_cpu(i, cpu_map) { |
| 6741 | struct sched_domain_topology_level *tl; |
| 6742 | |
| 6743 | sd = NULL; |
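/*
* Walk the topology levels bottom-up; each newly built domain
* becomes the parent of the one built at the previous level.
*/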
| 6744 | for_each_sd_topology(tl) { |
| 6745 | sd = build_sched_domain(tl, cpu_map, attr, sd, i); |
| 6746 | if (tl == sched_domain_topology) |
| 6747 | *per_cpu_ptr(d.sd, i) = sd; |
| 6748 | if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) |
| 6749 | sd->flags |= SD_OVERLAP; |
| 6750 | if (cpumask_equal(cpu_map, sched_domain_span(sd))) |
| 6751 | break; |
| 6752 | } |
| 6753 | } |
| 6754 | |
| 6755 | /* Build the groups for the domains */ |
| 6756 | for_each_cpu(i, cpu_map) { |
| 6757 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
| 6758 | sd->span_weight = cpumask_weight(sched_domain_span(sd)); |
| 6759 | if (sd->flags & SD_OVERLAP) { |
| 6760 | if (build_overlap_sched_groups(sd, i)) |
| 6761 | goto error; |
| 6762 | } else { |
| 6763 | if (build_sched_groups(sd, i)) |
| 6764 | goto error; |
| 6765 | } |
| 6766 | } |
| 6767 | } |
| 6768 | |
| 6769 | /* Calculate CPU capacity for physical packages and nodes */ |
| 6770 | for (i = nr_cpumask_bits-1; i >= 0; i--) { |
| 6771 | if (!cpumask_test_cpu(i, cpu_map)) |
| 6772 | continue; |
| 6773 | |
| 6774 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
| 6775 | claim_allocations(i, sd); |
| 6776 | init_sched_groups_capacity(i, sd); |
| 6777 | } |
| 6778 | } |
| 6779 | |
| 6780 | /* Attach the domains */ |
| 6781 | rcu_read_lock(); |
| 6782 | for_each_cpu(i, cpu_map) { |
| 6783 | sd = *per_cpu_ptr(d.sd, i); |
| 6784 | cpu_attach_domain(sd, d.rd, i); |
| 6785 | } |
| 6786 | rcu_read_unlock(); |
| 6787 | |
| 6788 | ret = 0; |
| 6789 | error: |
| 6790 | __free_domain_allocs(&d, alloc_state, cpu_map); |
| 6791 | return ret; |
| 6792 | } |
| 6793 | |
| 6794 | static cpumask_var_t *doms_cur; /* current sched domains */ |
| 6795 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
| 6796 | static struct sched_domain_attr *dattr_cur; |
/* attributes of custom domains in 'doms_cur' */
| 6798 | |
| 6799 | /* |
| 6800 | * Special case: If a kmalloc of a doms_cur partition (array of |
* cpumask) fails, then fall back to a single sched domain,
| 6802 | * as determined by the single cpumask fallback_doms. |
| 6803 | */ |
| 6804 | static cpumask_var_t fallback_doms; |
| 6805 | |
| 6806 | /* |
| 6807 | * arch_update_cpu_topology lets virtualized architectures update the |
| 6808 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 6809 | * or 0 if it stayed the same. |
| 6810 | */ |
| 6811 | int __weak arch_update_cpu_topology(void) |
| 6812 | { |
| 6813 | return 0; |
| 6814 | } |
| 6815 | |
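/*
* Illustrative usage of the allocator below (a sketch only; the
* two-partition split is hypothetical and not taken from this file).
* With the hotplug lock held, a caller might do:
*
*	doms = alloc_sched_domains(2);
*	if (doms) {
*		... fill doms[0] and doms[1] with non-overlapping cpumasks ...
*		partition_sched_domains(2, doms, NULL);
*	}
*
* partition_sched_domains() takes ownership of the array and will
* free_sched_domains() it once it is no longer the current partitioning.
*/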
| 6816 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) |
| 6817 | { |
| 6818 | int i; |
| 6819 | cpumask_var_t *doms; |
| 6820 | |
| 6821 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); |
| 6822 | if (!doms) |
| 6823 | return NULL; |
| 6824 | for (i = 0; i < ndoms; i++) { |
| 6825 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { |
| 6826 | free_sched_domains(doms, i); |
| 6827 | return NULL; |
| 6828 | } |
| 6829 | } |
| 6830 | return doms; |
| 6831 | } |
| 6832 | |
| 6833 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) |
| 6834 | { |
| 6835 | unsigned int i; |
| 6836 | for (i = 0; i < ndoms; i++) |
| 6837 | free_cpumask_var(doms[i]); |
| 6838 | kfree(doms); |
| 6839 | } |
| 6840 | |
| 6841 | /* |
| 6842 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
| 6843 | * For now this just excludes isolated cpus, but could be used to |
| 6844 | * exclude other special cases in the future. |
| 6845 | */ |
| 6846 | static int init_sched_domains(const struct cpumask *cpu_map) |
| 6847 | { |
| 6848 | int err; |
| 6849 | |
| 6850 | arch_update_cpu_topology(); |
| 6851 | ndoms_cur = 1; |
| 6852 | doms_cur = alloc_sched_domains(ndoms_cur); |
| 6853 | if (!doms_cur) |
| 6854 | doms_cur = &fallback_doms; |
| 6855 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
| 6856 | err = build_sched_domains(doms_cur[0], NULL); |
| 6857 | register_sched_domain_sysctl(); |
| 6858 | |
| 6859 | return err; |
| 6860 | } |
| 6861 | |
| 6862 | /* |
* Detach sched domains from a group of cpus specified in cpu_map.
* These cpus will now be attached to the NULL domain.
| 6865 | */ |
| 6866 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
| 6867 | { |
| 6868 | int i; |
| 6869 | |
| 6870 | rcu_read_lock(); |
| 6871 | for_each_cpu(i, cpu_map) |
| 6872 | cpu_attach_domain(NULL, &def_root_domain, i); |
| 6873 | rcu_read_unlock(); |
| 6874 | } |
| 6875 | |
| 6876 | /* handle null as "default" */ |
| 6877 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, |
| 6878 | struct sched_domain_attr *new, int idx_new) |
| 6879 | { |
| 6880 | struct sched_domain_attr tmp; |
| 6881 | |
| 6882 | /* fast path */ |
| 6883 | if (!new && !cur) |
| 6884 | return 1; |
| 6885 | |
| 6886 | tmp = SD_ATTR_INIT; |
| 6887 | return !memcmp(cur ? (cur + idx_cur) : &tmp, |
| 6888 | new ? (new + idx_new) : &tmp, |
| 6889 | sizeof(struct sched_domain_attr)); |
| 6890 | } |
| 6891 | |
| 6892 | /* |
* Partition sched domains as specified by the 'ndoms_new'
* cpumasks in the array doms_new[]. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
| 6896 | * It destroys each deleted domain and builds each new domain. |
| 6897 | * |
| 6898 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
* The masks don't intersect (don't overlap). We should set up one
| 6900 | * sched domain for each mask. CPUs not in any of the cpumasks will |
| 6901 | * not be load balanced. If the same cpumask appears both in the |
| 6902 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
| 6903 | * it as it is. |
| 6904 | * |
* The passed-in 'doms_new' should be allocated using
* alloc_sched_domains. This routine takes ownership of it and will
* free_sched_domains it when done with it. If the caller failed the
* alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
* and partition_sched_domains() will fall back to the single partition
* 'fallback_doms'; this also forces the domains to be rebuilt.
| 6911 | * |
* If doms_new == NULL it will be replaced with cpu_active_mask (minus isolated CPUs).
| 6913 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 6914 | * and it will not create the default domain. |
| 6915 | * |
| 6916 | * Call with hotplug lock held |
| 6917 | */ |
| 6918 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
| 6919 | struct sched_domain_attr *dattr_new) |
| 6920 | { |
| 6921 | int i, j, n; |
| 6922 | int new_topology; |
| 6923 | |
| 6924 | mutex_lock(&sched_domains_mutex); |
| 6925 | |
| 6926 | /* always unregister in case we don't destroy any domains */ |
| 6927 | unregister_sched_domain_sysctl(); |
| 6928 | |
| 6929 | /* Let architecture update cpu core mappings. */ |
| 6930 | new_topology = arch_update_cpu_topology(); |
| 6931 | |
| 6932 | n = doms_new ? ndoms_new : 0; |
| 6933 | |
| 6934 | /* Destroy deleted domains */ |
| 6935 | for (i = 0; i < ndoms_cur; i++) { |
| 6936 | for (j = 0; j < n && !new_topology; j++) { |
| 6937 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
| 6938 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
| 6939 | goto match1; |
| 6940 | } |
| 6941 | /* no match - a current sched domain not in new doms_new[] */ |
| 6942 | detach_destroy_domains(doms_cur[i]); |
| 6943 | match1: |
| 6944 | ; |
| 6945 | } |
| 6946 | |
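/*
* If no new domains were supplied, fall back to a single domain
* spanning all active, non-isolated CPUs, and force it to be rebuilt
* by skipping the comparison against doms_cur[] (n = 0).
*/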
| 6947 | n = ndoms_cur; |
| 6948 | if (doms_new == NULL) { |
| 6949 | n = 0; |
| 6950 | doms_new = &fallback_doms; |
| 6951 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
| 6952 | WARN_ON_ONCE(dattr_new); |
| 6953 | } |
| 6954 | |
| 6955 | /* Build new domains */ |
| 6956 | for (i = 0; i < ndoms_new; i++) { |
| 6957 | for (j = 0; j < n && !new_topology; j++) { |
| 6958 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
| 6959 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
| 6960 | goto match2; |
| 6961 | } |
| 6962 | /* no match - add a new doms_new */ |
| 6963 | build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); |
| 6964 | match2: |
| 6965 | ; |
| 6966 | } |
| 6967 | |
| 6968 | /* Remember the new sched domains */ |
| 6969 | if (doms_cur != &fallback_doms) |
| 6970 | free_sched_domains(doms_cur, ndoms_cur); |
| 6971 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
| 6972 | doms_cur = doms_new; |
| 6973 | dattr_cur = dattr_new; |
| 6974 | ndoms_cur = ndoms_new; |
| 6975 | |
| 6976 | register_sched_domain_sysctl(); |
| 6977 | |
| 6978 | mutex_unlock(&sched_domains_mutex); |
| 6979 | } |
| 6980 | |
| 6981 | static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ |
| 6982 | |
| 6983 | /* |
| 6984 | * Update cpusets according to cpu_active mask. If cpusets are |
| 6985 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper |
| 6986 | * around partition_sched_domains(). |
| 6987 | * |
| 6988 | * If we come here as part of a suspend/resume, don't touch cpusets because we |
* want to restore them to their original state upon resume anyway.
| 6990 | */ |
| 6991 | static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, |
| 6992 | void *hcpu) |
| 6993 | { |
| 6994 | switch (action) { |
| 6995 | case CPU_ONLINE_FROZEN: |
| 6996 | case CPU_DOWN_FAILED_FROZEN: |
| 6997 | |
| 6998 | /* |
* num_cpus_frozen tracks how many CPUs are involved in the
* suspend/resume sequence. As long as this is not the last online
* operation in the resume sequence, just build a single sched
* domain, ignoring cpusets.
| 7003 | */ |
| 7004 | num_cpus_frozen--; |
| 7005 | if (likely(num_cpus_frozen)) { |
| 7006 | partition_sched_domains(1, NULL, NULL); |
| 7007 | break; |
| 7008 | } |
| 7009 | |
| 7010 | /* |
| 7011 | * This is the last CPU online operation. So fall through and |
| 7012 | * restore the original sched domains by considering the |
| 7013 | * cpuset configurations. |
| 7014 | */ |
| 7015 | |
| 7016 | case CPU_ONLINE: |
| 7017 | cpuset_update_active_cpus(true); |
| 7018 | break; |
| 7019 | default: |
| 7020 | return NOTIFY_DONE; |
| 7021 | } |
| 7022 | return NOTIFY_OK; |
| 7023 | } |
| 7024 | |
| 7025 | static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, |
| 7026 | void *hcpu) |
| 7027 | { |
| 7028 | unsigned long flags; |
| 7029 | long cpu = (long)hcpu; |
| 7030 | struct dl_bw *dl_b; |
| 7031 | bool overflow; |
| 7032 | int cpus; |
| 7033 | |
| 7034 | switch (action) { |
| 7035 | case CPU_DOWN_PREPARE: |
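/*
* Refuse to take this CPU down if doing so would leave less
* bandwidth than the already-admitted deadline tasks require.
*/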
| 7036 | rcu_read_lock_sched(); |
| 7037 | dl_b = dl_bw_of(cpu); |
| 7038 | |
| 7039 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 7040 | cpus = dl_bw_cpus(cpu); |
| 7041 | overflow = __dl_overflow(dl_b, cpus, 0, 0); |
| 7042 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 7043 | |
| 7044 | rcu_read_unlock_sched(); |
| 7045 | |
| 7046 | if (overflow) |
| 7047 | return notifier_from_errno(-EBUSY); |
| 7048 | cpuset_update_active_cpus(false); |
| 7049 | break; |
| 7050 | case CPU_DOWN_PREPARE_FROZEN: |
| 7051 | num_cpus_frozen++; |
| 7052 | partition_sched_domains(1, NULL, NULL); |
| 7053 | break; |
| 7054 | default: |
| 7055 | return NOTIFY_DONE; |
| 7056 | } |
| 7057 | return NOTIFY_OK; |
| 7058 | } |
| 7059 | |
| 7060 | void __init sched_init_smp(void) |
| 7061 | { |
| 7062 | cpumask_var_t non_isolated_cpus; |
| 7063 | |
| 7064 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
| 7065 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
| 7066 | |
| 7067 | sched_init_numa(); |
| 7068 | |
| 7069 | /* |
| 7070 | * There's no userspace yet to cause hotplug operations; hence all the |
* cpu masks are stable and all blatant races in the code below cannot
* happen.
| 7073 | */ |
| 7074 | mutex_lock(&sched_domains_mutex); |
| 7075 | init_sched_domains(cpu_active_mask); |
| 7076 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
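/* If every CPU is isolated, keep at least the current CPU so init can run somewhere. */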
| 7077 | if (cpumask_empty(non_isolated_cpus)) |
| 7078 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
| 7079 | mutex_unlock(&sched_domains_mutex); |
| 7080 | |
| 7081 | hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); |
| 7082 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
| 7083 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); |
| 7084 | |
| 7085 | init_hrtick(); |
| 7086 | |
| 7087 | /* Move init over to a non-isolated CPU */ |
| 7088 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
| 7089 | BUG(); |
| 7090 | sched_init_granularity(); |
| 7091 | free_cpumask_var(non_isolated_cpus); |
| 7092 | |
| 7093 | init_sched_rt_class(); |
| 7094 | init_sched_dl_class(); |
| 7095 | } |
| 7096 | #else |
| 7097 | void __init sched_init_smp(void) |
| 7098 | { |
| 7099 | sched_init_granularity(); |
| 7100 | } |
| 7101 | #endif /* CONFIG_SMP */ |
| 7102 | |
| 7103 | const_debug unsigned int sysctl_timer_migration = 1; |
| 7104 | |
| 7105 | int in_sched_functions(unsigned long addr) |
| 7106 | { |
| 7107 | return in_lock_functions(addr) || |
| 7108 | (addr >= (unsigned long)__sched_text_start |
| 7109 | && addr < (unsigned long)__sched_text_end); |
| 7110 | } |
| 7111 | |
| 7112 | #ifdef CONFIG_CGROUP_SCHED |
| 7113 | /* |
| 7114 | * Default task group. |
* Every task in the system belongs to this group at bootup.
| 7116 | */ |
| 7117 | struct task_group root_task_group; |
| 7118 | LIST_HEAD(task_groups); |
| 7119 | #endif |
| 7120 | |
| 7121 | DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); |
| 7122 | |
| 7123 | void __init sched_init(void) |
| 7124 | { |
| 7125 | int i, j; |
| 7126 | unsigned long alloc_size = 0, ptr; |
| 7127 | |
| 7128 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7129 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 7130 | #endif |
| 7131 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7132 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 7133 | #endif |
| 7134 | if (alloc_size) { |
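/*
* One allocation is carved up below into the root task group's
* per-cpu pointer arrays, nr_cpu_ids entries each.
*/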
| 7135 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
| 7136 | |
| 7137 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7138 | root_task_group.se = (struct sched_entity **)ptr; |
| 7139 | ptr += nr_cpu_ids * sizeof(void **); |
| 7140 | |
| 7141 | root_task_group.cfs_rq = (struct cfs_rq **)ptr; |
| 7142 | ptr += nr_cpu_ids * sizeof(void **); |
| 7143 | |
| 7144 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 7145 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7146 | root_task_group.rt_se = (struct sched_rt_entity **)ptr; |
| 7147 | ptr += nr_cpu_ids * sizeof(void **); |
| 7148 | |
| 7149 | root_task_group.rt_rq = (struct rt_rq **)ptr; |
| 7150 | ptr += nr_cpu_ids * sizeof(void **); |
| 7151 | |
| 7152 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7153 | } |
| 7154 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 7155 | for_each_possible_cpu(i) { |
| 7156 | per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( |
| 7157 | cpumask_size(), GFP_KERNEL, cpu_to_node(i)); |
| 7158 | } |
| 7159 | #endif /* CONFIG_CPUMASK_OFFSTACK */ |
| 7160 | |
| 7161 | init_rt_bandwidth(&def_rt_bandwidth, |
| 7162 | global_rt_period(), global_rt_runtime()); |
| 7163 | init_dl_bandwidth(&def_dl_bandwidth, |
| 7164 | global_rt_period(), global_rt_runtime()); |
| 7165 | |
| 7166 | #ifdef CONFIG_SMP |
| 7167 | init_defrootdomain(); |
| 7168 | #endif |
| 7169 | |
| 7170 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7171 | init_rt_bandwidth(&root_task_group.rt_bandwidth, |
| 7172 | global_rt_period(), global_rt_runtime()); |
| 7173 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7174 | |
| 7175 | #ifdef CONFIG_CGROUP_SCHED |
| 7176 | list_add(&root_task_group.list, &task_groups); |
| 7177 | INIT_LIST_HEAD(&root_task_group.children); |
| 7178 | INIT_LIST_HEAD(&root_task_group.siblings); |
| 7179 | autogroup_init(&init_task); |
| 7180 | |
| 7181 | #endif /* CONFIG_CGROUP_SCHED */ |
| 7182 | |
| 7183 | for_each_possible_cpu(i) { |
| 7184 | struct rq *rq; |
| 7185 | |
| 7186 | rq = cpu_rq(i); |
| 7187 | raw_spin_lock_init(&rq->lock); |
| 7188 | rq->nr_running = 0; |
| 7189 | rq->calc_load_active = 0; |
| 7190 | rq->calc_load_update = jiffies + LOAD_FREQ; |
| 7191 | init_cfs_rq(&rq->cfs); |
| 7192 | init_rt_rq(&rq->rt); |
| 7193 | init_dl_rq(&rq->dl); |
| 7194 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7195 | root_task_group.shares = ROOT_TASK_GROUP_LOAD; |
| 7196 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
| 7197 | /* |
| 7198 | * How much cpu bandwidth does root_task_group get? |
| 7199 | * |
* In the case of task-groups formed through the cgroup filesystem, it
| 7201 | * gets 100% of the cpu resources in the system. This overall |
| 7202 | * system cpu resource is divided among the tasks of |
| 7203 | * root_task_group and its child task-groups in a fair manner, |
| 7204 | * based on each entity's (task or task-group's) weight |
| 7205 | * (se->load.weight). |
| 7206 | * |
* In other words, if root_task_group has 10 tasks of weight
* 1024 and two child groups A0 and A1 (of weight 1024 each),
| 7209 | * then A0's share of the cpu resource is: |
| 7210 | * |
| 7211 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% |
| 7212 | * |
| 7213 | * We achieve this by letting root_task_group's tasks sit |
| 7214 | * directly in rq->cfs (i.e root_task_group->se[] = NULL). |
| 7215 | */ |
| 7216 | init_cfs_bandwidth(&root_task_group.cfs_bandwidth); |
| 7217 | init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); |
| 7218 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 7219 | |
| 7220 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; |
| 7221 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7222 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); |
| 7223 | #endif |
| 7224 | |
| 7225 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
| 7226 | rq->cpu_load[j] = 0; |
| 7227 | |
| 7228 | rq->last_load_update_tick = jiffies; |
| 7229 | |
| 7230 | #ifdef CONFIG_SMP |
| 7231 | rq->sd = NULL; |
| 7232 | rq->rd = NULL; |
| 7233 | rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; |
| 7234 | rq->balance_callback = NULL; |
| 7235 | rq->active_balance = 0; |
| 7236 | rq->next_balance = jiffies; |
| 7237 | rq->push_cpu = 0; |
| 7238 | rq->cpu = i; |
| 7239 | rq->online = 0; |
| 7240 | rq->idle_stamp = 0; |
| 7241 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
| 7242 | rq->max_idle_balance_cost = sysctl_sched_migration_cost; |
| 7243 | |
| 7244 | INIT_LIST_HEAD(&rq->cfs_tasks); |
| 7245 | |
| 7246 | rq_attach_root(rq, &def_root_domain); |
| 7247 | #ifdef CONFIG_NO_HZ_COMMON |
| 7248 | rq->nohz_flags = 0; |
| 7249 | #endif |
| 7250 | #ifdef CONFIG_NO_HZ_FULL |
| 7251 | rq->last_sched_tick = 0; |
| 7252 | #endif |
| 7253 | #endif |
| 7254 | init_rq_hrtick(rq); |
| 7255 | atomic_set(&rq->nr_iowait, 0); |
| 7256 | } |
| 7257 | |
| 7258 | set_load_weight(&init_task); |
| 7259 | |
| 7260 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 7261 | INIT_HLIST_HEAD(&init_task.preempt_notifiers); |
| 7262 | #endif |
| 7263 | |
| 7264 | /* |
| 7265 | * The boot idle thread does lazy MMU switching as well: |
| 7266 | */ |
| 7267 | atomic_inc(&init_mm.mm_count); |
| 7268 | enter_lazy_tlb(&init_mm, current); |
| 7269 | |
| 7270 | /* |
| 7271 | * During early bootup we pretend to be a normal task: |
| 7272 | */ |
| 7273 | current->sched_class = &fair_sched_class; |
| 7274 | |
| 7275 | /* |
* Make us the idle thread. Technically, schedule() should not be
* called from this thread; however, somewhere below it might be.
* Because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
| 7280 | */ |
| 7281 | init_idle(current, smp_processor_id()); |
| 7282 | |
| 7283 | calc_load_update = jiffies + LOAD_FREQ; |
| 7284 | |
| 7285 | #ifdef CONFIG_SMP |
| 7286 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); |
| 7287 | /* May be allocated at isolcpus cmdline parse time */ |
| 7288 | if (cpu_isolated_map == NULL) |
| 7289 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
| 7290 | idle_thread_set_boot_cpu(); |
| 7291 | set_cpu_rq_start_time(); |
| 7292 | #endif |
| 7293 | init_sched_fair_class(); |
| 7294 | |
| 7295 | scheduler_running = 1; |
| 7296 | } |
| 7297 | |
| 7298 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 7299 | static inline int preempt_count_equals(int preempt_offset) |
| 7300 | { |
| 7301 | int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); |
| 7302 | |
| 7303 | return (nested == preempt_offset); |
| 7304 | } |
| 7305 | |
| 7306 | void __might_sleep(const char *file, int line, int preempt_offset) |
| 7307 | { |
| 7308 | /* |
| 7309 | * Blocking primitives will set (and therefore destroy) current->state, |
* since we will exit with TASK_RUNNING, make sure we enter with it;
* otherwise we will destroy state.
| 7312 | */ |
| 7313 | WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, |
| 7314 | "do not call blocking ops when !TASK_RUNNING; " |
| 7315 | "state=%lx set at [<%p>] %pS\n", |
| 7316 | current->state, |
| 7317 | (void *)current->task_state_change, |
| 7318 | (void *)current->task_state_change); |
| 7319 | |
| 7320 | ___might_sleep(file, line, preempt_offset); |
| 7321 | } |
| 7322 | EXPORT_SYMBOL(__might_sleep); |
| 7323 | |
| 7324 | void ___might_sleep(const char *file, int line, int preempt_offset) |
| 7325 | { |
| 7326 | static unsigned long prev_jiffy; /* ratelimiting */ |
| 7327 | |
| 7328 | rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ |
| 7329 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && |
| 7330 | !is_idle_task(current)) || |
| 7331 | system_state != SYSTEM_RUNNING || oops_in_progress) |
| 7332 | return; |
| 7333 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
| 7334 | return; |
| 7335 | prev_jiffy = jiffies; |
| 7336 | |
| 7337 | printk(KERN_ERR |
| 7338 | "BUG: sleeping function called from invalid context at %s:%d\n", |
| 7339 | file, line); |
| 7340 | printk(KERN_ERR |
| 7341 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
| 7342 | in_atomic(), irqs_disabled(), |
| 7343 | current->pid, current->comm); |
| 7344 | |
| 7345 | if (task_stack_end_corrupted(current)) |
| 7346 | printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); |
| 7347 | |
| 7348 | debug_show_held_locks(current); |
| 7349 | if (irqs_disabled()) |
| 7350 | print_irqtrace_events(current); |
| 7351 | #ifdef CONFIG_DEBUG_PREEMPT |
| 7352 | if (!preempt_count_equals(preempt_offset)) { |
| 7353 | pr_err("Preemption disabled at:"); |
| 7354 | print_ip_sym(current->preempt_disable_ip); |
| 7355 | pr_cont("\n"); |
| 7356 | } |
| 7357 | #endif |
| 7358 | dump_stack(); |
| 7359 | } |
| 7360 | EXPORT_SYMBOL(___might_sleep); |
| 7361 | #endif |
| 7362 | |
| 7363 | #ifdef CONFIG_MAGIC_SYSRQ |
| 7364 | static void normalize_task(struct rq *rq, struct task_struct *p) |
| 7365 | { |
| 7366 | const struct sched_class *prev_class = p->sched_class; |
| 7367 | struct sched_attr attr = { |
| 7368 | .sched_policy = SCHED_NORMAL, |
| 7369 | }; |
| 7370 | int old_prio = p->prio; |
| 7371 | int queued; |
| 7372 | |
| 7373 | queued = task_on_rq_queued(p); |
| 7374 | if (queued) |
| 7375 | dequeue_task(rq, p, 0); |
| 7376 | __setscheduler(rq, p, &attr, false); |
| 7377 | if (queued) { |
| 7378 | enqueue_task(rq, p, 0); |
| 7379 | resched_curr(rq); |
| 7380 | } |
| 7381 | |
| 7382 | check_class_changed(rq, p, prev_class, old_prio); |
| 7383 | } |
| 7384 | |
| 7385 | void normalize_rt_tasks(void) |
| 7386 | { |
| 7387 | struct task_struct *g, *p; |
| 7388 | unsigned long flags; |
| 7389 | struct rq *rq; |
| 7390 | |
| 7391 | read_lock(&tasklist_lock); |
| 7392 | for_each_process_thread(g, p) { |
| 7393 | /* |
| 7394 | * Only normalize user tasks: |
| 7395 | */ |
| 7396 | if (p->flags & PF_KTHREAD) |
| 7397 | continue; |
| 7398 | |
| 7399 | p->se.exec_start = 0; |
| 7400 | #ifdef CONFIG_SCHEDSTATS |
| 7401 | p->se.statistics.wait_start = 0; |
| 7402 | p->se.statistics.sleep_start = 0; |
| 7403 | p->se.statistics.block_start = 0; |
| 7404 | #endif |
| 7405 | |
| 7406 | if (!dl_task(p) && !rt_task(p)) { |
| 7407 | /* |
* Renice userspace tasks with a negative
* nice level back to 0:
| 7410 | */ |
| 7411 | if (task_nice(p) < 0) |
| 7412 | set_user_nice(p, 0); |
| 7413 | continue; |
| 7414 | } |
| 7415 | |
| 7416 | rq = task_rq_lock(p, &flags); |
| 7417 | normalize_task(rq, p); |
| 7418 | task_rq_unlock(rq, p, &flags); |
| 7419 | } |
| 7420 | read_unlock(&tasklist_lock); |
| 7421 | } |
| 7422 | |
| 7423 | #endif /* CONFIG_MAGIC_SYSRQ */ |
| 7424 | |
| 7425 | #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) |
| 7426 | /* |
| 7427 | * These functions are only useful for the IA64 MCA handling, or kdb. |
| 7428 | * |
| 7429 | * They can only be called when the whole system has been |
| 7430 | * stopped - every CPU needs to be quiescent, and no scheduling |
| 7431 | * activity can take place. Using them for anything else would |
| 7432 | * be a serious bug, and as a result, they aren't even visible |
| 7433 | * under any other configuration. |
| 7434 | */ |
| 7435 | |
| 7436 | /** |
| 7437 | * curr_task - return the current task for a given cpu. |
| 7438 | * @cpu: the processor in question. |
| 7439 | * |
| 7440 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 7441 | * |
| 7442 | * Return: The current task for @cpu. |
| 7443 | */ |
| 7444 | struct task_struct *curr_task(int cpu) |
| 7445 | { |
| 7446 | return cpu_curr(cpu); |
| 7447 | } |
| 7448 | |
| 7449 | #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ |
| 7450 | |
| 7451 | #ifdef CONFIG_IA64 |
| 7452 | /** |
| 7453 | * set_curr_task - set the current task for a given cpu. |
| 7454 | * @cpu: the processor in question. |
| 7455 | * @p: the task pointer to set. |
| 7456 | * |
| 7457 | * Description: This function must only be used when non-maskable interrupts |
| 7458 | * are serviced on a separate stack. It allows the architecture to switch the |
| 7459 | * notion of the current task on a cpu in a non-blocking manner. This function |
* must be called with all CPUs synchronized and interrupts disabled; the
* caller must save the original value of the current task (see
| 7462 | * curr_task() above) and restore that value before reenabling interrupts and |
| 7463 | * re-starting the system. |
| 7464 | * |
| 7465 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 7466 | */ |
| 7467 | void set_curr_task(int cpu, struct task_struct *p) |
| 7468 | { |
| 7469 | cpu_curr(cpu) = p; |
| 7470 | } |
| 7471 | |
| 7472 | #endif |
| 7473 | |
| 7474 | #ifdef CONFIG_CGROUP_SCHED |
| 7475 | /* task_group_lock serializes the addition/removal of task groups */ |
| 7476 | static DEFINE_SPINLOCK(task_group_lock); |
| 7477 | |
| 7478 | static void free_sched_group(struct task_group *tg) |
| 7479 | { |
| 7480 | free_fair_sched_group(tg); |
| 7481 | free_rt_sched_group(tg); |
| 7482 | autogroup_free(tg); |
| 7483 | kfree(tg); |
| 7484 | } |
| 7485 | |
| 7486 | /* allocate runqueue etc for a new task group */ |
| 7487 | struct task_group *sched_create_group(struct task_group *parent) |
| 7488 | { |
| 7489 | struct task_group *tg; |
| 7490 | |
| 7491 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); |
| 7492 | if (!tg) |
| 7493 | return ERR_PTR(-ENOMEM); |
| 7494 | |
| 7495 | if (!alloc_fair_sched_group(tg, parent)) |
| 7496 | goto err; |
| 7497 | |
| 7498 | if (!alloc_rt_sched_group(tg, parent)) |
| 7499 | goto err; |
| 7500 | |
| 7501 | return tg; |
| 7502 | |
| 7503 | err: |
| 7504 | free_sched_group(tg); |
| 7505 | return ERR_PTR(-ENOMEM); |
| 7506 | } |
| 7507 | |
| 7508 | void sched_online_group(struct task_group *tg, struct task_group *parent) |
| 7509 | { |
| 7510 | unsigned long flags; |
| 7511 | |
| 7512 | spin_lock_irqsave(&task_group_lock, flags); |
| 7513 | list_add_rcu(&tg->list, &task_groups); |
| 7514 | |
| 7515 | WARN_ON(!parent); /* root should already exist */ |
| 7516 | |
| 7517 | tg->parent = parent; |
| 7518 | INIT_LIST_HEAD(&tg->children); |
| 7519 | list_add_rcu(&tg->siblings, &parent->children); |
| 7520 | spin_unlock_irqrestore(&task_group_lock, flags); |
| 7521 | } |
| 7522 | |
| 7523 | /* rcu callback to free various structures associated with a task group */ |
| 7524 | static void free_sched_group_rcu(struct rcu_head *rhp) |
| 7525 | { |
| 7526 | /* now it should be safe to free those cfs_rqs */ |
| 7527 | free_sched_group(container_of(rhp, struct task_group, rcu)); |
| 7528 | } |
| 7529 | |
| 7530 | /* Destroy runqueue etc associated with a task group */ |
| 7531 | void sched_destroy_group(struct task_group *tg) |
| 7532 | { |
/* wait for possible concurrent references to cfs_rqs to complete */
| 7534 | call_rcu(&tg->rcu, free_sched_group_rcu); |
| 7535 | } |
| 7536 | |
| 7537 | void sched_offline_group(struct task_group *tg) |
| 7538 | { |
| 7539 | unsigned long flags; |
| 7540 | int i; |
| 7541 | |
| 7542 | /* end participation in shares distribution */ |
| 7543 | for_each_possible_cpu(i) |
| 7544 | unregister_fair_sched_group(tg, i); |
| 7545 | |
| 7546 | spin_lock_irqsave(&task_group_lock, flags); |
| 7547 | list_del_rcu(&tg->list); |
| 7548 | list_del_rcu(&tg->siblings); |
| 7549 | spin_unlock_irqrestore(&task_group_lock, flags); |
| 7550 | } |
| 7551 | |
/*
* Change the task's runqueue when it moves between groups.
*
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
* reflect its new group.
*/
| 7557 | void sched_move_task(struct task_struct *tsk) |
| 7558 | { |
| 7559 | struct task_group *tg; |
| 7560 | int queued, running; |
| 7561 | unsigned long flags; |
| 7562 | struct rq *rq; |
| 7563 | |
| 7564 | rq = task_rq_lock(tsk, &flags); |
| 7565 | |
| 7566 | running = task_current(rq, tsk); |
| 7567 | queued = task_on_rq_queued(tsk); |
| 7568 | |
| 7569 | if (queued) |
| 7570 | dequeue_task(rq, tsk, 0); |
| 7571 | if (unlikely(running)) |
| 7572 | put_prev_task(rq, tsk); |
| 7573 | |
| 7574 | /* |
* All callers are synchronized by task_rq_lock(); we do not use RCU,
* which would be pointless here. Thus, we pass "true" to task_css_check()
| 7577 | * to prevent lockdep warnings. |
| 7578 | */ |
| 7579 | tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), |
| 7580 | struct task_group, css); |
| 7581 | tg = autogroup_task_group(tsk, tg); |
| 7582 | tsk->sched_task_group = tg; |
| 7583 | |
| 7584 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7585 | if (tsk->sched_class->task_move_group) |
| 7586 | tsk->sched_class->task_move_group(tsk, queued); |
| 7587 | else |
| 7588 | #endif |
| 7589 | set_task_rq(tsk, task_cpu(tsk)); |
| 7590 | |
| 7591 | if (unlikely(running)) |
| 7592 | tsk->sched_class->set_curr_task(rq); |
| 7593 | if (queued) |
| 7594 | enqueue_task(rq, tsk, 0); |
| 7595 | |
| 7596 | task_rq_unlock(rq, tsk, &flags); |
| 7597 | } |
| 7598 | #endif /* CONFIG_CGROUP_SCHED */ |
| 7599 | |
| 7600 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7601 | /* |
* Ensure that the real-time constraints are schedulable.
| 7603 | */ |
| 7604 | static DEFINE_MUTEX(rt_constraints_mutex); |
| 7605 | |
| 7606 | /* Must be called with tasklist_lock held */ |
| 7607 | static inline int tg_has_rt_tasks(struct task_group *tg) |
| 7608 | { |
| 7609 | struct task_struct *g, *p; |
| 7610 | |
| 7611 | /* |
| 7612 | * Autogroups do not have RT tasks; see autogroup_create(). |
| 7613 | */ |
| 7614 | if (task_group_is_autogroup(tg)) |
| 7615 | return 0; |
| 7616 | |
| 7617 | for_each_process_thread(g, p) { |
| 7618 | if (rt_task(p) && task_group(p) == tg) |
| 7619 | return 1; |
| 7620 | } |
| 7621 | |
| 7622 | return 0; |
| 7623 | } |
| 7624 | |
| 7625 | struct rt_schedulable_data { |
| 7626 | struct task_group *tg; |
| 7627 | u64 rt_period; |
| 7628 | u64 rt_runtime; |
| 7629 | }; |
| 7630 | |
| 7631 | static int tg_rt_schedulable(struct task_group *tg, void *data) |
| 7632 | { |
| 7633 | struct rt_schedulable_data *d = data; |
| 7634 | struct task_group *child; |
| 7635 | unsigned long total, sum = 0; |
| 7636 | u64 period, runtime; |
| 7637 | |
| 7638 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 7639 | runtime = tg->rt_bandwidth.rt_runtime; |
| 7640 | |
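/* The group being changed is evaluated with its proposed values. */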
| 7641 | if (tg == d->tg) { |
| 7642 | period = d->rt_period; |
| 7643 | runtime = d->rt_runtime; |
| 7644 | } |
| 7645 | |
| 7646 | /* |
| 7647 | * Cannot have more runtime than the period. |
| 7648 | */ |
| 7649 | if (runtime > period && runtime != RUNTIME_INF) |
| 7650 | return -EINVAL; |
| 7651 | |
| 7652 | /* |
| 7653 | * Ensure we don't starve existing RT tasks. |
| 7654 | */ |
| 7655 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
| 7656 | return -EBUSY; |
| 7657 | |
| 7658 | total = to_ratio(period, runtime); |
| 7659 | |
| 7660 | /* |
| 7661 | * Nobody can have more than the global setting allows. |
| 7662 | */ |
| 7663 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) |
| 7664 | return -EINVAL; |
| 7665 | |
| 7666 | /* |
| 7667 | * The sum of our children's runtime should not exceed our own. |
| 7668 | */ |
| 7669 | list_for_each_entry_rcu(child, &tg->children, siblings) { |
| 7670 | period = ktime_to_ns(child->rt_bandwidth.rt_period); |
| 7671 | runtime = child->rt_bandwidth.rt_runtime; |
| 7672 | |
| 7673 | if (child == d->tg) { |
| 7674 | period = d->rt_period; |
| 7675 | runtime = d->rt_runtime; |
| 7676 | } |
| 7677 | |
| 7678 | sum += to_ratio(period, runtime); |
| 7679 | } |
| 7680 | |
| 7681 | if (sum > total) |
| 7682 | return -EINVAL; |
| 7683 | |
| 7684 | return 0; |
| 7685 | } |
| 7686 | |
| 7687 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
| 7688 | { |
| 7689 | int ret; |
| 7690 | |
| 7691 | struct rt_schedulable_data data = { |
| 7692 | .tg = tg, |
| 7693 | .rt_period = period, |
| 7694 | .rt_runtime = runtime, |
| 7695 | }; |
| 7696 | |
| 7697 | rcu_read_lock(); |
| 7698 | ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); |
| 7699 | rcu_read_unlock(); |
| 7700 | |
| 7701 | return ret; |
| 7702 | } |
| 7703 | |
| 7704 | static int tg_set_rt_bandwidth(struct task_group *tg, |
| 7705 | u64 rt_period, u64 rt_runtime) |
| 7706 | { |
| 7707 | int i, err = 0; |
| 7708 | |
| 7709 | /* |
* Disallowing the root group RT runtime is BAD; it would prevent the
* kernel from creating (and/or operating) RT threads.
| 7712 | */ |
| 7713 | if (tg == &root_task_group && rt_runtime == 0) |
| 7714 | return -EINVAL; |
| 7715 | |
/* A zero period doesn't make any sense. */
| 7717 | if (rt_period == 0) |
| 7718 | return -EINVAL; |
| 7719 | |
| 7720 | mutex_lock(&rt_constraints_mutex); |
| 7721 | read_lock(&tasklist_lock); |
| 7722 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
| 7723 | if (err) |
| 7724 | goto unlock; |
| 7725 | |
| 7726 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
| 7727 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
| 7728 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
| 7729 | |
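/* Propagate the new runtime to every per-cpu rt_rq of this group. */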
| 7730 | for_each_possible_cpu(i) { |
| 7731 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
| 7732 | |
| 7733 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
| 7734 | rt_rq->rt_runtime = rt_runtime; |
| 7735 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 7736 | } |
| 7737 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
| 7738 | unlock: |
| 7739 | read_unlock(&tasklist_lock); |
| 7740 | mutex_unlock(&rt_constraints_mutex); |
| 7741 | |
| 7742 | return err; |
| 7743 | } |
| 7744 | |
| 7745 | static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) |
| 7746 | { |
| 7747 | u64 rt_runtime, rt_period; |
| 7748 | |
| 7749 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 7750 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; |
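/* A negative value from userspace means "unlimited". */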
| 7751 | if (rt_runtime_us < 0) |
| 7752 | rt_runtime = RUNTIME_INF; |
| 7753 | |
| 7754 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
| 7755 | } |
| 7756 | |
| 7757 | static long sched_group_rt_runtime(struct task_group *tg) |
| 7758 | { |
| 7759 | u64 rt_runtime_us; |
| 7760 | |
| 7761 | if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) |
| 7762 | return -1; |
| 7763 | |
| 7764 | rt_runtime_us = tg->rt_bandwidth.rt_runtime; |
| 7765 | do_div(rt_runtime_us, NSEC_PER_USEC); |
| 7766 | return rt_runtime_us; |
| 7767 | } |
| 7768 | |
| 7769 | static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) |
| 7770 | { |
| 7771 | u64 rt_runtime, rt_period; |
| 7772 | |
| 7773 | rt_period = rt_period_us * NSEC_PER_USEC; |
| 7774 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
| 7775 | |
| 7776 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
| 7777 | } |
| 7778 | |
| 7779 | static long sched_group_rt_period(struct task_group *tg) |
| 7780 | { |
| 7781 | u64 rt_period_us; |
| 7782 | |
| 7783 | rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 7784 | do_div(rt_period_us, NSEC_PER_USEC); |
| 7785 | return rt_period_us; |
| 7786 | } |
| 7787 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7788 | |
| 7789 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7790 | static int sched_rt_global_constraints(void) |
| 7791 | { |
| 7792 | int ret = 0; |
| 7793 | |
| 7794 | mutex_lock(&rt_constraints_mutex); |
| 7795 | read_lock(&tasklist_lock); |
| 7796 | ret = __rt_schedulable(NULL, 0, 0); |
| 7797 | read_unlock(&tasklist_lock); |
| 7798 | mutex_unlock(&rt_constraints_mutex); |
| 7799 | |
| 7800 | return ret; |
| 7801 | } |
| 7802 | |
| 7803 | static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) |
| 7804 | { |
| 7805 | /* Don't accept realtime tasks when there is no way for them to run */ |
| 7806 | if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) |
| 7807 | return 0; |
| 7808 | |
| 7809 | return 1; |
| 7810 | } |
| 7811 | |
| 7812 | #else /* !CONFIG_RT_GROUP_SCHED */ |
| 7813 | static int sched_rt_global_constraints(void) |
| 7814 | { |
| 7815 | unsigned long flags; |
| 7816 | int i, ret = 0; |
| 7817 | |
| 7818 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
| 7819 | for_each_possible_cpu(i) { |
| 7820 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
| 7821 | |
| 7822 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
| 7823 | rt_rq->rt_runtime = global_rt_runtime(); |
| 7824 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 7825 | } |
| 7826 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
| 7827 | |
| 7828 | return ret; |
| 7829 | } |
| 7830 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7831 | |
| 7832 | static int sched_dl_global_validate(void) |
| 7833 | { |
| 7834 | u64 runtime = global_rt_runtime(); |
| 7835 | u64 period = global_rt_period(); |
| 7836 | u64 new_bw = to_ratio(period, runtime); |
| 7837 | struct dl_bw *dl_b; |
| 7838 | int cpu, ret = 0; |
| 7839 | unsigned long flags; |
| 7840 | |
| 7841 | /* |
* Here we want to check that the bandwidth is not being set to a
* value smaller than the currently allocated bandwidth in
* any of the root_domains.
*
* FIXME: Cycling over all the CPUs is overdoing it, but simpler than
* cycling over root_domains... Discussion on different/better
| 7848 | * solutions is welcome! |
| 7849 | */ |
| 7850 | for_each_possible_cpu(cpu) { |
| 7851 | rcu_read_lock_sched(); |
| 7852 | dl_b = dl_bw_of(cpu); |
| 7853 | |
| 7854 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 7855 | if (new_bw < dl_b->total_bw) |
| 7856 | ret = -EBUSY; |
| 7857 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 7858 | |
| 7859 | rcu_read_unlock_sched(); |
| 7860 | |
| 7861 | if (ret) |
| 7862 | break; |
| 7863 | } |
| 7864 | |
| 7865 | return ret; |
| 7866 | } |
| 7867 | |
| 7868 | static void sched_dl_do_global(void) |
| 7869 | { |
| 7870 | u64 new_bw = -1; |
| 7871 | struct dl_bw *dl_b; |
| 7872 | int cpu; |
| 7873 | unsigned long flags; |
| 7874 | |
| 7875 | def_dl_bandwidth.dl_period = global_rt_period(); |
| 7876 | def_dl_bandwidth.dl_runtime = global_rt_runtime(); |
| 7877 | |
| 7878 | if (global_rt_runtime() != RUNTIME_INF) |
| 7879 | new_bw = to_ratio(global_rt_period(), global_rt_runtime()); |
| 7880 | |
| 7881 | /* |
| 7882 | * FIXME: As above... |
| 7883 | */ |
| 7884 | for_each_possible_cpu(cpu) { |
| 7885 | rcu_read_lock_sched(); |
| 7886 | dl_b = dl_bw_of(cpu); |
| 7887 | |
| 7888 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 7889 | dl_b->bw = new_bw; |
| 7890 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 7891 | |
| 7892 | rcu_read_unlock_sched(); |
| 7893 | } |
| 7894 | } |
| 7895 | |
| 7896 | static int sched_rt_global_validate(void) |
| 7897 | { |
| 7898 | if (sysctl_sched_rt_period <= 0) |
| 7899 | return -EINVAL; |
| 7900 | |
| 7901 | if ((sysctl_sched_rt_runtime != RUNTIME_INF) && |
| 7902 | (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) |
| 7903 | return -EINVAL; |
| 7904 | |
| 7905 | return 0; |
| 7906 | } |
| 7907 | |
| 7908 | static void sched_rt_do_global(void) |
| 7909 | { |
| 7910 | def_rt_bandwidth.rt_runtime = global_rt_runtime(); |
| 7911 | def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); |
| 7912 | } |
| 7913 | |
| 7914 | int sched_rt_handler(struct ctl_table *table, int write, |
| 7915 | void __user *buffer, size_t *lenp, |
| 7916 | loff_t *ppos) |
| 7917 | { |
| 7918 | int old_period, old_runtime; |
| 7919 | static DEFINE_MUTEX(mutex); |
| 7920 | int ret; |
| 7921 | |
| 7922 | mutex_lock(&mutex); |
| 7923 | old_period = sysctl_sched_rt_period; |
| 7924 | old_runtime = sysctl_sched_rt_runtime; |
| 7925 | |
| 7926 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
| 7927 | |
| 7928 | if (!ret && write) { |
| 7929 | ret = sched_rt_global_validate(); |
| 7930 | if (ret) |
| 7931 | goto undo; |
| 7932 | |
| 7933 | ret = sched_dl_global_validate(); |
| 7934 | if (ret) |
| 7935 | goto undo; |
| 7936 | |
| 7937 | ret = sched_rt_global_constraints(); |
| 7938 | if (ret) |
| 7939 | goto undo; |
| 7940 | |
| 7941 | sched_rt_do_global(); |
| 7942 | sched_dl_do_global(); |
| 7943 | } |
| 7944 | if (0) { |
| 7945 | undo: |
| 7946 | sysctl_sched_rt_period = old_period; |
| 7947 | sysctl_sched_rt_runtime = old_runtime; |
| 7948 | } |
| 7949 | mutex_unlock(&mutex); |
| 7950 | |
| 7951 | return ret; |
| 7952 | } |
| 7953 | |
| 7954 | int sched_rr_handler(struct ctl_table *table, int write, |
| 7955 | void __user *buffer, size_t *lenp, |
| 7956 | loff_t *ppos) |
| 7957 | { |
| 7958 | int ret; |
| 7959 | static DEFINE_MUTEX(mutex); |
| 7960 | |
| 7961 | mutex_lock(&mutex); |
| 7962 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
/*
* Make sure that internally we keep jiffies.
* Also, writing zero resets the timeslice to the default.
*/
| 7965 | if (!ret && write) { |
| 7966 | sched_rr_timeslice = sched_rr_timeslice <= 0 ? |
| 7967 | RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); |
| 7968 | } |
| 7969 | mutex_unlock(&mutex); |
| 7970 | return ret; |
| 7971 | } |
| 7972 | |
| 7973 | #ifdef CONFIG_CGROUP_SCHED |
| 7974 | |
| 7975 | static inline struct task_group *css_tg(struct cgroup_subsys_state *css) |
| 7976 | { |
| 7977 | return css ? container_of(css, struct task_group, css) : NULL; |
| 7978 | } |
| 7979 | |
| 7980 | static struct cgroup_subsys_state * |
| 7981 | cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) |
| 7982 | { |
| 7983 | struct task_group *parent = css_tg(parent_css); |
| 7984 | struct task_group *tg; |
| 7985 | |
| 7986 | if (!parent) { |
| 7987 | /* This is early initialization for the top cgroup */ |
| 7988 | return &root_task_group.css; |
| 7989 | } |
| 7990 | |
| 7991 | tg = sched_create_group(parent); |
| 7992 | if (IS_ERR(tg)) |
| 7993 | return ERR_PTR(-ENOMEM); |
| 7994 | |
| 7995 | return &tg->css; |
| 7996 | } |
| 7997 | |
| 7998 | static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) |
| 7999 | { |
| 8000 | struct task_group *tg = css_tg(css); |
| 8001 | struct task_group *parent = css_tg(css->parent); |
| 8002 | |
| 8003 | if (parent) |
| 8004 | sched_online_group(tg, parent); |
| 8005 | return 0; |
| 8006 | } |
| 8007 | |
| 8008 | static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) |
| 8009 | { |
| 8010 | struct task_group *tg = css_tg(css); |
| 8011 | |
| 8012 | sched_destroy_group(tg); |
| 8013 | } |
| 8014 | |
| 8015 | static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) |
| 8016 | { |
| 8017 | struct task_group *tg = css_tg(css); |
| 8018 | |
| 8019 | sched_offline_group(tg); |
| 8020 | } |
| 8021 | |
| 8022 | static void cpu_cgroup_fork(struct task_struct *task) |
| 8023 | { |
| 8024 | sched_move_task(task); |
| 8025 | } |
| 8026 | |
| 8027 | static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, |
| 8028 | struct cgroup_taskset *tset) |
| 8029 | { |
| 8030 | struct task_struct *task; |
| 8031 | |
| 8032 | cgroup_taskset_for_each(task, tset) { |
| 8033 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8034 | if (!sched_rt_can_attach(css_tg(css), task)) |
| 8035 | return -EINVAL; |
| 8036 | #else |
| 8037 | /* We don't support RT-tasks being in separate groups */ |
| 8038 | if (task->sched_class != &fair_sched_class) |
| 8039 | return -EINVAL; |
| 8040 | #endif |
| 8041 | } |
| 8042 | return 0; |
| 8043 | } |
| 8044 | |
| 8045 | static void cpu_cgroup_attach(struct cgroup_subsys_state *css, |
| 8046 | struct cgroup_taskset *tset) |
| 8047 | { |
| 8048 | struct task_struct *task; |
| 8049 | |
| 8050 | cgroup_taskset_for_each(task, tset) |
| 8051 | sched_move_task(task); |
| 8052 | } |
| 8053 | |
| 8054 | static void cpu_cgroup_exit(struct cgroup_subsys_state *css, |
| 8055 | struct cgroup_subsys_state *old_css, |
| 8056 | struct task_struct *task) |
| 8057 | { |
| 8058 | /* |
| 8059 | * cgroup_exit() is called in the copy_process() failure path. |
* Ignore this case since the task hasn't run yet; this avoids
* trying to poke a half-freed task state from generic code.
| 8062 | */ |
| 8063 | if (!(task->flags & PF_EXITING)) |
| 8064 | return; |
| 8065 | |
| 8066 | sched_move_task(task); |
| 8067 | } |
| 8068 | |
| 8069 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8070 | static int cpu_shares_write_u64(struct cgroup_subsys_state *css, |
| 8071 | struct cftype *cftype, u64 shareval) |
| 8072 | { |
| 8073 | return sched_group_set_shares(css_tg(css), scale_load(shareval)); |
| 8074 | } |
| 8075 | |
| 8076 | static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, |
| 8077 | struct cftype *cft) |
| 8078 | { |
| 8079 | struct task_group *tg = css_tg(css); |
| 8080 | |
| 8081 | return (u64) scale_load_down(tg->shares); |
| 8082 | } |
| 8083 | |
| 8084 | #ifdef CONFIG_CFS_BANDWIDTH |
| 8085 | static DEFINE_MUTEX(cfs_constraints_mutex); |
| 8086 | |
| 8087 | const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ |
| 8088 | const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ |
| 8089 | |
| 8090 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); |
| 8091 | |
| 8092 | static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) |
| 8093 | { |
| 8094 | int i, ret = 0, runtime_enabled, runtime_was_enabled; |
| 8095 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 8096 | |
| 8097 | if (tg == &root_task_group) |
| 8098 | return -EINVAL; |
| 8099 | |
| 8100 | /* |
* Ensure we have at least some amount of bandwidth every period. This is
| 8102 | * to prevent reaching a state of large arrears when throttled via |
| 8103 | * entity_tick() resulting in prolonged exit starvation. |
| 8104 | */ |
| 8105 | if (quota < min_cfs_quota_period || period < min_cfs_quota_period) |
| 8106 | return -EINVAL; |
| 8107 | |
| 8108 | /* |
* Likewise, bound things on the other side by preventing insane quota
| 8110 | * periods. This also allows us to normalize in computing quota |
| 8111 | * feasibility. |
| 8112 | */ |
| 8113 | if (period > max_cfs_quota_period) |
| 8114 | return -EINVAL; |
| 8115 | |
| 8116 | /* |
| 8117 | * Prevent race between setting of cfs_rq->runtime_enabled and |
| 8118 | * unthrottle_offline_cfs_rqs(). |
| 8119 | */ |
| 8120 | get_online_cpus(); |
| 8121 | mutex_lock(&cfs_constraints_mutex); |
| 8122 | ret = __cfs_schedulable(tg, period, quota); |
| 8123 | if (ret) |
| 8124 | goto out_unlock; |
| 8125 | |
| 8126 | runtime_enabled = quota != RUNTIME_INF; |
| 8127 | runtime_was_enabled = cfs_b->quota != RUNTIME_INF; |
| 8128 | /* |
| 8129 | * If we need to toggle cfs_bandwidth_used, off->on must occur |
* before making related changes, and on->off must occur afterwards.
| 8131 | */ |
| 8132 | if (runtime_enabled && !runtime_was_enabled) |
| 8133 | cfs_bandwidth_usage_inc(); |
| 8134 | raw_spin_lock_irq(&cfs_b->lock); |
| 8135 | cfs_b->period = ns_to_ktime(period); |
| 8136 | cfs_b->quota = quota; |
| 8137 | |
| 8138 | __refill_cfs_bandwidth_runtime(cfs_b); |
| 8139 | /* restart the period timer (if active) to handle new period expiry */ |
| 8140 | if (runtime_enabled) |
| 8141 | start_cfs_bandwidth(cfs_b); |
| 8142 | raw_spin_unlock_irq(&cfs_b->lock); |
| 8143 | |
| 8144 | for_each_online_cpu(i) { |
| 8145 | struct cfs_rq *cfs_rq = tg->cfs_rq[i]; |
| 8146 | struct rq *rq = cfs_rq->rq; |
| 8147 | |
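/*
* Restart each cfs_rq with no local runtime; it will refill from
* the global pool (cfs_b) as it runs.
*/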
| 8148 | raw_spin_lock_irq(&rq->lock); |
| 8149 | cfs_rq->runtime_enabled = runtime_enabled; |
| 8150 | cfs_rq->runtime_remaining = 0; |
| 8151 | |
| 8152 | if (cfs_rq->throttled) |
| 8153 | unthrottle_cfs_rq(cfs_rq); |
| 8154 | raw_spin_unlock_irq(&rq->lock); |
| 8155 | } |
| 8156 | if (runtime_was_enabled && !runtime_enabled) |
| 8157 | cfs_bandwidth_usage_dec(); |
| 8158 | out_unlock: |
| 8159 | mutex_unlock(&cfs_constraints_mutex); |
| 8160 | put_online_cpus(); |
| 8161 | |
| 8162 | return ret; |
| 8163 | } |
| 8164 | |
| 8165 | int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) |
| 8166 | { |
| 8167 | u64 quota, period; |
| 8168 | |
| 8169 | period = ktime_to_ns(tg->cfs_bandwidth.period); |
| 8170 | if (cfs_quota_us < 0) |
| 8171 | quota = RUNTIME_INF; |
| 8172 | else |
| 8173 | quota = (u64)cfs_quota_us * NSEC_PER_USEC; |
| 8174 | |
| 8175 | return tg_set_cfs_bandwidth(tg, period, quota); |
| 8176 | } |
| 8177 | |
| 8178 | long tg_get_cfs_quota(struct task_group *tg) |
| 8179 | { |
| 8180 | u64 quota_us; |
| 8181 | |
| 8182 | if (tg->cfs_bandwidth.quota == RUNTIME_INF) |
| 8183 | return -1; |
| 8184 | |
| 8185 | quota_us = tg->cfs_bandwidth.quota; |
| 8186 | do_div(quota_us, NSEC_PER_USEC); |
| 8187 | |
| 8188 | return quota_us; |
| 8189 | } |
| 8190 | |
| 8191 | int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) |
| 8192 | { |
| 8193 | u64 quota, period; |
| 8194 | |
| 8195 | period = (u64)cfs_period_us * NSEC_PER_USEC; |
| 8196 | quota = tg->cfs_bandwidth.quota; |
| 8197 | |
| 8198 | return tg_set_cfs_bandwidth(tg, period, quota); |
| 8199 | } |
| 8200 | |
| 8201 | long tg_get_cfs_period(struct task_group *tg) |
| 8202 | { |
| 8203 | u64 cfs_period_us; |
| 8204 | |
| 8205 | cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); |
| 8206 | do_div(cfs_period_us, NSEC_PER_USEC); |
| 8207 | |
| 8208 | return cfs_period_us; |
| 8209 | } |
| 8210 | |
| 8211 | static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, |
| 8212 | struct cftype *cft) |
| 8213 | { |
| 8214 | return tg_get_cfs_quota(css_tg(css)); |
| 8215 | } |
| 8216 | |
| 8217 | static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, |
| 8218 | struct cftype *cftype, s64 cfs_quota_us) |
| 8219 | { |
| 8220 | return tg_set_cfs_quota(css_tg(css), cfs_quota_us); |
| 8221 | } |
| 8222 | |
| 8223 | static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, |
| 8224 | struct cftype *cft) |
| 8225 | { |
| 8226 | return tg_get_cfs_period(css_tg(css)); |
| 8227 | } |
| 8228 | |
| 8229 | static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, |
| 8230 | struct cftype *cftype, u64 cfs_period_us) |
| 8231 | { |
| 8232 | return tg_set_cfs_period(css_tg(css), cfs_period_us); |
| 8233 | } |
| 8234 | |
| 8235 | struct cfs_schedulable_data { |
| 8236 | struct task_group *tg; |
| 8237 | u64 period, quota; |
| 8238 | }; |
| 8239 | |
| 8240 | /* |
| 8241 | * normalize group quota/period to be quota/max_period |
| 8242 | * note: units are usecs |
| 8243 | */ |
| 8244 | static u64 normalize_cfs_quota(struct task_group *tg, |
| 8245 | struct cfs_schedulable_data *d) |
| 8246 | { |
| 8247 | u64 quota, period; |
| 8248 | |
| 8249 | if (tg == d->tg) { |
| 8250 | period = d->period; |
| 8251 | quota = d->quota; |
| 8252 | } else { |
| 8253 | period = tg_get_cfs_period(tg); |
| 8254 | quota = tg_get_cfs_quota(tg); |
| 8255 | } |
| 8256 | |
| 8257 | /* note: these should typically be equivalent */ |
| 8258 | if (quota == RUNTIME_INF || quota == -1) |
| 8259 | return RUNTIME_INF; |
| 8260 | |
| 8261 | return to_ratio(period, quota); |
| 8262 | } |
| 8263 | |
| 8264 | static int tg_cfs_schedulable_down(struct task_group *tg, void *data) |
| 8265 | { |
| 8266 | struct cfs_schedulable_data *d = data; |
| 8267 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 8268 | s64 quota = 0, parent_quota = -1; |
| 8269 | |
| 8270 | if (!tg->parent) { |
| 8271 | quota = RUNTIME_INF; |
| 8272 | } else { |
| 8273 | struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; |
| 8274 | |
| 8275 | quota = normalize_cfs_quota(tg, d); |
| 8276 | parent_quota = parent_b->hierarchical_quota; |
| 8277 | |
| 8278 | /* |
| 8279 | * ensure max(child_quota) <= parent_quota, inherit when no |
| 8280 | * limit is set |
| 8281 | */ |
| 8282 | if (quota == RUNTIME_INF) |
| 8283 | quota = parent_quota; |
| 8284 | else if (parent_quota != RUNTIME_INF && quota > parent_quota) |
| 8285 | return -EINVAL; |
| 8286 | } |
| 8287 | cfs_b->hierarchical_quota = quota; |
| 8288 | |
| 8289 | return 0; |
| 8290 | } |
| 8291 | |
| 8292 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) |
| 8293 | { |
| 8294 | int ret; |
| 8295 | struct cfs_schedulable_data data = { |
| 8296 | .tg = tg, |
| 8297 | .period = period, |
| 8298 | .quota = quota, |
| 8299 | }; |
| 8300 | |
| 8301 | if (quota != RUNTIME_INF) { |
| 8302 | do_div(data.period, NSEC_PER_USEC); |
| 8303 | do_div(data.quota, NSEC_PER_USEC); |
| 8304 | } |
| 8305 | |
| 8306 | rcu_read_lock(); |
| 8307 | ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); |
| 8308 | rcu_read_unlock(); |
| 8309 | |
| 8310 | return ret; |
| 8311 | } |
| 8312 | |
| 8313 | static int cpu_stats_show(struct seq_file *sf, void *v) |
| 8314 | { |
| 8315 | struct task_group *tg = css_tg(seq_css(sf)); |
| 8316 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 8317 | |
| 8318 | seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); |
| 8319 | seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); |
| 8320 | seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); |
| 8321 | |
| 8322 | return 0; |
| 8323 | } |
| 8324 | #endif /* CONFIG_CFS_BANDWIDTH */ |
| 8325 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 8326 | |
| 8327 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8328 | static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, |
| 8329 | struct cftype *cft, s64 val) |
| 8330 | { |
| 8331 | return sched_group_set_rt_runtime(css_tg(css), val); |
| 8332 | } |
| 8333 | |
| 8334 | static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, |
| 8335 | struct cftype *cft) |
| 8336 | { |
| 8337 | return sched_group_rt_runtime(css_tg(css)); |
| 8338 | } |
| 8339 | |
| 8340 | static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, |
| 8341 | struct cftype *cftype, u64 rt_period_us) |
| 8342 | { |
| 8343 | return sched_group_set_rt_period(css_tg(css), rt_period_us); |
| 8344 | } |
| 8345 | |
| 8346 | static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, |
| 8347 | struct cftype *cft) |
| 8348 | { |
| 8349 | return sched_group_rt_period(css_tg(css)); |
| 8350 | } |
| 8351 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 8352 | |
| 8353 | static struct cftype cpu_files[] = { |
| 8354 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8355 | { |
| 8356 | .name = "shares", |
| 8357 | .read_u64 = cpu_shares_read_u64, |
| 8358 | .write_u64 = cpu_shares_write_u64, |
| 8359 | }, |
| 8360 | #endif |
| 8361 | #ifdef CONFIG_CFS_BANDWIDTH |
| 8362 | { |
| 8363 | .name = "cfs_quota_us", |
| 8364 | .read_s64 = cpu_cfs_quota_read_s64, |
| 8365 | .write_s64 = cpu_cfs_quota_write_s64, |
| 8366 | }, |
| 8367 | { |
| 8368 | .name = "cfs_period_us", |
| 8369 | .read_u64 = cpu_cfs_period_read_u64, |
| 8370 | .write_u64 = cpu_cfs_period_write_u64, |
| 8371 | }, |
| 8372 | { |
| 8373 | .name = "stat", |
| 8374 | .seq_show = cpu_stats_show, |
| 8375 | }, |
| 8376 | #endif |
| 8377 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8378 | { |
| 8379 | .name = "rt_runtime_us", |
| 8380 | .read_s64 = cpu_rt_runtime_read, |
| 8381 | .write_s64 = cpu_rt_runtime_write, |
| 8382 | }, |
| 8383 | { |
| 8384 | .name = "rt_period_us", |
| 8385 | .read_u64 = cpu_rt_period_read_uint, |
| 8386 | .write_u64 = cpu_rt_period_write_uint, |
| 8387 | }, |
| 8388 | #endif |
| 8389 | { } /* terminate */ |
| 8390 | }; |
| 8391 | |
| 8392 | struct cgroup_subsys cpu_cgrp_subsys = { |
| 8393 | .css_alloc = cpu_cgroup_css_alloc, |
| 8394 | .css_free = cpu_cgroup_css_free, |
| 8395 | .css_online = cpu_cgroup_css_online, |
| 8396 | .css_offline = cpu_cgroup_css_offline, |
| 8397 | .fork = cpu_cgroup_fork, |
| 8398 | .can_attach = cpu_cgroup_can_attach, |
| 8399 | .attach = cpu_cgroup_attach, |
| 8400 | .exit = cpu_cgroup_exit, |
| 8401 | .legacy_cftypes = cpu_files, |
| 8402 | .early_init = 1, |
| 8403 | }; |
| 8404 | |
| 8405 | #endif /* CONFIG_CGROUP_SCHED */ |
| 8406 | |
| 8407 | void dump_cpu_task(int cpu) |
| 8408 | { |
| 8409 | pr_info("Task dump for CPU %d:\n", cpu); |
| 8410 | sched_show_task(cpu_curr(cpu)); |
| 8411 | } |