sched/cputime: Fix steal time accounting
kernel/sched/cputime.c
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled, so writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with the side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise compared to taking a lock on
 * every irq in account_system_time().
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

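/*
 * Illustrative sketch (not part of this file): on 32-bit kernels the
 * reader side, which is assumed to live in kernel/sched/sched.h in this
 * era, pairs with the seqcount above roughly as below, so a 64-bit irq
 * time value cannot be torn by a concurrent writer:
 *
 *      static inline u64 irq_time_read(int cpu)
 *      {
 *              u64 irq_time;
 *              unsigned seq;
 *
 *              do {
 *                      seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *                      irq_time = per_cpu(cpu_softirq_time, cpu) +
 *                                 per_cpu(cpu_hardirq_time, cpu);
 *              } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 *
 *              return irq_time;
 *      }
 */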
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd
         * thread in that case, so as not to confuse the scheduler with
         * a special task that does not consume any time but still
         * wants to run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
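/*
 * Sketch of how this function is reached (assumed wiring, not defined in
 * this file): irq_enter()/irq_exit() call the account_irq_enter_time()/
 * account_irq_exit_time() wrappers from <linux/vtime.h>, which in this
 * era look roughly like:
 *
 *      static inline void account_irq_enter_time(struct task_struct *tsk)
 *      {
 *              vtime_account_irq_enter(tsk);
 *              irqtime_account_irq(tsk);
 *      }
 *
 * so the delta accumulated above is measured between consecutive
 * hardirq/softirq entry and exit transitions on the local CPU.
 */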

static cputime_t irqtime_account_hi_update(cputime_t maxtime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        cputime_t irq_cputime;

        local_irq_save(flags);
        irq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)) -
                      cpustat[CPUTIME_IRQ];
        irq_cputime = min(irq_cputime, maxtime);
        cpustat[CPUTIME_IRQ] += irq_cputime;
        local_irq_restore(flags);
        return irq_cputime;
}

static cputime_t irqtime_account_si_update(cputime_t maxtime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        cputime_t softirq_cputime;

        local_irq_save(flags);
        softirq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)) -
                          cpustat[CPUTIME_SOFTIRQ];
        softirq_cputime = min(softirq_cputime, maxtime);
        cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
        local_irq_restore(flags);
        return softirq_cputime;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

static cputime_t irqtime_account_hi_update(cputime_t dummy)
{
        return 0;
}

static cputime_t irqtime_account_si_update(cputime_t dummy)
{
        return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
                       cputime_t cputime_scaled)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
                cpustat[CPUTIME_USER] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field to update
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                           cputime_t cputime_scaled, int index)
{
        /* Add system time to process. */
        p->stime += cputime;
        p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        __account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                cputime_t steal_cputime;
                u64 steal;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                steal_cputime = min(nsecs_to_cputime(steal), maxtime);
                account_steal_time(steal_cputime);
                this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);

                return steal_cputime;
        }
#endif
        return 0;
}

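/*
 * Worked example (illustrative numbers): if paravirt_steal_clock()
 * reports 12ms and prev_steal_time is 4ms, the new steal delta is 8ms.
 * With maxtime equal to one 4ms tick, only 4ms is accounted as steal
 * now and prev_steal_time advances by exactly those 4ms; the remaining
 * 4ms is picked up by later calls.  The clamp is what prevents a burst
 * of steal time from being charged on top of user/system/idle time for
 * the same elapsed period.
 */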
/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline cputime_t account_other_time(cputime_t max)
{
        cputime_t accounted;

        accounted = steal_account_process_time(max);

        if (accounted < max)
                accounted += irqtime_account_hi_update(max - accounted);

        if (accounted < max)
                accounted += irqtime_account_si_update(max - accounted);

        return accounted;
}

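/*
 * Example (illustrative numbers): with max == 10ms, 6ms of pending steal
 * time, 3ms of not yet accounted hardirq time and 4ms of softirq time,
 * this accounts 6ms steal + 3ms irq + 1ms softirq and returns 10ms; the
 * remaining 3ms of softirq time stays pending for a later update.  Steal
 * time is deliberately consumed first, then hardirq, then softirq.
 */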
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        cputime_t utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;

        rcu_read_lock();
        /* Attempt a lockless read on the first round. */
        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
                times->utime = sig->utime;
                times->stime = sig->stime;
                times->sum_exec_runtime = sig->sum_sched_runtime;

                for_each_thread(tsk, t) {
                        task_cputime(t, &utime, &stime);
                        times->utime += utime;
                        times->stime += stime;
                        times->sum_exec_runtime += task_sched_runtime(t);
                }
                /* If lockless access failed, take the lock. */
                nextseq = 1;
        } while (need_seqretry(&sig->stats_lock, seq));
        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
        rcu_read_unlock();
}

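/*
 * Note on the retry pattern above (generic sketch, assuming the
 * read_seqbegin_or_lock() semantics from <linux/seqlock.h>): the first
 * pass runs lockless with an even sequence number; if a writer raced,
 * need_seqretry() requests another round, and setting nextseq = 1 makes
 * that second pass take sig->stats_lock exclusively, so the loop cannot
 * livelock behind a stream of updaters:
 *
 *      do {
 *              seq = nextseq;
 *              flags = read_seqbegin_or_lock_irqsave(&lock, &seq);
 *              ... speculative or locked read ...
 *              nextseq = 1;
 *      } while (need_seqretry(&lock, seq));
 *      done_seqretry_irqrestore(&lock, seq, flags);
 */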
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there
 * is no timer going off while we are in a hardirq and hence we may never
 * get an opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time, not on irq or
 * softirq time, as those no longer count towards the task's exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        u64 cputime = (__force u64) cputime_one_jiffy * ticks;
        cputime_t scaled, other;

        /*
         * When returning from idle, many ticks can get accounted at
         * once, including some ticks of steal, irq, and softirq time.
         * Subtract those ticks from the amount of time accounted to
         * idle, or potentially user or system time. Due to rounding,
         * other time can exceed ticks occasionally.
         */
        other = account_other_time(cputime);
        if (other >= cputime)
                return;
        cputime -= other;
        scaled = cputime_to_scaled(cputime);

        if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime, scaled);
        } else {
                __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
}

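/*
 * Example (illustrative numbers): coming out of a long idle period this
 * may be called with ticks == 4, i.e. four jiffies worth of cputime.  If
 * account_other_time() finds one jiffy of pending steal/irq/softirq
 * time, that jiffy is charged there and only the remaining three jiffies
 * reach the demultiplexing above, landing in softirq (ksoftirqd), user,
 * idle, guest or system time as appropriate.
 */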
static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
#endif
        arch_vtime_task_switch(prev);
}
#endif

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


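/*
 * Assumed call path (not defined in this file): finish_task_switch()
 * invokes the vtime_task_switch() wrapper from <linux/vtime.h>, which,
 * when the architecture does not provide its own implementation, ends up
 * here roughly as:
 *
 *      static inline void vtime_task_switch(struct task_struct *prev)
 *      {
 *              if (vtime_accounting_enabled())
 *                      vtime_common_task_switch(prev);
 *      }
 *
 * so the previous task's pending vtime is flushed on every context switch.
 */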
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * assign another meaning to idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
        if (!in_interrupt() && is_idle_task(tsk))
                vtime_account_idle(tsk);
        else
                vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        cputime_t cputime, scaled, steal;
        struct rq *rq = this_rq();

        if (vtime_accounting_cpu_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        cputime = cputime_one_jiffy;
        steal = steal_account_process_time(cputime);

        if (steal >= cputime)
                return;

        cputime -= steal;
        scaled = cputime_to_scaled(cputime);

        if (user_tick)
                account_user_time(p, cputime, scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
        else
                account_idle_time(cputime);
}

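/*
 * Example (illustrative, assuming HZ=250 so one jiffy is 4ms of cputime):
 * if steal_account_process_time() reports 4ms or more of steal for this
 * tick, the whole tick has already gone to CPUTIME_STEAL and the task is
 * charged nothing.  With 1ms of steal, only the remaining 3ms is charged
 * to user, system or idle time above.  Clamping the steal time to the
 * tick is what keeps the sum of the cpustat fields consistent with the
 * elapsed wall clock time.
 */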
/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks to account as idle time
 */
void account_idle_ticks(unsigned long ticks)
{
        cputime_t cputime, steal;

        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        cputime = jiffies_to_cputime(ticks);
        steal = steal_account_process_time(cputime);

        if (steal >= cputime)
                return;

        cputime -= steal;
        account_idle_time(cputime);
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime)
                        swap(rtime, stime);

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return (__force cputime_t) scaled;
}

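/*
 * Worked example (illustrative numbers): with stime = 2, utime = 6
 * (total = 8) and rtime = 40, every value fits in 32 bits, so the loop
 * exits immediately and the result is 2 * 40 / 8 = 10: the task's 1:3
 * stime/utime ratio is re-applied to the scheduler's runtime.  Only when
 * rtime or total exceed 32 bits does the loop shift bits away (halving
 * rtime/total, or rebalancing rtime against stime) until the final
 * 32x32->64 multiply cannot overflow.
 */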
/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices
 * of a task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a
 * variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, stime, utime;
        unsigned long flags;

        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);

        /*
         * This is possible under two circumstances:
         *  - rtime isn't monotonic after all (a bug);
         *  - we got reordered by the lock.
         *
         * In both cases this acts as a filter such that the rest of the code
         * can assume it is monotonic regardless of anything else.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        if (utime == 0) {
                stime = rtime;
                goto update;
        }

        if (stime == 0) {
                utime = rtime;
                goto update;
        }

        stime = scale_stime((__force u64)stime, (__force u64)rtime,
                            (__force u64)(stime + utime));

        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
         *
         *  utime_i+1 = rtime_i+1 - stime_i
         *            = rtime_i+1 - (rtime_i - utime_i)
         *            = (rtime_i+1 - rtime_i) + utime_i
         *            >= utime_i
         */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;

        /*
         * Make sure utime doesn't go backwards; this still preserves
         * monotonicity for stime, analogous argument to above.
         */
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

update:
        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
        raw_spin_unlock_irqrestore(&prev->lock, flags);
}

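/*
 * Worked example (illustrative numbers): say prev->stime = 6 and
 * prev->utime = 4 (sum 10), and the new snapshot is rtime = 12,
 * stime = 2, utime = 10.  scale_stime() gives 2 * 12 / 12 = 2, which is
 * below prev->stime, so stime is clamped to 6 and utime becomes
 * rtime - stime = 6.  Both fields stay monotonic and still sum to rtime.
 */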
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static cputime_t vtime_delta(struct task_struct *tsk)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_before(now, (unsigned long)tsk->vtime_snap))
                return 0;

        return jiffies_to_cputime(now - tsk->vtime_snap);
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
        unsigned long now = READ_ONCE(jiffies);
        cputime_t delta, other;

        delta = jiffies_to_cputime(now - tsk->vtime_snap);
        other = account_other_time(delta);
        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
        tsk->vtime_snap = now;

        return delta - other;
}

static void __vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
        if (!vtime_delta(tsk))
                return;

        write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        write_seqcount_end(&tsk->vtime_seqcount);
}

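/*
 * Example (illustrative numbers): a nohz_full task runs for 100 jiffies
 * without a tick before vtime_account_system() is called.  If 10 jiffies
 * worth of that window were eaten by steal, hardirq or softirq time,
 * account_other_time() charges those 10 jiffies to the corresponding
 * cpustat fields and get_vtime_delta() returns only the remaining 90
 * jiffies, which are then accounted as system, user or idle time.
 */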
void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_cpu;

        write_seqcount_begin(&tsk->vtime_seqcount);
        tsk->vtime_snap_whence = VTIME_SYS;
        if (vtime_delta(tsk)) {
                delta_cpu = get_vtime_delta(tsk);
                account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        }
        write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
        write_seqcount_begin(&tsk->vtime_seqcount);
        if (vtime_delta(tsk))
                __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
        write_seqcount_end(&tsk->vtime_seqcount);
}

742
743void vtime_guest_enter(struct task_struct *tsk)
744{
5b206d48
FW
745 /*
746 * The flags must be updated under the lock with
747 * the vtime_snap flush and update.
748 * That enforces a right ordering and update sequence
749 * synchronization against the reader (task_gtime())
750 * that can thus safely catch up with a tickless delta.
751 */
b7ce2277 752 write_seqcount_begin(&tsk->vtime_seqcount);
ff9a9b4c
RR
753 if (vtime_delta(tsk))
754 __vtime_account_system(tsk);
6a61671b 755 current->flags |= PF_VCPU;
b7ce2277 756 write_seqcount_end(&tsk->vtime_seqcount);
6a61671b 757}
48d6a816 758EXPORT_SYMBOL_GPL(vtime_guest_enter);
6a61671b
FW
759
760void vtime_guest_exit(struct task_struct *tsk)
761{
b7ce2277 762 write_seqcount_begin(&tsk->vtime_seqcount);
6a61671b
FW
763 __vtime_account_system(tsk);
764 current->flags &= ~PF_VCPU;
b7ce2277 765 write_seqcount_end(&tsk->vtime_seqcount);
abf917cd 766}
48d6a816 767EXPORT_SYMBOL_GPL(vtime_guest_exit);
abf917cd
FW
768
769void vtime_account_idle(struct task_struct *tsk)
770{
6a61671b 771 cputime_t delta_cpu = get_vtime_delta(tsk);
abf917cd
FW
772
773 account_idle_time(delta_cpu);
774}
3f4724ea 775
void arch_vtime_task_switch(struct task_struct *prev)
{
        write_seqcount_begin(&prev->vtime_seqcount);
        prev->vtime_snap_whence = VTIME_INACTIVE;
        write_seqcount_end(&prev->vtime_seqcount);

        write_seqcount_begin(&current->vtime_seqcount);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = jiffies;
        write_seqcount_end(&current->vtime_seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        write_seqcount_begin(&t->vtime_seqcount);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = jiffies;
        write_seqcount_end(&t->vtime_seqcount);
        local_irq_restore(flags);
}

cputime_t task_gtime(struct task_struct *t)
{
        unsigned int seq;
        cputime_t gtime;

        if (!vtime_accounting_enabled())
                return t->gtime;

        do {
                seq = read_seqcount_begin(&t->vtime_seqcount);

                gtime = t->gtime;
                if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
                        gtime += vtime_delta(t);

        } while (read_seqcount_retry(&t->vtime_seqcount, seq));

        return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
                   cputime_t *u_dst, cputime_t *s_dst,
                   cputime_t *u_src, cputime_t *s_src,
                   cputime_t *udelta, cputime_t *sdelta)
{
        unsigned int seq;
        unsigned long long delta;

        do {
                *udelta = 0;
                *sdelta = 0;

                seq = read_seqcount_begin(&t->vtime_seqcount);

                if (u_dst)
                        *u_dst = *u_src;
                if (s_dst)
                        *s_dst = *s_src;

                /* Task is sleeping, nothing to add */
                if (t->vtime_snap_whence == VTIME_INACTIVE ||
                    is_idle_task(t))
                        continue;

                delta = vtime_delta(t);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
                        *udelta = delta;
                } else {
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
        } while (read_seqcount_retry(&t->vtime_seqcount, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
        cputime_t udelta, sdelta;

        if (!vtime_accounting_enabled()) {
                if (utime)
                        *utime = t->utime;
                if (stime)
                        *stime = t->stime;
                return;
        }

        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
                *utime += udelta;
        if (stime)
                *stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
                         cputime_t *utimescaled, cputime_t *stimescaled)
{
        cputime_t udelta, sdelta;

        if (!vtime_accounting_enabled()) {
                if (utimescaled)
                        *utimescaled = t->utimescaled;
                if (stimescaled)
                        *stimescaled = t->stimescaled;
                return;
        }

        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
                *utimescaled += cputime_to_scaled(udelta);
        if (stimescaled)
                *stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */