#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time
 * to the wrong task when irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq
 * in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
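
/*
 * On 32-bit, a 64-bit per-cpu counter cannot be read atomically: a
 * reader may observe the two halves from different updates. irq_time_seq
 * is the seqcount bumped by irq_time_write_begin()/irq_time_write_end()
 * around writers, so readers can detect and retry a torn read (see the
 * irq time read helpers in sched.h).
 */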

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special
	 * task that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
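
/*
 * The function above runs on every {soft,hard}irq entry and exit, so
 * each call accounts the slice elapsed since irq_start_time was last
 * advanced: on entry the slice belongs to the interrupted context
 * (softirq time if we interrupt a softirq, otherwise nothing), on exit
 * it belongs to the irq context we are leaving.
 */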

static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
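
/*
 * The two helpers above compare the ns-granularity per-cpu counters
 * against what has already been folded into cpustat (in cputime
 * units). A nonzero return means at least one more tick worth of
 * hardirq/softirq time is pending, so the tick can be accounted as
 * irq time rather than task time.
 */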

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
#ifdef CONFIG_CGROUP_CPUACCT
	struct kernel_cpustat *kcpustat;
	struct cpuacct *ca;
#endif
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;

#ifdef CONFIG_CGROUP_CPUACCT
	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(p);
	while (ca && (ca != &root_cpuacct)) {
		kcpustat = this_cpu_ptr(ca->cpustat);
		kcpustat->cpustat[index] += tmp;
		ca = parent_ca(ca);
	}
	rcu_read_unlock();
#endif
}
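
/*
 * Note: the walk above charges every cpuacct ancestor of @p except the
 * root group; the root was already updated through the plain per-cpu
 * kernel_cpustat before the #ifdef, which keeps the common
 * single-cgroup case cheap.
 */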

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
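
/*
 * Guest time is deliberately accounted twice above: once in the
 * USER/NICE buckets, so it keeps showing up as user time, and once in
 * the GUEST/GUEST_NICE buckets, so /proc/stat can also report it
 * separately.
 */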

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
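
/*
 * Only whole ticks of steal time are folded into cpustat above;
 * because prev_steal_time advances by st * TICK_NSEC, any sub-tick
 * remainder stays pending and is picked up by a later tick.
 */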

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;

	times->utime = sig->utime;
	times->stime = sig->stime;
	times->sum_exec_runtime = sig->sum_sched_runtime;

	rcu_read_lock();
	/* make sure we can trust tsk->thread_group list */
	if (!likely(pid_alive(tsk)))
		goto out;

	t = tsk;
	do {
		task_cputime(t, &utime, &stime);
		times->utime += utime;
		times->stime += stime;
		times->sum_exec_runtime += task_sched_runtime(t);
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
}
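
/*
 * sig->utime/stime/sum_sched_runtime accumulate the times of threads
 * that already exited; the loop above adds the still-live threads. The
 * pid_alive() check makes sure the thread_group list we walk under RCU
 * has not already been unlinked.
 */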

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}
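
/*
 * account_process_tick() is the periodic-tick entry point (called from
 * the timer tick via update_process_times()); the early returns hand
 * the work off to the precise accounting paths (vtime, irqtime) when
 * those are active.
 */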

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled())
		return;

	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because the context tracking doesn't hook
		 * on irq entry/exit. This way we know if
		 * we need to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
{
	u64 temp = (__force u64) rtime;

	temp *= (__force u64) stime;

	if (sizeof(cputime_t) == 4)
		temp = div_u64(temp, (__force u32) total);
	else
		temp = div64_u64(temp, (__force u64) total);

	return (__force cputime_t) temp;
}
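
/*
 * Worked example of the scaling above: with stime = 300 and
 * utime = 700 ticks sampled (total = 1000) but only rtime = 800 ticks
 * of real runtime recorded by the scheduler, the scaled system time is
 * 300 * 800 / 1000 = 240 ticks, preserving the sampled 30/70 split.
 * The multiplication is done in 64 bits first precisely so that
 * stime * rtime does not overflow cputime_t before the division.
 */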

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, total;

	stime = curr->stime;
	total = stime + curr->utime;

	/*
	 * Tick based cputime accounting depends on random scheduling
	 * timeslices of a task to be interrupted or not by the timer.
	 * Depending on these circumstances, the number of these interrupts
	 * may be over or under-optimistic, matching the real user and system
	 * cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	if (total)
		stime = scale_stime(stime, rtime, total);
	else
		stime = rtime;

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, rtime - prev->stime);

	*ut = prev->utime;
	*st = prev->stime;
}
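
/*
 * Example of why the max() pair above is needed: if a later sample
 * scales to stime = 230 while prev->stime is already 240, reporting
 * 230 would make system time appear to run backwards in /proc; max()
 * keeps 240, and utime is then derived from rtime - prev->stime so the
 * pair stays consistent with the scheduler runtime.
 */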

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}
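
/*
 * The clock < vtime_snap check above clamps the delta to zero:
 * local_clock() is not guaranteed to be synchronized across CPUs, so a
 * snapshot taken on another CPU may be slightly ahead of us, and an
 * unsigned subtraction would otherwise yield a huge bogus delta.
 */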

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_irq_exit(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	if (!vtime_accounting_enabled())
		return;

	delta_cpu = get_vtime_delta(tsk);

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

bool vtime_accounting_enabled(void)
{
	return context_tracking_active();
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock();
	write_sequnlock(&current->vtime_seqlock);
}
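
/*
 * On context switch the outgoing task's accounting is parked as
 * VTIME_SLEEPING, while the incoming (current) task restarts its
 * snapshot at the present sched_clock() so none of the previous
 * task's time leaks into it.
 */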

void vtime_init_idle(struct task_struct *t)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock();
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */