kernel/sched/stats.h

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
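
/*
 * Illustrative usage of the accessors above (a sketch, not taken from
 * this file; "yld_count" is assumed to be one of the schedstat fields
 * carried by struct rq when CONFIG_SCHEDSTATS is set):
 *
 *	schedstat_inc(rq, yld_count);		count a sched_yield()
 *	schedstat_add(rq, rq_cpu_time, delta);	accumulate a duration
 *	schedstat_set(var, val);		record a sampled value
 *
 * With CONFIG_SCHEDSTATS disabled, all three expand to empty statements,
 * so call sites need no #ifdef of their own.
 */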
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus: the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = rq_clock(task_rq(t)), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
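
/*
 * Worked example of the skew argument above, with made-up numbers: a
 * task is queued on CPU0 at rq->clock == 100 and dequeued there at
 * rq->clock == 130, giving delta == 30. If CPU1's clock ran a constant
 * 50 ahead, the same wait measured on CPU1 would read 180 and 210 and
 * still give 30: each delta is taken against a single cpu's clock, so
 * a fixed offset between clocks cancels out of the accumulated
 * run_delay.
 */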

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = rq_clock(task_rq(t)), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(task_rq(t));
}
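
/*
 * A minimal sketch of how the enqueue path is assumed to use the helper
 * above (the real caller lives in kernel/sched/core.c; this body is
 * illustrative, not a copy of it):
 *
 *	static void enqueue_task(struct rq *rq, struct task_struct *p,
 *				 int flags)
 *	{
 *		update_rq_clock(rq);
 *		sched_info_queued(p);		stamp last_queued once
 *		p->sched_class->enqueue_task(rq, p, flags);
 *	}
 */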

/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran. Also, if the process is still in the TASK_RUNNING
 * state, call sched_info_queued() to mark that it has now again started
 * waiting on the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = rq_clock(task_rq(t)) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
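
/*
 * Sketch of the intended call site: the context-switch path in
 * kernel/sched/core.c is assumed to invoke sched_info_switch(prev, next)
 * from prepare_task_switch(), with prev != next already guaranteed:
 *
 *	prepare_task_switch(rq, prev, next);
 *		-> sched_info_switch(prev, next);
 *
 * Keeping the sched_info_on() test in this wrapper means the common
 * case with delay accounting disabled pays only a single branch.
 */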
#else /* !CONFIG_SCHEDSTATS && !CONFIG_TASK_DELAY_ACCT */
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk: Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 * thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
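
/*
 * Typical caller, sketched: the tick-time accounting path (e.g.
 * account_user_time() in kernel/sched/cputime.c) is assumed to charge
 * the task first and then mirror the same value into the group total:
 *
 *	p->utime += cputime;
 *	account_group_user_time(p, cputime);
 *
 * account_group_system_time() below pairs with the system-time side of
 * the same path in the same way.
 */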

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 * thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns: Time value by which to increment the sum_exec_runtime field
 * of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
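
/*
 * Typical caller, sketched: the fair class's update_curr() is assumed
 * to compute the running task's delta and feed the same value here so
 * the thread-group total tracks per-task runtime:
 *
 *	delta_exec = now - curr->exec_start;
 *	curr->sum_exec_runtime += delta_exec;
 *	if (entity_is_task(curr))
 *		account_group_exec_runtime(task_of(curr), delta_exec);
 */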