Fix: correct typo in author email address
[deliverable/lttng-analyses.git] / linuxautomaton / linuxautomaton / sched.py
#!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from linuxautomaton import sp, sv
from babeltrace import CTFScope


class SchedStateProvider(sp.StateProvider):
    def __init__(self, state):
        self.state = state
        self.cpus = state.cpus
        self.tids = state.tids
        self.dirty_pages = state.dirty_pages
        cbs = {
            'sched_switch': self._process_sched_switch,
            'sched_migrate_task': self._process_sched_migrate_task,
            'sched_wakeup': self._process_sched_wakeup,
            'sched_wakeup_new': self._process_sched_wakeup,
            'sched_process_fork': self._process_sched_process_fork,
            'sched_process_exec': self._process_sched_process_exec,
        }
        self._register_cbs(cbs)

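    # Note: event dispatch is inherited from sp.StateProvider: _register_cbs()
    # records the name -> callback mapping above, and _process_event_cb() is
    # expected to route each incoming event to the callback registered for its
    # name (behaviour provided by the base class, not shown in this file).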
    def process_event(self, ev):
        self._process_event_cb(ev)

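    # Per-CPU accounting: each sv.CPU tracks the tid currently running on it
    # (current_tid), the timestamp at which that task was scheduled in
    # (start_task_ns) and its accumulated busy time (cpu_ns). On every
    # sched_switch the elapsed slice is added to cpu_ns; when the CPU goes
    # idle (next_tid == 0, the swapper task) timing is paused. Any perf_*
    # context fields on the event are snapshotted into c.perf.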
    def sched_switch_per_cpu(self, cpu_id, ts, next_tid, event):
        """Compute per-cpu usage"""
        if cpu_id in self.cpus:
            c = self.cpus[cpu_id]
            if c.start_task_ns != 0:
                c.cpu_ns += ts - c.start_task_ns
            # exclude swapper process
            if next_tid != 0:
                c.start_task_ns = ts
                c.current_tid = next_tid
            else:
                c.start_task_ns = 0
                c.current_tid = -1
        else:
            self.add_cpu(cpu_id, ts, next_tid)
            # fetch the sv.CPU we just created so the perf loop below
            # does not reference an unbound variable
            c = self.cpus[cpu_id]
        for context in event.keys():
            if context.startswith("perf_"):
                c.perf[context] = event[context]

    def add_cpu(self, cpu_id, ts, next_tid):
        c = sv.CPU()
        c.cpu_id = cpu_id
        c.current_tid = next_tid
        # when we schedule a real task (not swapper)
        c.start_task_ns = ts
        # first activity on the sv.CPU
        self.cpus[cpu_id] = c
        self.cpus[cpu_id].total_per_cpu_pc_list = []

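    # Per-tid accounting: the task being scheduled out gets the time elapsed
    # since its last_sched timestamp added to its cpu_ns. perf_* context
    # counters are turned into per-task deltas by diffing against the last
    # value seen on this sv.CPU, and a pending entry in the CPU's wakeup_queue
    # yields the scheduling latency (wakeup to switch-in) of the incoming
    # task. Positive deltas and the latency are also reported through 'ret'.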
    def sched_switch_per_tid(self, ts, prev_tid, next_tid,
                             next_comm, cpu_id, event, ret):
        """Compute per-tid usage"""
        # if we don't know the sv.CPU yet, create it first
        if cpu_id not in self.cpus.keys():
            self.add_cpu(cpu_id, ts, next_tid)
        c = self.cpus[cpu_id]
        # per-tid usage
        if prev_tid in self.tids:
            p = self.tids[prev_tid]
            if p.last_sched is not None:
                p.cpu_ns += (ts - p.last_sched)
            # perf PMU counters checks
            for context in event.field_list_with_scope(
                    CTFScope.STREAM_EVENT_CONTEXT):
                if context.startswith("perf_"):
                    if context not in c.perf.keys():
                        c.perf[context] = event[context]
                    # accumulate the difference from the last known value
                    # of this counter on the current sv.CPU
                    diff = event[context] - c.perf[context]
                    if context not in p.perf.keys():
                        p.perf[context] = diff
                    else:
                        p.perf[context] += diff
                    if diff > 0:
                        ret[context] = diff

        # exclude swapper process
        if next_tid == 0:
            return ret

        if next_tid not in self.tids:
            p = sv.Process()
            p.tid = next_tid
            p.comm = next_comm
            self.tids[next_tid] = p
        else:
            p = self.tids[next_tid]
            p.comm = next_comm
        p.last_sched = ts
        for q in c.wakeup_queue:
            if q["task"] == p:
                ret["sched_latency"] = ts - q["ts"]
                ret["next_tid"] = next_tid
                c.wakeup_queue.remove(q)
                # stop at the first pending wakeup; continuing to iterate
                # after removing an element would skip entries
                break
        return ret

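    # Dirty-page bookkeeping: clear_dirty_pages() pops up to 'to_clean'
    # entries from the shared dirty_pages["pages"] list and, unless the
    # clearing was triggered by the nr_dirty counter itself ("counter"
    # reason), attaches the cleared pages to every process that currently
    # has a syscall in progress.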
    def clear_dirty_pages(self, to_clean, reason):
        cleaned = []
#        print("%s Cleaning nr : %d, current : %d, base : %d, "
#              "cleaning %d, global %d" %
#              (ns_to_hour_nsec(event.timestamp), nr, current,
#               self.dirty_pages["base_nr_dirty"],
#               to_clean, self.dirty_pages["global_nr_dirty"]))
        if to_clean > len(self.dirty_pages["pages"]):
            to_clean = len(self.dirty_pages["pages"])
        for i in range(to_clean):
            a = self.dirty_pages["pages"].pop(0)
            cleaned.append(a)

        # don't account background kernel threads emptying the
        # page cache
        if reason == "counter":
            return

        # flag all processes with a syscall in progress
        for p in self.tids.values():
            if len(p.current_syscall.keys()) == 0:
                continue
            p.current_syscall["pages_cleared"] = cleaned
        return

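    # track_dirty_pages() follows the nr_dirty counter exposed as an event
    # context: the first value seen initialises both the base and global
    # levels, increases raise the global level, and a drop below the base
    # lowers both. When the nr_dirty context is absent, the page list is
    # merely capped at its last 1000 entries. The commented-out block below
    # is an earlier attempt at turning counter drops into page clearing.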
    def track_dirty_pages(self, event):
        if "pages" not in self.dirty_pages.keys():
            return
        if "nr_dirty" not in event.keys():
            # if the context is not available, only keep the
            # last 1000 pages inserted (arbitrary)
            if len(self.dirty_pages["pages"]) > 1000:
                for i in range(len(self.dirty_pages["pages"]) - 1000):
                    self.dirty_pages["pages"].pop(0)
            return
        nr = event["nr_dirty"]
#        current = len(self.dirty_pages["pages"])

        if self.dirty_pages["global_nr_dirty"] == -1:
            self.dirty_pages["global_nr_dirty"] = nr
            self.dirty_pages["base_nr_dirty"] = nr
            return

        # only cleanup when the counter goes down
        if nr >= self.dirty_pages["global_nr_dirty"]:
            self.dirty_pages["global_nr_dirty"] = nr
            return

        if nr <= self.dirty_pages["base_nr_dirty"]:
            self.dirty_pages["base_nr_dirty"] = nr
            self.dirty_pages["global_nr_dirty"] = nr
#            to_clean = current
#        elif (self.dirty_pages["global_nr_dirty"] - nr) < 0:
#            to_clean = current
#        else:
#            to_clean = self.dirty_pages["global_nr_dirty"] - nr
#        if to_clean > 0:
#            self.clear_dirty_pages(to_clean, "counter")
        self.dirty_pages["global_nr_dirty"] = nr

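    # _process_sched_switch() ties the pieces together: per-tid accounting
    # runs first (so per-task perf deltas are computed against the previous
    # per-CPU counter values), then per-CPU accounting, then dirty-page
    # tracking. The returned dict may carry perf_* deltas, 'sched_latency'
    # and 'next_tid'.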
    def _process_sched_switch(self, event):
        """Handle a sched_switch event; return a dict of changed values"""
        prev_tid = event["prev_tid"]
        next_comm = event["next_comm"]
        next_tid = event["next_tid"]
        cpu_id = event["cpu_id"]
        ret = {}

        self.sched_switch_per_tid(event.timestamp, prev_tid,
                                  next_tid, next_comm,
                                  cpu_id, event, ret)
        # because of the perf events check, the sv.CPU analysis must run
        # after the per-tid analysis
        self.sched_switch_per_cpu(cpu_id, event.timestamp, next_tid, event)
        if next_tid > 0:
            self.tids[next_tid].prev_tid = prev_tid
        self.track_dirty_pages(event)

        return ret

    def _process_sched_migrate_task(self, event):
        tid = event["tid"]
        if tid not in self.tids:
            p = sv.Process()
            p.tid = tid
            p.comm = event["comm"]
            self.tids[tid] = p
        else:
            p = self.tids[tid]
        p.migrate_count += 1

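    # sched_wakeup/sched_wakeup_new: remember when a task was woken up on its
    # target CPU. sched_switch_per_tid() consumes these wakeup_queue entries
    # to compute the wakeup-to-schedule latency.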
    def _process_sched_wakeup(self, event):
        """Store the sched_wakeup info needed to compute scheduling
        latencies"""
        target_cpu = event["target_cpu"]
        tid = event["tid"]
        if target_cpu not in self.cpus.keys():
            c = sv.CPU()
            c.cpu_id = target_cpu
            self.cpus[target_cpu] = c
        else:
            c = self.cpus[target_cpu]

        if tid not in self.tids:
            p = sv.Process()
            p.tid = tid
            self.tids[tid] = p
        else:
            p = self.tids[tid]
        c.wakeup_queue.append({"ts": event.timestamp, "task": p})

    def fix_process(self, name, tid, pid):
        if tid not in self.tids:
            p = sv.Process()
            p.tid = tid
            self.tids[tid] = p
        else:
            p = self.tids[tid]
        p.pid = pid
        p.comm = name

        if pid not in self.tids:
            p = sv.Process()
            p.tid = pid
            self.tids[pid] = p
        else:
            p = self.tids[pid]
        p.pid = pid
        p.comm = name

    def dup_fd(self, fd):
        f = sv.FD()
        f.filename = fd.filename
        f.fd = fd.fd
        f.fdtype = fd.fdtype
        return f

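    # sched_process_fork: create the child task and give it copies of the
    # parent's file descriptors (mirroring what fork() does in the kernel),
    # after making sure the parent's tid/pid entries exist.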
    def _process_sched_process_fork(self, event):
        child_tid = event["child_tid"]
        child_pid = event["child_pid"]
        child_comm = event["child_comm"]
        parent_pid = event["parent_pid"]
        parent_tid = event["parent_tid"]
        parent_comm = event["parent_comm"]
        f = sv.Process()
        f.tid = child_tid
        f.pid = child_pid
        f.comm = child_comm

        # make sure the parent exists
        self.fix_process(parent_comm, parent_tid, parent_pid)
        p = self.tids[parent_pid]
        for fd in p.fds.keys():
            f.fds[fd] = self.dup_fd(p.fds[fd])
            f.fds[fd].parent = parent_pid

        self.tids[child_tid] = f

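    # sched_process_exec: refresh the task's comm (when the procname context
    # is available) and drop every FD marked close-on-exec, as execve() does.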
    def _process_sched_process_exec(self, event):
        tid = event["tid"]
        if tid not in self.tids:
            p = sv.Process()
            p.tid = tid
            self.tids[tid] = p
        else:
            p = self.tids[tid]
        if "procname" in event.keys():
            p.comm = event["procname"]
        toremove = []
        for fd in p.fds.keys():
            if p.fds[fd].cloexec == 1:
                toremove.append(fd)
        for fd in toremove:
            p.fds.pop(fd, None)
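

# The sketch below is not part of the original module; it is a minimal,
# hypothetical example of how this provider could be driven from a CTF trace
# with the babeltrace 1.x Python bindings. The SimpleNamespace stand-in for
# the automaton state and the assumption that sp.StateProvider dispatches
# events by name are illustrative only; the real linuxautomaton state classes
# should be used in practice.
if __name__ == '__main__':
    import sys
    from types import SimpleNamespace
    from babeltrace import TraceCollection

    if len(sys.argv) != 2:
        sys.exit('usage: sched.py <path-to-ctf-trace>')

    # minimal stand-in for the state object expected by __init__()
    state = SimpleNamespace(
        cpus={}, tids={},
        dirty_pages={'pages': [], 'global_nr_dirty': -1, 'base_nr_dirty': -1})
    provider = SchedStateProvider(state)

    tc = TraceCollection()
    tc.add_traces_recursive(sys.argv[1], 'ctf')
    handled = ('sched_switch', 'sched_migrate_task', 'sched_wakeup',
               'sched_wakeup_new', 'sched_process_fork', 'sched_process_exec')
    for event in tc.events:
        # only feed the events this provider registered callbacks for
        if event.name in handled:
            provider.process_event(event)

    # dump the per-tid CPU time accumulated by the provider
    for tid, proc in state.tids.items():
        print('%s (%d): %d ns on CPU' % (proc.comm, tid, proc.cpu_ns))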