Fix some null warnings
lttng/org.eclipse.tracecompass.lttng2.kernel.core/src/org/eclipse/tracecompass/internal/lttng2/kernel/core/analysis/graph/handlers/TraceEventHandlerExecutionGraph.java
/*******************************************************************************
 * Copyright (c) 2015 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/

package org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.handlers;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfEdge;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfEdge.EdgeType;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfGraph;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfVertex;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfVertex.EdgeDirection;
import org.eclipse.tracecompass.analysis.os.linux.core.kernelanalysis.LinuxValues;
import org.eclipse.tracecompass.analysis.os.linux.core.model.HostThread;
import org.eclipse.tracecompass.common.core.NonNullUtils;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.TcpEventStrings;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider.Context;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider.ProcessStatus;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.EventField;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngInterruptContext;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngSystemModel;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngWorker;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.trace.layout.LttngEventLayout;
import org.eclipse.tracecompass.tmf.core.event.ITmfEvent;
import org.eclipse.tracecompass.tmf.core.event.aspect.TmfCpuAspect;
import org.eclipse.tracecompass.tmf.core.event.matching.IMatchProcessingUnit;
import org.eclipse.tracecompass.tmf.core.event.matching.TmfEventDependency;
import org.eclipse.tracecompass.tmf.core.event.matching.TmfEventMatching;
import org.eclipse.tracecompass.tmf.core.trace.ITmfTrace;
import org.eclipse.tracecompass.tmf.core.trace.TmfTraceUtils;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

/**
 * Event handler that actually builds the execution graph from the events
 *
 * @author Francis Giraldeau
 * @author Geneviève Bastien
 */
public class TraceEventHandlerExecutionGraph extends BaseHandler {

    /*
     * The following IRQ constant was found empirically.
     *
     * TODO: other IRQ values should be determined from the
     * lttng_statedump_interrupt events.
     */
    private static final int IRQ_TIMER = 0;

    private static final NullProgressMonitor DEFAULT_PROGRESS_MONITOR = new NullProgressMonitor();

    private final Table<String, Integer, LttngWorker> fKernel;
    private final IMatchProcessingUnit fMatchProcessing;
    private Map<ITmfEvent, TmfVertex> fTcpNodes;
    private TmfEventMatching fTcpMatching;

    /**
     * Constructor
     *
     * @param provider
     *            The parent graph provider
     */
    public TraceEventHandlerExecutionGraph(LttngKernelExecGraphProvider provider) {
        super(provider);
        fKernel = NonNullUtils.checkNotNull(HashBasedTable.create());

        fTcpNodes = new HashMap<>();
        fMatchProcessing = new IMatchProcessingUnit() {

            @Override
            public void matchingEnded() {
            }

            @Override
            public int countMatches() {
                return 0;
            }

            @Override
            public void addMatch(@Nullable TmfEventDependency match) {
                if (match == null) {
                    return;
                }
                TmfVertex output = fTcpNodes.remove(match.getSourceEvent());
                TmfVertex input = fTcpNodes.remove(match.getDestinationEvent());
                if (output != null && input != null) {
                    output.linkVertical(input).setType(EdgeType.NETWORK);
                }
            }

            @Override
            public void init(Collection<ITmfTrace> traces) {

            }

        };

        ITmfTrace trace = provider.getTrace();
        fTcpMatching = new TmfEventMatching(Collections.singleton(trace), fMatchProcessing);
        fTcpMatching.initMatching();
    }
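    /**
     * Get the pseudo-worker representing the kernel on the given CPU for the
     * event's host, creating and registering it on first use.
     */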
    private LttngWorker getOrCreateKernelWorker(ITmfEvent event, Integer cpu) {
        String host = event.getTrace().getHostId();
        LttngWorker worker = fKernel.get(host, cpu);
        if (worker == null) {
            HostThread ht = new HostThread(host, -1);
            worker = new LttngWorker(ht, "kernel/" + cpu, event.getTimestamp().getValue()); //$NON-NLS-1$
            worker.setStatus(ProcessStatus.RUN);

            fKernel.put(host, cpu, worker);
        }
        return worker;
    }
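    /*
     * Dispatch each event to the handler for its type. Wakeup events are taken
     * either from sched_ttwu or from the sched_wakeup* events, depending on
     * which ones the trace provides.
     */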
    @Override
    public void handleEvent(ITmfEvent ev) {
        String eventName = ev.getName();
        LttngEventLayout eventLayout = getProvider().getEventLayout();

        if (eventName.equals(eventLayout.eventSchedSwitch())) {
            handleSchedSwitch(ev);
        } else if (eventName.equals(eventLayout.eventSchedProcessTTWU())) {
            if (traceHasEventSchedTTWU(ev.getTrace())) {
                handleSchedWakeup(ev);
            }
        } else if (eventName.equals(eventLayout.eventSchedProcessWakeup())) {
            if (!traceHasEventSchedTTWU(ev.getTrace())) {
                handleSchedWakeup(ev);
            }
        } else if (eventName.equals(eventLayout.eventSchedProcessWakeupNew())) {
            if (!traceHasEventSchedTTWU(ev.getTrace())) {
                handleSchedWakeup(ev);
            }
        } else if (eventName.equals(eventLayout.eventSoftIrqEntry())) {
            handleSoftirqEntry(ev);
        } else if (eventName.equals(TcpEventStrings.INET_SOCK_LOCAL_IN) ||
                eventName.equals(TcpEventStrings.NETIF_RECEIVE_SKB)) {
            handleInetSockLocalIn(ev);
        } else if (eventName.equals(TcpEventStrings.INET_SOCK_LOCAL_OUT) ||
                eventName.equals(TcpEventStrings.NET_DEV_QUEUE)) {
            handleInetSockLocalOut(ev);
        }
    }
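    /**
     * Append a new vertex to the task's timeline, labelling the edge from the
     * task's current status.
     */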
    private TmfVertex stateExtend(LttngWorker task, long ts) {
        TmfGraph graph = NonNullUtils.checkNotNull(getProvider().getAssignedGraph());
        TmfVertex node = new TmfVertex(ts);
        ProcessStatus status = task.getStatus();
        graph.append(task, node, resolveProcessStatus(status));
        return node;
    }
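    /**
     * Append a new vertex to the task's timeline, labelling the edge from the
     * task's previous (old) status.
     */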
    private TmfVertex stateChange(LttngWorker task, long ts) {
        TmfGraph graph = NonNullUtils.checkNotNull(getProvider().getAssignedGraph());
        TmfVertex node = new TmfVertex(ts);
        ProcessStatus status = task.getOldStatus();
        graph.append(task, node, resolveProcessStatus(status));
        return node;
    }
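    /**
     * Map a process status to the edge type used to label the corresponding
     * graph segment.
     */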
    private static EdgeType resolveProcessStatus(ProcessStatus status) {
        EdgeType ret = EdgeType.UNKNOWN;
        switch (status) {
        case DEAD:
            break;
        case EXIT:
        case RUN:
            ret = EdgeType.RUNNING;
            break;
        case UNKNOWN:
            ret = EdgeType.UNKNOWN;
            break;
        case WAIT_BLOCKED:
            ret = EdgeType.BLOCKED;
            break;
        case WAIT_CPU:
        case WAIT_FORK:
            ret = EdgeType.PREEMPTED;
            break;
        case ZOMBIE:
            ret = EdgeType.UNKNOWN;
            break;
        default:
            break;
        }
        return ret;
    }
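    /**
     * A sched_switch marks a state change for both the task leaving the CPU
     * and the task being scheduled in.
     */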
    private void handleSchedSwitch(ITmfEvent event) {
        String host = event.getTrace().getHostId();
        long ts = event.getTimestamp().getValue();
        LttngEventLayout eventLayout = getProvider().getEventLayout();
        LttngSystemModel system = getProvider().getSystem();

        Integer next = EventField.getInt(event, eventLayout.fieldNextTid());
        Integer prev = EventField.getInt(event, eventLayout.fieldPrevTid());

        LttngWorker nextTask = system.findWorker(new HostThread(host, next));
        LttngWorker prevTask = system.findWorker(new HostThread(host, prev));

        if (prevTask == null || nextTask == null) {
            return;
        }
        stateChange(prevTask, ts);
        stateChange(nextTask, ts);
    }
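    /**
     * Handle a wakeup event: depending on the previous status of the woken
     * task (newly forked or blocked), link it to the worker or interrupt
     * context that caused the wakeup.
     */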
    private void handleSchedWakeup(ITmfEvent event) {
        TmfGraph graph = NonNullUtils.checkNotNull(getProvider().getAssignedGraph());
        String host = event.getTrace().getHostId();
        Object cpuObj = TmfTraceUtils.resolveEventAspectOfClassForEvent(event.getTrace(), TmfCpuAspect.class, event);
        if (cpuObj == null) {
            throw new NullPointerException();
        }
        Integer cpu = (Integer) cpuObj;
        LttngEventLayout eventLayout = getProvider().getEventLayout();
        LttngSystemModel system = getProvider().getSystem();

        long ts = event.getTimestamp().getValue();
        Integer tid = EventField.getInt(event, eventLayout.fieldTid());

        LttngWorker target = system.findWorker(new HostThread(host, tid));
        LttngWorker current = system.getWorkerOnCpu(host, cpu);
        if (target == null) {
            return;
        }

        ProcessStatus status = target.getOldStatus();
        switch (status) {
        case WAIT_FORK:
            waitFork(graph, ts, target, current);
            break;
        case WAIT_BLOCKED:
            waitBlocked(event, graph, host, cpu, eventLayout, system, ts, target, current);
            break;
        case DEAD:
        case EXIT:
        case RUN:
        case UNKNOWN:
        case WAIT_CPU:
        case ZOMBIE:
            break;
        default:
            break;
        }
    }
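    /**
     * Wakeup of a blocked task: the blocking source depends on the interrupt
     * context at the time of the wakeup (timer, IRQ, softirq or none).
     */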
    private void waitBlocked(ITmfEvent event, TmfGraph graph, String host, Integer cpu, LttngEventLayout eventLayout, LttngSystemModel system, long ts, LttngWorker target, @Nullable LttngWorker current) {
        LttngInterruptContext context = system.peekContextStack(host, cpu);
        switch (context.getContext()) {
        case HRTIMER:
            // shortcut of appendTaskNode: resolve blocking source in situ
            graph.append(target, new TmfVertex(ts), EdgeType.TIMER);
            break;
        case IRQ:
            irq(graph, eventLayout, ts, target, context);
            break;
        case SOFTIRQ:
            softIrq(event, graph, cpu, eventLayout, ts, target, context);
            break;
        case NONE:
            none(ts, target, current);
            break;
        default:
            break;
        }
    }
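    /**
     * Wakeup performed from softirq context: label the blocked interval with
     * the softirq type and, for network softirqs, link the wakeup to the
     * kernel worker that received the packet.
     */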
    private void softIrq(ITmfEvent event, TmfGraph graph, Integer cpu, LttngEventLayout eventLayout, long ts, LttngWorker target, LttngInterruptContext context) {
        TmfVertex wup = new TmfVertex(ts);
        TmfEdge l2 = graph.append(target, wup);
        if (l2 != null) {
            int vec = EventField.getLong(context.getEvent(), eventLayout.fieldVec()).intValue();
            l2.setType(resolveSoftirq(vec));
        }
        // special case for network-related softirqs
        Long vec = EventField.getLong(context.getEvent(), eventLayout.fieldVec());
        if (vec == LinuxValues.SOFTIRQ_NET_RX || vec == LinuxValues.SOFTIRQ_NET_TX) {
            // create an edge if the wakeup is caused by an incoming packet
            LttngWorker k = getOrCreateKernelWorker(event, cpu);
            TmfVertex tail = graph.getTail(k);
            if (tail != null && tail.getEdge(EdgeDirection.INCOMING_VERTICAL_EDGE) != null) {
                TmfVertex kwup = stateExtend(k, event.getTimestamp().getValue());
                kwup.linkVertical(wup);
            }
        }
    }
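    /**
     * Wakeup performed from task context: link the worker currently running on
     * the CPU, when known, to the woken task.
     */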
    private void none(long ts, LttngWorker target, @Nullable LttngWorker current) {
        // task context wakeup
        if (current != null) {
            TmfVertex n0 = stateExtend(current, ts);
            TmfVertex n1 = stateChange(target, ts);
            n0.linkVertical(n1);
        } else {
            stateChange(target, ts);
        }
    }
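    /**
     * Wakeup performed from IRQ context: label the blocked interval according
     * to the IRQ number.
     */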
    private static void irq(TmfGraph graph, LttngEventLayout eventLayout, long ts, LttngWorker target, LttngInterruptContext context) {
        TmfEdge link = graph.append(target, new TmfVertex(ts));
        if (link != null) {
            int vec = EventField.getLong(context.getEvent(), eventLayout.fieldIrq()).intValue();
            link.setType(resolveIRQ(vec));
        }
    }
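    /**
     * Wakeup of a newly forked task: link the worker currently on the CPU
     * (typically the parent), when known, to the woken task.
     */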
    private void waitFork(TmfGraph graph, long ts, LttngWorker target, @Nullable LttngWorker current) {
        if (current != null) {
            TmfVertex n0 = stateExtend(current, ts);
            TmfVertex n1 = stateChange(target, ts);
            graph.link(n0, n1);
        } else {
            stateChange(target, ts);
        }
    }
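    /**
     * Map an IRQ number to an edge type; only the timer IRQ is currently
     * distinguished.
     */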
    private static EdgeType resolveIRQ(int vec) {
        EdgeType ret = EdgeType.UNKNOWN;
        switch (vec) {
        case IRQ_TIMER:
            ret = EdgeType.INTERRUPTED;
            break;
        default:
            ret = EdgeType.UNKNOWN;
            break;
        }
        return ret;
    }
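    /**
     * Map a softirq vector to the edge type describing what the task was
     * blocked on (timer, block device, network, etc.).
     */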
    private static EdgeType resolveSoftirq(int vec) {
        EdgeType ret = EdgeType.UNKNOWN;
        switch (vec) {
        case LinuxValues.SOFTIRQ_HRTIMER:
        case LinuxValues.SOFTIRQ_TIMER:
            ret = EdgeType.TIMER;
            break;
        case LinuxValues.SOFTIRQ_BLOCK:
        case LinuxValues.SOFTIRQ_BLOCK_IOPOLL:
            ret = EdgeType.BLOCK_DEVICE;
            break;
        case LinuxValues.SOFTIRQ_NET_RX:
        case LinuxValues.SOFTIRQ_NET_TX:
            ret = EdgeType.NETWORK;
            break;
        case LinuxValues.SOFTIRQ_SCHED:
            ret = EdgeType.INTERRUPTED;
            break;
        default:
            ret = EdgeType.UNKNOWN;
            break;
        }
        return ret;
    }
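    /**
     * Packet reception: when it happens in softirq context, record the
     * receiving endpoint and try to match it with the corresponding send
     * event.
     */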
    private void handleInetSockLocalIn(ITmfEvent event) {
        Object cpuObj = TmfTraceUtils.resolveEventAspectOfClassForEvent(event.getTrace(), TmfCpuAspect.class, event);
        if (cpuObj == null) {
            throw new NullPointerException();
        }
        Integer cpu = (Integer) cpuObj;
        String host = event.getTrace().getHostId();
        LttngSystemModel system = getProvider().getSystem();

        LttngInterruptContext intCtx = system.peekContextStack(host, cpu);
        Context context = intCtx.getContext();
        if (context == Context.SOFTIRQ) {
            LttngWorker k = getOrCreateKernelWorker(event, cpu);
            TmfVertex endpoint = stateExtend(k, event.getTimestamp().getValue());
            fTcpNodes.put(event, endpoint);
            // TODO: add an actual progress monitor
            fTcpMatching.matchEvent(event, event.getTrace(), DEFAULT_PROGRESS_MONITOR);
        }
    }
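    /**
     * Packet emission: record the sending endpoint (task or kernel worker,
     * depending on the context) and try to match it with the corresponding
     * receive event.
     */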
    private void handleInetSockLocalOut(ITmfEvent event) {
        Object cpuObj = NonNullUtils.checkNotNull(TmfTraceUtils.resolveEventAspectOfClassForEvent(event.getTrace(), TmfCpuAspect.class, event));
        Integer cpu = (Integer) cpuObj;
        String host = event.getTrace().getHostId();
        LttngSystemModel system = getProvider().getSystem();

        LttngInterruptContext intCtx = system.peekContextStack(host, cpu);
        Context context = intCtx.getContext();

        LttngWorker sender = null;
        if (context == Context.NONE) {
            sender = system.getWorkerOnCpu(event.getTrace().getHostId(), cpu);
        } else if (context == Context.SOFTIRQ) {
            sender = getOrCreateKernelWorker(event, cpu);
        }
        if (sender == null) {
            return;
        }
        TmfVertex endpoint = stateExtend(sender, event.getTimestamp().getValue());
        fTcpNodes.put(event, endpoint);
        // TODO: add an actual progress monitor
        fTcpMatching.matchEvent(event, event.getTrace(), DEFAULT_PROGRESS_MONITOR);
    }
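    /**
     * On entry of a network softirq, add a new vertex to the kernel worker of
     * this CPU so that incoming packets can later be attached to it.
     */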
    private void handleSoftirqEntry(ITmfEvent event) {
        LttngEventLayout eventLayout = getProvider().getEventLayout();
        TmfGraph graph = NonNullUtils.checkNotNull(getProvider().getAssignedGraph());
        Long vec = EventField.getLong(event, eventLayout.fieldVec());
        if (vec == LinuxValues.SOFTIRQ_NET_RX || vec == LinuxValues.SOFTIRQ_NET_TX) {
            Object cpuObj = NonNullUtils.checkNotNull(TmfTraceUtils.resolveEventAspectOfClassForEvent(event.getTrace(), TmfCpuAspect.class, event));
            Integer cpu = (Integer) cpuObj;
            LttngWorker k = getOrCreateKernelWorker(event, cpu);
            graph.add(k, new TmfVertex(event.getTimestamp().getValue()));
        }
    }

}