1 /*******************************************************************************
2 * Copyright (c) 2015 École Polytechnique de Montréal
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
8 *******************************************************************************/
10 package org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.handlers
;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
17 import org
.eclipse
.core
.runtime
.NullProgressMonitor
;
18 import org
.eclipse
.jdt
.annotation
.Nullable
;
19 import org
.eclipse
.tracecompass
.analysis
.graph
.core
.base
.TmfEdge
;
20 import org
.eclipse
.tracecompass
.analysis
.graph
.core
.base
.TmfEdge
.EdgeType
;
21 import org
.eclipse
.tracecompass
.analysis
.graph
.core
.base
.TmfGraph
;
22 import org
.eclipse
.tracecompass
.analysis
.graph
.core
.base
.TmfVertex
;
23 import org
.eclipse
.tracecompass
.analysis
.graph
.core
.base
.TmfVertex
.EdgeDirection
;
24 import org
.eclipse
.tracecompass
.analysis
.os
.linux
.core
.kernelanalysis
.LinuxValues
;
25 import org
.eclipse
.tracecompass
.analysis
.os
.linux
.core
.model
.HostThread
;
26 import org
.eclipse
.tracecompass
.common
.core
.NonNullUtils
;
27 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.TcpEventStrings
;
28 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.building
.LttngKernelExecGraphProvider
;
29 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.building
.LttngKernelExecGraphProvider
.Context
;
30 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.building
.LttngKernelExecGraphProvider
.ProcessStatus
;
31 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.model
.EventField
;
32 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.model
.LttngInterruptContext
;
33 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.model
.LttngSystemModel
;
34 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.analysis
.graph
.model
.LttngWorker
;
35 import org
.eclipse
.tracecompass
.internal
.lttng2
.kernel
.core
.trace
.layout
.LttngEventLayout
;
36 import org
.eclipse
.tracecompass
.tmf
.core
.event
.ITmfEvent
;
37 import org
.eclipse
.tracecompass
.tmf
.core
.event
.aspect
.TmfCpuAspect
;
38 import org
.eclipse
.tracecompass
.tmf
.core
.event
.matching
.IMatchProcessingUnit
;
39 import org
.eclipse
.tracecompass
.tmf
.core
.event
.matching
.TmfEventDependency
;
40 import org
.eclipse
.tracecompass
.tmf
.core
.event
.matching
.TmfEventMatching
;
41 import org
.eclipse
.tracecompass
.tmf
.core
.trace
.ITmfTrace
;
42 import org
.eclipse
.tracecompass
.tmf
.core
.trace
.TmfTraceUtils
;
44 import com
.google
.common
.collect
.HashBasedTable
;
45 import com
.google
.common
.collect
.Table
;
48 * Event handler that actually builds the execution graph from the events
50 * @author Francis Giraldeau
51 * @author Geneviève Bastien
53 public class TraceEventHandlerExecutionGraph
extends BaseHandler
{
56 * The following IRQ constants was found empirically.
58 * TODO: other IRQ values should be determined from the lttng_statedump_interrupt events.
60 private static final int IRQ_TIMER
= 0;
62 private static final NullProgressMonitor DEFAULT_PROGRESS_MONITOR
= new NullProgressMonitor();
64 private final Table
<String
, Integer
, LttngWorker
> fKernel
;
65 private final IMatchProcessingUnit fMatchProcessing
;
66 private Map
<ITmfEvent
, TmfVertex
> fTcpNodes
;
67 private TmfEventMatching fTcpMatching
;
73 * The parent graph provider
75 public TraceEventHandlerExecutionGraph(LttngKernelExecGraphProvider provider
) {
77 fKernel
= NonNullUtils
.checkNotNull(HashBasedTable
.create());
79 fTcpNodes
= new HashMap
<>();
80 fMatchProcessing
= new IMatchProcessingUnit() {
83 public void matchingEnded() {
87 public int countMatches() {
92 public void addMatch(@Nullable TmfEventDependency match
) {
96 TmfVertex output
= fTcpNodes
.remove(match
.getSourceEvent());
97 TmfVertex input
= fTcpNodes
.remove(match
.getDestinationEvent());
98 if (output
!= null && input
!= null) {
99 output
.linkVertical(input
).setType(EdgeType
.NETWORK
);
104 public void init(Collection
<ITmfTrace
> fTraces
) {
110 ITmfTrace trace
= provider
.getTrace();
111 fTcpMatching
= new TmfEventMatching(Collections
.singleton(trace
), fMatchProcessing
);
112 fTcpMatching
.initMatching();
115 private LttngWorker
getOrCreateKernelWorker(ITmfEvent event
, Integer cpu
) {
116 String host
= event
.getTrace().getHostId();
117 LttngWorker worker
= fKernel
.get(host
, cpu
);
118 if (worker
== null) {
119 HostThread ht
= new HostThread(host
, -1);
120 worker
= new LttngWorker(ht
, "kernel/" + cpu
, event
.getTimestamp().getValue()); //$NON-NLS-1$
121 worker
.setStatus(ProcessStatus
.RUN
);
123 fKernel
.put(host
, cpu
, worker
);
129 public void handleEvent(ITmfEvent ev
) {
130 String eventName
= ev
.getName();
131 LttngEventLayout eventLayout
= getProvider().getEventLayout();
133 if (eventName
.equals(eventLayout
.eventSchedSwitch())) {
134 handleSchedSwitch(ev
);
135 } else if (eventName
.equals(eventLayout
.eventSchedProcessTTWU())) {
136 if (traceHasEventSchedTTWU(ev
.getTrace())) {
137 handleSchedWakeup(ev
);
139 } else if (eventName
.equals(eventLayout
.eventSchedProcessWakeup())) {
140 if (!traceHasEventSchedTTWU(ev
.getTrace())) {
141 handleSchedWakeup(ev
);
143 } else if (eventName
.equals(eventLayout
.eventSchedProcessWakeupNew())) {
144 if (!traceHasEventSchedTTWU(ev
.getTrace())) {
145 handleSchedWakeup(ev
);
147 } else if (eventName
.equals(eventLayout
.eventSoftIrqEntry())) {
148 handleSoftirqEntry(ev
);
149 } else if (eventName
.equals(TcpEventStrings
.INET_SOCK_LOCAL_IN
) ||
150 eventName
.equals(TcpEventStrings
.NETIF_RECEIVE_SKB
)) {
151 handleInetSockLocalIn(ev
);
152 } else if (eventName
.equals(TcpEventStrings
.INET_SOCK_LOCAL_OUT
) ||
153 eventName
.equals(TcpEventStrings
.NET_DEV_QUEUE
)) {
154 handleInetSockLocalOut(ev
);
158 private TmfVertex
stateExtend(LttngWorker task
, long ts
) {
159 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
160 TmfVertex node
= new TmfVertex(ts
);
161 ProcessStatus status
= task
.getStatus();
162 graph
.append(task
, node
, resolveProcessStatus(status
));
166 private TmfVertex
stateChange(LttngWorker task
, long ts
) {
167 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
168 TmfVertex node
= new TmfVertex(ts
);
169 ProcessStatus status
= task
.getOldStatus();
170 graph
.append(task
, node
, resolveProcessStatus(status
));
174 private static EdgeType
resolveProcessStatus(ProcessStatus status
) {
175 EdgeType ret
= EdgeType
.UNKNOWN
;
181 ret
= EdgeType
.RUNNING
;
184 ret
= EdgeType
.UNKNOWN
;
187 ret
= EdgeType
.BLOCKED
;
191 ret
= EdgeType
.PREEMPTED
;
194 ret
= EdgeType
.UNKNOWN
;
202 private void handleSchedSwitch(ITmfEvent event
) {
203 String host
= event
.getTrace().getHostId();
204 long ts
= event
.getTimestamp().getValue();
205 LttngEventLayout eventLayout
= getProvider().getEventLayout();
206 LttngSystemModel system
= getProvider().getSystem();
208 Integer next
= EventField
.getInt(event
, eventLayout
.fieldNextTid());
209 Integer prev
= EventField
.getInt(event
, eventLayout
.fieldPrevTid());
211 LttngWorker nextTask
= system
.findWorker(new HostThread(host
, next
));
212 LttngWorker prevTask
= system
.findWorker(new HostThread(host
, prev
));
214 if (prevTask
== null || nextTask
== null) {
217 stateChange(prevTask
, ts
);
218 stateChange(nextTask
, ts
);
221 private void handleSchedWakeup(ITmfEvent event
) {
222 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
223 String host
= event
.getTrace().getHostId();
224 Object cpuObj
= TmfTraceUtils
.resolveEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
);
225 if (cpuObj
== null) {
226 throw new NullPointerException();
228 Integer cpu
= (Integer
) cpuObj
;
229 LttngEventLayout eventLayout
= getProvider().getEventLayout();
230 LttngSystemModel system
= getProvider().getSystem();
232 long ts
= event
.getTimestamp().getValue();
233 Integer tid
= EventField
.getInt(event
, eventLayout
.fieldTid());
235 LttngWorker target
= system
.findWorker(new HostThread(host
, tid
));
236 LttngWorker current
= system
.getWorkerOnCpu(host
, cpu
);
237 if (target
== null) {
241 ProcessStatus status
= target
.getOldStatus();
244 waitFork(graph
, ts
, target
, current
);
247 waitBlocked(event
, graph
, host
, cpu
, eventLayout
, system
, ts
, target
, current
);
261 private void waitBlocked(ITmfEvent event
, TmfGraph graph
, String host
, Integer cpu
, LttngEventLayout eventLayout
, LttngSystemModel system
, long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
262 LttngInterruptContext context
= system
.peekContextStack(host
, cpu
);
263 switch (context
.getContext()) {
265 // shortcut of appendTaskNode: resolve blocking source in situ
266 graph
.append(target
, new TmfVertex(ts
), EdgeType
.TIMER
);
269 irq(graph
, eventLayout
, ts
, target
, context
);
272 softIrq(event
, graph
, cpu
, eventLayout
, ts
, target
, context
);
275 none(ts
, target
, current
);
282 private void softIrq(ITmfEvent event
, TmfGraph graph
, Integer cpu
, LttngEventLayout eventLayout
, long ts
, LttngWorker target
, LttngInterruptContext context
) {
283 TmfVertex wup
= new TmfVertex(ts
);
284 TmfEdge l2
= graph
.append(target
, wup
);
286 int vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldVec()).intValue();
287 l2
.setType(resolveSoftirq(vec
));
289 // special case for network related softirq
290 Long vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldVec());
291 if (vec
== LinuxValues
.SOFTIRQ_NET_RX
|| vec
== LinuxValues
.SOFTIRQ_NET_TX
) {
292 // create edge if wake up is caused by incoming packet
293 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
294 TmfVertex tail
= graph
.getTail(k
);
295 if (tail
!= null && tail
.getEdge(EdgeDirection
.INCOMING_VERTICAL_EDGE
) != null) {
296 TmfVertex kwup
= stateExtend(k
, event
.getTimestamp().getValue());
297 kwup
.linkVertical(wup
);
302 private void none(long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
303 // task context wakeup
304 if (current
!= null) {
305 TmfVertex n0
= stateExtend(current
, ts
);
306 TmfVertex n1
= stateChange(target
, ts
);
309 stateChange(target
, ts
);
313 private static void irq(TmfGraph graph
, LttngEventLayout eventLayout
, long ts
, LttngWorker target
, LttngInterruptContext context
) {
314 TmfEdge link
= graph
.append(target
, new TmfVertex(ts
));
316 int vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldIrq()).intValue();
317 link
.setType(resolveIRQ(vec
));
321 private void waitFork(TmfGraph graph
, long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
322 if (current
!= null) {
323 TmfVertex n0
= stateExtend(current
, ts
);
324 TmfVertex n1
= stateChange(target
, ts
);
327 stateChange(target
, ts
);
331 private static EdgeType
resolveIRQ(int vec
) {
332 EdgeType ret
= EdgeType
.UNKNOWN
;
335 ret
= EdgeType
.INTERRUPTED
;
338 ret
= EdgeType
.UNKNOWN
;
344 private static EdgeType
resolveSoftirq(int vec
) {
345 EdgeType ret
= EdgeType
.UNKNOWN
;
347 case LinuxValues
.SOFTIRQ_HRTIMER
:
348 case LinuxValues
.SOFTIRQ_TIMER
:
349 ret
= EdgeType
.TIMER
;
351 case LinuxValues
.SOFTIRQ_BLOCK
:
352 case LinuxValues
.SOFTIRQ_BLOCK_IOPOLL
:
353 ret
= EdgeType
.BLOCK_DEVICE
;
355 case LinuxValues
.SOFTIRQ_NET_RX
:
356 case LinuxValues
.SOFTIRQ_NET_TX
:
357 ret
= EdgeType
.NETWORK
;
359 case LinuxValues
.SOFTIRQ_SCHED
:
360 ret
= EdgeType
.INTERRUPTED
;
363 ret
= EdgeType
.UNKNOWN
;
369 private void handleInetSockLocalIn(ITmfEvent event
) {
370 Object cpuObj
= TmfTraceUtils
.resolveEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
);
371 if (cpuObj
== null) {
372 throw new NullPointerException();
374 Integer cpu
= (Integer
) cpuObj
;
375 String host
= event
.getTrace().getHostId();
376 LttngSystemModel system
= getProvider().getSystem();
378 LttngInterruptContext intCtx
= system
.peekContextStack(host
, cpu
);
379 Context context
= intCtx
.getContext();
380 if (context
== Context
.SOFTIRQ
) {
381 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
382 TmfVertex endpoint
= stateExtend(k
, event
.getTimestamp().getValue());
383 fTcpNodes
.put(event
, endpoint
);
384 // TODO add actual progress monitor
385 fTcpMatching
.matchEvent(event
, event
.getTrace(), DEFAULT_PROGRESS_MONITOR
);
389 private void handleInetSockLocalOut(ITmfEvent event
) {
390 Object cpuObj
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
391 Integer cpu
= (Integer
) cpuObj
;
392 String host
= event
.getTrace().getHostId();
393 LttngSystemModel system
= getProvider().getSystem();
395 LttngInterruptContext intCtx
= system
.peekContextStack(host
, cpu
);
396 Context context
= intCtx
.getContext();
398 LttngWorker sender
= null;
399 if (context
== Context
.NONE
) {
400 sender
= system
.getWorkerOnCpu(event
.getTrace().getHostId(), cpu
);
401 } else if (context
== Context
.SOFTIRQ
) {
402 sender
= getOrCreateKernelWorker(event
, cpu
);
404 if (sender
== null) {
407 TmfVertex endpoint
= stateExtend(sender
, event
.getTimestamp().getValue());
408 fTcpNodes
.put(event
, endpoint
);
409 // TODO, add actual progress monitor
410 fTcpMatching
.matchEvent(event
, event
.getTrace(), new NullProgressMonitor());
413 private void handleSoftirqEntry(ITmfEvent event
) {
414 LttngEventLayout eventLayout
= getProvider().getEventLayout();
415 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
416 Long vec
= EventField
.getLong(event
, eventLayout
.fieldVec());
417 if (vec
== LinuxValues
.SOFTIRQ_NET_RX
|| vec
== LinuxValues
.SOFTIRQ_NET_TX
) {
418 Object cpuObj
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
419 Integer cpu
= (Integer
) cpuObj
;
420 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
421 graph
.add(k
, new TmfVertex(event
.getTimestamp().getValue()));