/*******************************************************************************
 * Copyright (c) 2015 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/
package org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.handlers;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfEdge;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfEdge.EdgeType;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfGraph;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfVertex;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfVertex.EdgeDirection;
import org.eclipse.tracecompass.analysis.os.linux.core.kernelanalysis.LinuxValues;
import org.eclipse.tracecompass.analysis.os.linux.core.model.HostThread;
import org.eclipse.tracecompass.analysis.os.linux.core.trace.IKernelAnalysisEventLayout;
import org.eclipse.tracecompass.common.core.NonNullUtils;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.TcpEventStrings;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider.Context;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider.ProcessStatus;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.EventField;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngInterruptContext;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngSystemModel;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngWorker;
import org.eclipse.tracecompass.tmf.core.event.ITmfEvent;
import org.eclipse.tracecompass.tmf.core.event.aspect.TmfCpuAspect;
import org.eclipse.tracecompass.tmf.core.event.matching.IMatchProcessingUnit;
import org.eclipse.tracecompass.tmf.core.event.matching.TmfEventDependency;
import org.eclipse.tracecompass.tmf.core.event.matching.TmfEventMatching;
import org.eclipse.tracecompass.tmf.core.trace.ITmfTrace;
import org.eclipse.tracecompass.tmf.core.trace.TmfTraceUtils;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
/**
 * Event handler that actually builds the execution graph from the events
 *
 * @author Francis Giraldeau
 * @author Geneviève Bastien
 */
53 public class TraceEventHandlerExecutionGraph
extends BaseHandler
{
56 * The following IRQ constants was found empirically.
58 * TODO: other IRQ values should be determined from the lttng_statedump_interrupt events.
60 private static final int IRQ_TIMER
= 0;
62 private static final NullProgressMonitor DEFAULT_PROGRESS_MONITOR
= new NullProgressMonitor();
64 private final Table
<String
, Integer
, LttngWorker
> fKernel
;
65 private final IMatchProcessingUnit fMatchProcessing
;
66 private Map
<ITmfEvent
, TmfVertex
> fTcpNodes
;
67 private TmfEventMatching fTcpMatching
;
73 * The parent graph provider
75 public TraceEventHandlerExecutionGraph(LttngKernelExecGraphProvider provider
) {
77 fKernel
= NonNullUtils
.checkNotNull(HashBasedTable
.create());
79 fTcpNodes
= new HashMap
<>();
80 fMatchProcessing
= new IMatchProcessingUnit() {
83 public void matchingEnded() {
87 public int countMatches() {
92 public void addMatch(@Nullable TmfEventDependency match
) {
96 TmfVertex output
= fTcpNodes
.remove(match
.getSourceEvent());
97 TmfVertex input
= fTcpNodes
.remove(match
.getDestinationEvent());
98 if (output
!= null && input
!= null) {
99 output
.linkVertical(input
).setType(EdgeType
.NETWORK
);
104 public void init(Collection
<ITmfTrace
> fTraces
) {
110 ITmfTrace trace
= provider
.getTrace();
111 fTcpMatching
= new TmfEventMatching(Collections
.singleton(trace
), fMatchProcessing
);
112 fTcpMatching
.initMatching();
115 private LttngWorker
getOrCreateKernelWorker(ITmfEvent event
, Integer cpu
) {
116 String host
= event
.getTrace().getHostId();
117 LttngWorker worker
= fKernel
.get(host
, cpu
);
118 if (worker
== null) {
119 HostThread ht
= new HostThread(host
, -1);
120 worker
= new LttngWorker(ht
, "kernel/" + cpu
, event
.getTimestamp().getValue()); //$NON-NLS-1$
121 worker
.setStatus(ProcessStatus
.RUN
);
123 fKernel
.put(host
, cpu
, worker
);
129 public void handleEvent(ITmfEvent ev
) {
130 String eventName
= ev
.getName();
131 IKernelAnalysisEventLayout eventLayout
= getProvider().getEventLayout(ev
.getTrace());
133 if (eventName
.equals(eventLayout
.eventSchedSwitch())) {
134 handleSchedSwitch(ev
);
135 } else if (eventName
.equals(eventLayout
.eventSoftIrqEntry())) {
136 handleSoftirqEntry(ev
);
137 } else if (eventName
.equals(TcpEventStrings
.INET_SOCK_LOCAL_IN
) ||
138 eventName
.equals(TcpEventStrings
.NETIF_RECEIVE_SKB
)) {
139 handleInetSockLocalIn(ev
);
140 } else if (eventName
.equals(TcpEventStrings
.INET_SOCK_LOCAL_OUT
) ||
141 eventName
.equals(TcpEventStrings
.NET_DEV_QUEUE
)) {
142 handleInetSockLocalOut(ev
);
143 } else if (isWakeupEvent(ev
)) {
144 handleSchedWakeup(ev
);
148 private TmfVertex
stateExtend(LttngWorker task
, long ts
) {
149 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
150 TmfVertex node
= new TmfVertex(ts
);
151 ProcessStatus status
= task
.getStatus();
152 graph
.append(task
, node
, resolveProcessStatus(status
));
156 private TmfVertex
stateChange(LttngWorker task
, long ts
) {
157 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
158 TmfVertex node
= new TmfVertex(ts
);
159 ProcessStatus status
= task
.getOldStatus();
160 graph
.append(task
, node
, resolveProcessStatus(status
));
164 private static EdgeType
resolveProcessStatus(ProcessStatus status
) {
165 EdgeType ret
= EdgeType
.UNKNOWN
;
171 ret
= EdgeType
.RUNNING
;
174 ret
= EdgeType
.UNKNOWN
;
177 ret
= EdgeType
.BLOCKED
;
181 ret
= EdgeType
.PREEMPTED
;
184 ret
= EdgeType
.UNKNOWN
;
192 private void handleSchedSwitch(ITmfEvent event
) {
193 String host
= event
.getTrace().getHostId();
194 long ts
= event
.getTimestamp().getValue();
195 IKernelAnalysisEventLayout eventLayout
= getProvider().getEventLayout(event
.getTrace());
196 LttngSystemModel system
= getProvider().getSystem();
198 Integer next
= EventField
.getInt(event
, eventLayout
.fieldNextTid());
199 Integer prev
= EventField
.getInt(event
, eventLayout
.fieldPrevTid());
201 LttngWorker nextTask
= system
.findWorker(new HostThread(host
, next
));
202 LttngWorker prevTask
= system
.findWorker(new HostThread(host
, prev
));
204 if (prevTask
== null || nextTask
== null) {
207 stateChange(prevTask
, ts
);
208 stateChange(nextTask
, ts
);
211 private void handleSchedWakeup(ITmfEvent event
) {
212 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
213 String host
= event
.getTrace().getHostId();
214 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
215 IKernelAnalysisEventLayout eventLayout
= getProvider().getEventLayout(event
.getTrace());
216 LttngSystemModel system
= getProvider().getSystem();
218 long ts
= event
.getTimestamp().getValue();
219 Integer tid
= EventField
.getInt(event
, eventLayout
.fieldTid());
221 LttngWorker target
= system
.findWorker(new HostThread(host
, tid
));
222 LttngWorker current
= system
.getWorkerOnCpu(host
, cpu
);
223 if (target
== null) {
227 ProcessStatus status
= target
.getOldStatus();
230 waitFork(graph
, ts
, target
, current
);
233 waitBlocked(event
, graph
, host
, cpu
, eventLayout
, system
, ts
, target
, current
);
247 private void waitBlocked(ITmfEvent event
, TmfGraph graph
, String host
, Integer cpu
, IKernelAnalysisEventLayout eventLayout
, LttngSystemModel system
, long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
248 LttngInterruptContext context
= system
.peekContextStack(host
, cpu
);
249 switch (context
.getContext()) {
251 // shortcut of appendTaskNode: resolve blocking source in situ
252 graph
.append(target
, new TmfVertex(ts
), EdgeType
.TIMER
);
255 irq(graph
, eventLayout
, ts
, target
, context
);
258 softIrq(event
, graph
, cpu
, eventLayout
, ts
, target
, context
);
261 graph
.append(target
, new TmfVertex(ts
), EdgeType
.IPI
);
264 none(ts
, target
, current
);
271 private void softIrq(ITmfEvent event
, TmfGraph graph
, Integer cpu
, IKernelAnalysisEventLayout eventLayout
, long ts
, LttngWorker target
, LttngInterruptContext context
) {
272 TmfVertex wup
= new TmfVertex(ts
);
273 TmfEdge l2
= graph
.append(target
, wup
);
275 int vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldVec()).intValue();
276 l2
.setType(resolveSoftirq(vec
));
278 // special case for network related softirq
279 Long vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldVec());
280 if (vec
== LinuxValues
.SOFTIRQ_NET_RX
|| vec
== LinuxValues
.SOFTIRQ_NET_TX
) {
281 // create edge if wake up is caused by incoming packet
282 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
283 TmfVertex tail
= graph
.getTail(k
);
284 if (tail
!= null && tail
.getEdge(EdgeDirection
.INCOMING_VERTICAL_EDGE
) != null) {
285 TmfVertex kwup
= stateExtend(k
, event
.getTimestamp().getValue());
286 kwup
.linkVertical(wup
);
291 private void none(long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
292 // task context wakeup
293 if (current
!= null) {
294 TmfVertex n0
= stateExtend(current
, ts
);
295 TmfVertex n1
= stateChange(target
, ts
);
298 stateChange(target
, ts
);
302 private static void irq(TmfGraph graph
, IKernelAnalysisEventLayout eventLayout
, long ts
, LttngWorker target
, LttngInterruptContext context
) {
303 TmfEdge link
= graph
.append(target
, new TmfVertex(ts
));
305 int vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldIrq()).intValue();
306 link
.setType(resolveIRQ(vec
));
310 private void waitFork(TmfGraph graph
, long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
311 if (current
!= null) {
312 TmfVertex n0
= stateExtend(current
, ts
);
313 TmfVertex n1
= stateChange(target
, ts
);
316 stateChange(target
, ts
);
320 private static EdgeType
resolveIRQ(int vec
) {
321 EdgeType ret
= EdgeType
.UNKNOWN
;
324 ret
= EdgeType
.INTERRUPTED
;
327 ret
= EdgeType
.UNKNOWN
;
333 private static EdgeType
resolveSoftirq(int vec
) {
334 EdgeType ret
= EdgeType
.UNKNOWN
;
336 case LinuxValues
.SOFTIRQ_HRTIMER
:
337 case LinuxValues
.SOFTIRQ_TIMER
:
338 ret
= EdgeType
.TIMER
;
340 case LinuxValues
.SOFTIRQ_BLOCK
:
341 case LinuxValues
.SOFTIRQ_BLOCK_IOPOLL
:
342 ret
= EdgeType
.BLOCK_DEVICE
;
344 case LinuxValues
.SOFTIRQ_NET_RX
:
345 case LinuxValues
.SOFTIRQ_NET_TX
:
346 ret
= EdgeType
.NETWORK
;
348 case LinuxValues
.SOFTIRQ_SCHED
:
349 ret
= EdgeType
.INTERRUPTED
;
352 ret
= EdgeType
.UNKNOWN
;
358 private void handleInetSockLocalIn(ITmfEvent event
) {
359 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
360 String host
= event
.getTrace().getHostId();
361 LttngSystemModel system
= getProvider().getSystem();
363 LttngInterruptContext intCtx
= system
.peekContextStack(host
, cpu
);
364 Context context
= intCtx
.getContext();
365 if (context
== Context
.SOFTIRQ
) {
366 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
367 TmfVertex endpoint
= stateExtend(k
, event
.getTimestamp().getValue());
368 fTcpNodes
.put(event
, endpoint
);
369 // TODO add actual progress monitor
370 fTcpMatching
.matchEvent(event
, event
.getTrace(), DEFAULT_PROGRESS_MONITOR
);
374 private void handleInetSockLocalOut(ITmfEvent event
) {
375 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
376 String host
= event
.getTrace().getHostId();
377 LttngSystemModel system
= getProvider().getSystem();
379 LttngInterruptContext intCtx
= system
.peekContextStack(host
, cpu
);
380 Context context
= intCtx
.getContext();
382 LttngWorker sender
= null;
383 if (context
== Context
.NONE
) {
384 sender
= system
.getWorkerOnCpu(event
.getTrace().getHostId(), cpu
);
385 } else if (context
== Context
.SOFTIRQ
) {
386 sender
= getOrCreateKernelWorker(event
, cpu
);
388 if (sender
== null) {
391 TmfVertex endpoint
= stateExtend(sender
, event
.getTimestamp().getValue());
392 fTcpNodes
.put(event
, endpoint
);
393 // TODO, add actual progress monitor
394 fTcpMatching
.matchEvent(event
, event
.getTrace(), new NullProgressMonitor());
397 private void handleSoftirqEntry(ITmfEvent event
) {
398 IKernelAnalysisEventLayout eventLayout
= getProvider().getEventLayout(event
.getTrace());
399 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
400 Long vec
= EventField
.getLong(event
, eventLayout
.fieldVec());
401 if (vec
== LinuxValues
.SOFTIRQ_NET_RX
|| vec
== LinuxValues
.SOFTIRQ_NET_TX
) {
402 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
403 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
404 graph
.add(k
, new TmfVertex(event
.getTimestamp().getValue()));