/*******************************************************************************
 * Copyright (c) 2015 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/
package org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.handlers;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfEdge;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfEdge.EdgeType;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfGraph;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfVertex;
import org.eclipse.tracecompass.analysis.graph.core.base.TmfVertex.EdgeDirection;
import org.eclipse.tracecompass.analysis.os.linux.core.kernelanalysis.LinuxValues;
import org.eclipse.tracecompass.analysis.os.linux.core.model.HostThread;
import org.eclipse.tracecompass.common.core.NonNullUtils;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.TcpEventStrings;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider.Context;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.building.LttngKernelExecGraphProvider.ProcessStatus;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.EventField;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngInterruptContext;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngSystemModel;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.graph.model.LttngWorker;
import org.eclipse.tracecompass.internal.lttng2.kernel.core.trace.layout.LttngEventLayout;
import org.eclipse.tracecompass.tmf.core.event.ITmfEvent;
import org.eclipse.tracecompass.tmf.core.event.aspect.TmfCpuAspect;
import org.eclipse.tracecompass.tmf.core.event.matching.IMatchProcessingUnit;
import org.eclipse.tracecompass.tmf.core.event.matching.TmfEventDependency;
import org.eclipse.tracecompass.tmf.core.event.matching.TmfEventMatching;
import org.eclipse.tracecompass.tmf.core.trace.ITmfTrace;
import org.eclipse.tracecompass.tmf.core.trace.TmfTraceUtils;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
48 * Event handler that actually builds the execution graph from the events
50 * @author Francis Giraldeau
51 * @author Geneviève Bastien
53 public class TraceEventHandlerExecutionGraph
extends BaseHandler
{
56 * The following IRQ constants was found empirically.
58 * TODO: other IRQ values should be determined from the lttng_statedump_interrupt events.
60 private static final int IRQ_TIMER
= 0;
62 private static final NullProgressMonitor DEFAULT_PROGRESS_MONITOR
= new NullProgressMonitor();
64 private final Table
<String
, Integer
, LttngWorker
> fKernel
;
65 private final IMatchProcessingUnit fMatchProcessing
;
66 private Map
<ITmfEvent
, TmfVertex
> fTcpNodes
;
67 private TmfEventMatching fTcpMatching
;
/**
 * Constructor
 *
 * @param provider
 *            The parent graph provider
 */
public TraceEventHandlerExecutionGraph(LttngKernelExecGraphProvider provider) {
    super(provider);
    fKernel = NonNullUtils.checkNotNull(HashBasedTable.create());

    fTcpNodes = new HashMap<>();
    fMatchProcessing = new IMatchProcessingUnit() {

        @Override
        public void matchingEnded() {
            // Nothing to do
        }

        @Override
        public int countMatches() {
            return 0;
        }

        @Override
        public void addMatch(@Nullable TmfEventDependency match) {
            if (match == null) {
                return;
            }
            // The endpoints were registered when the send/receive events
            // were handled; link them with a vertical network edge.
            TmfVertex output = fTcpNodes.remove(match.getSourceEvent());
            TmfVertex input = fTcpNodes.remove(match.getDestinationEvent());
            if (output != null && input != null) {
                output.linkVertical(input).setType(EdgeType.NETWORK);
            }
        }

        @Override
        public void init(Collection<ITmfTrace> fTraces) {
            // Nothing to do
        }

    };

    ITmfTrace trace = provider.getTrace();
    fTcpMatching = new TmfEventMatching(Collections.singleton(trace), fMatchProcessing);
    fTcpMatching.initMatching();
}
115 private LttngWorker
getOrCreateKernelWorker(ITmfEvent event
, Integer cpu
) {
116 String host
= event
.getTrace().getHostId();
117 LttngWorker worker
= fKernel
.get(host
, cpu
);
118 if (worker
== null) {
119 HostThread ht
= new HostThread(host
, -1);
120 worker
= new LttngWorker(ht
, "kernel/" + cpu
, event
.getTimestamp().getValue()); //$NON-NLS-1$
121 worker
.setStatus(ProcessStatus
.RUN
);
123 fKernel
.put(host
, cpu
, worker
);
129 public void handleEvent(ITmfEvent ev
) {
130 String eventName
= ev
.getName();
131 LttngEventLayout eventLayout
= getProvider().getEventLayout();
133 if (eventName
.equals(eventLayout
.eventSchedSwitch())) {
134 handleSchedSwitch(ev
);
135 } else if (eventName
.equals(eventLayout
.eventSchedProcessTTWU())) {
136 if (traceHasEventSchedTTWU(ev
.getTrace())) {
137 handleSchedWakeup(ev
);
139 } else if (eventName
.equals(eventLayout
.eventSchedProcessWakeup())) {
140 if (!traceHasEventSchedTTWU(ev
.getTrace())) {
141 handleSchedWakeup(ev
);
143 } else if (eventName
.equals(eventLayout
.eventSchedProcessWakeupNew())) {
144 if (!traceHasEventSchedTTWU(ev
.getTrace())) {
145 handleSchedWakeup(ev
);
147 } else if (eventName
.equals(eventLayout
.eventSoftIrqEntry())) {
148 handleSoftirqEntry(ev
);
149 } else if (eventName
.equals(TcpEventStrings
.INET_SOCK_LOCAL_IN
) ||
150 eventName
.equals(TcpEventStrings
.NETIF_RECEIVE_SKB
)) {
151 handleInetSockLocalIn(ev
);
152 } else if (eventName
.equals(TcpEventStrings
.INET_SOCK_LOCAL_OUT
) ||
153 eventName
.equals(TcpEventStrings
.NET_DEV_QUEUE
)) {
154 handleInetSockLocalOut(ev
);
158 private TmfVertex
stateExtend(LttngWorker task
, long ts
) {
159 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
160 TmfVertex node
= new TmfVertex(ts
);
161 ProcessStatus status
= task
.getStatus();
162 graph
.append(task
, node
, resolveProcessStatus(status
));
166 private TmfVertex
stateChange(LttngWorker task
, long ts
) {
167 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
168 TmfVertex node
= new TmfVertex(ts
);
169 ProcessStatus status
= task
.getOldStatus();
170 graph
.append(task
, node
, resolveProcessStatus(status
));
174 private static EdgeType
resolveProcessStatus(ProcessStatus status
) {
175 EdgeType ret
= EdgeType
.UNKNOWN
;
181 ret
= EdgeType
.RUNNING
;
184 ret
= EdgeType
.UNKNOWN
;
187 ret
= EdgeType
.BLOCKED
;
191 ret
= EdgeType
.PREEMPTED
;
194 ret
= EdgeType
.UNKNOWN
;
202 private void handleSchedSwitch(ITmfEvent event
) {
203 String host
= event
.getTrace().getHostId();
204 long ts
= event
.getTimestamp().getValue();
205 LttngEventLayout eventLayout
= getProvider().getEventLayout();
206 LttngSystemModel system
= getProvider().getSystem();
208 Integer next
= EventField
.getInt(event
, eventLayout
.fieldNextTid());
209 Integer prev
= EventField
.getInt(event
, eventLayout
.fieldPrevTid());
211 LttngWorker nextTask
= system
.findWorker(new HostThread(host
, next
));
212 LttngWorker prevTask
= system
.findWorker(new HostThread(host
, prev
));
214 if (prevTask
== null || nextTask
== null) {
217 stateChange(prevTask
, ts
);
218 stateChange(nextTask
, ts
);
221 private void handleSchedWakeup(ITmfEvent event
) {
222 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
223 String host
= event
.getTrace().getHostId();
224 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
225 LttngEventLayout eventLayout
= getProvider().getEventLayout();
226 LttngSystemModel system
= getProvider().getSystem();
228 long ts
= event
.getTimestamp().getValue();
229 Integer tid
= EventField
.getInt(event
, eventLayout
.fieldTid());
231 LttngWorker target
= system
.findWorker(new HostThread(host
, tid
));
232 LttngWorker current
= system
.getWorkerOnCpu(host
, cpu
);
233 if (target
== null) {
237 ProcessStatus status
= target
.getOldStatus();
240 waitFork(graph
, ts
, target
, current
);
243 waitBlocked(event
, graph
, host
, cpu
, eventLayout
, system
, ts
, target
, current
);
257 private void waitBlocked(ITmfEvent event
, TmfGraph graph
, String host
, Integer cpu
, LttngEventLayout eventLayout
, LttngSystemModel system
, long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
258 LttngInterruptContext context
= system
.peekContextStack(host
, cpu
);
259 switch (context
.getContext()) {
261 // shortcut of appendTaskNode: resolve blocking source in situ
262 graph
.append(target
, new TmfVertex(ts
), EdgeType
.TIMER
);
265 irq(graph
, eventLayout
, ts
, target
, context
);
268 softIrq(event
, graph
, cpu
, eventLayout
, ts
, target
, context
);
271 none(ts
, target
, current
);
278 private void softIrq(ITmfEvent event
, TmfGraph graph
, Integer cpu
, LttngEventLayout eventLayout
, long ts
, LttngWorker target
, LttngInterruptContext context
) {
279 TmfVertex wup
= new TmfVertex(ts
);
280 TmfEdge l2
= graph
.append(target
, wup
);
282 int vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldVec()).intValue();
283 l2
.setType(resolveSoftirq(vec
));
285 // special case for network related softirq
286 Long vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldVec());
287 if (vec
== LinuxValues
.SOFTIRQ_NET_RX
|| vec
== LinuxValues
.SOFTIRQ_NET_TX
) {
288 // create edge if wake up is caused by incoming packet
289 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
290 TmfVertex tail
= graph
.getTail(k
);
291 if (tail
!= null && tail
.getEdge(EdgeDirection
.INCOMING_VERTICAL_EDGE
) != null) {
292 TmfVertex kwup
= stateExtend(k
, event
.getTimestamp().getValue());
293 kwup
.linkVertical(wup
);
298 private void none(long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
299 // task context wakeup
300 if (current
!= null) {
301 TmfVertex n0
= stateExtend(current
, ts
);
302 TmfVertex n1
= stateChange(target
, ts
);
305 stateChange(target
, ts
);
309 private static void irq(TmfGraph graph
, LttngEventLayout eventLayout
, long ts
, LttngWorker target
, LttngInterruptContext context
) {
310 TmfEdge link
= graph
.append(target
, new TmfVertex(ts
));
312 int vec
= EventField
.getLong(context
.getEvent(), eventLayout
.fieldIrq()).intValue();
313 link
.setType(resolveIRQ(vec
));
317 private void waitFork(TmfGraph graph
, long ts
, LttngWorker target
, @Nullable LttngWorker current
) {
318 if (current
!= null) {
319 TmfVertex n0
= stateExtend(current
, ts
);
320 TmfVertex n1
= stateChange(target
, ts
);
323 stateChange(target
, ts
);
327 private static EdgeType
resolveIRQ(int vec
) {
328 EdgeType ret
= EdgeType
.UNKNOWN
;
331 ret
= EdgeType
.INTERRUPTED
;
334 ret
= EdgeType
.UNKNOWN
;
340 private static EdgeType
resolveSoftirq(int vec
) {
341 EdgeType ret
= EdgeType
.UNKNOWN
;
343 case LinuxValues
.SOFTIRQ_HRTIMER
:
344 case LinuxValues
.SOFTIRQ_TIMER
:
345 ret
= EdgeType
.TIMER
;
347 case LinuxValues
.SOFTIRQ_BLOCK
:
348 case LinuxValues
.SOFTIRQ_BLOCK_IOPOLL
:
349 ret
= EdgeType
.BLOCK_DEVICE
;
351 case LinuxValues
.SOFTIRQ_NET_RX
:
352 case LinuxValues
.SOFTIRQ_NET_TX
:
353 ret
= EdgeType
.NETWORK
;
355 case LinuxValues
.SOFTIRQ_SCHED
:
356 ret
= EdgeType
.INTERRUPTED
;
359 ret
= EdgeType
.UNKNOWN
;
365 private void handleInetSockLocalIn(ITmfEvent event
) {
366 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
367 String host
= event
.getTrace().getHostId();
368 LttngSystemModel system
= getProvider().getSystem();
370 LttngInterruptContext intCtx
= system
.peekContextStack(host
, cpu
);
371 Context context
= intCtx
.getContext();
372 if (context
== Context
.SOFTIRQ
) {
373 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
374 TmfVertex endpoint
= stateExtend(k
, event
.getTimestamp().getValue());
375 fTcpNodes
.put(event
, endpoint
);
376 // TODO add actual progress monitor
377 fTcpMatching
.matchEvent(event
, event
.getTrace(), DEFAULT_PROGRESS_MONITOR
);
381 private void handleInetSockLocalOut(ITmfEvent event
) {
382 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
383 String host
= event
.getTrace().getHostId();
384 LttngSystemModel system
= getProvider().getSystem();
386 LttngInterruptContext intCtx
= system
.peekContextStack(host
, cpu
);
387 Context context
= intCtx
.getContext();
389 LttngWorker sender
= null;
390 if (context
== Context
.NONE
) {
391 sender
= system
.getWorkerOnCpu(event
.getTrace().getHostId(), cpu
);
392 } else if (context
== Context
.SOFTIRQ
) {
393 sender
= getOrCreateKernelWorker(event
, cpu
);
395 if (sender
== null) {
398 TmfVertex endpoint
= stateExtend(sender
, event
.getTimestamp().getValue());
399 fTcpNodes
.put(event
, endpoint
);
400 // TODO, add actual progress monitor
401 fTcpMatching
.matchEvent(event
, event
.getTrace(), new NullProgressMonitor());
404 private void handleSoftirqEntry(ITmfEvent event
) {
405 LttngEventLayout eventLayout
= getProvider().getEventLayout();
406 TmfGraph graph
= NonNullUtils
.checkNotNull(getProvider().getAssignedGraph());
407 Long vec
= EventField
.getLong(event
, eventLayout
.fieldVec());
408 if (vec
== LinuxValues
.SOFTIRQ_NET_RX
|| vec
== LinuxValues
.SOFTIRQ_NET_TX
) {
409 Integer cpu
= NonNullUtils
.checkNotNull(TmfTraceUtils
.resolveIntEventAspectOfClassForEvent(event
.getTrace(), TmfCpuAspect
.class, event
));
410 LttngWorker k
= getOrCreateKernelWorker(event
, cpu
);
411 graph
.add(k
, new TmfVertex(event
.getTimestamp().getValue()));