1 /*******************************************************************************
2 * Copyright (c) 2011, 2015 Ericsson
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
10 * Francois Chouinard - Initial API and implementation
11 * Bernd Hufmann - Implementation of new interfaces/listeners and support for
12 * time stamp in any order
13 * Francois Chouinard - Moved from LTTng to TMF
14 * Francois Chouinard - Added support for empty initial buckets
15 * Patrick Tasse - Support selection range
16 * Jean-Christian Kouamé, Simon Delisle - Added support to manage lost events
17 * Xavier Raynaud - Support multi-trace coloring
18 *******************************************************************************/
20 package org
.eclipse
.tracecompass
.tmf
.ui
.views
.histogram
;
22 import java
.util
.Arrays
;
23 import java
.util
.Collection
;
24 import java
.util
.LinkedHashMap
;
27 import org
.eclipse
.core
.runtime
.ListenerList
;
28 import org
.eclipse
.tracecompass
.tmf
.core
.timestamp
.TmfTimeRange
;
29 import org
.eclipse
.tracecompass
.tmf
.core
.trace
.ITmfTrace
;
30 import org
.eclipse
.tracecompass
.tmf
.core
.trace
.TmfTraceManager
;
32 import com
.google
.common
.base
.Function
;
33 import com
.google
.common
.collect
.FluentIterable
;
36 * Histogram-independent data model.
38 * It has the following characteristics:
40 * <li>The <i>basetime</i> is the timestamp of the first event
41 * <li>There is a fixed number (<i>n</i>) of buckets of uniform duration (
43 * <li>The <i>timespan</i> of the model is thus: <i>n</i> * <i>d</i> time units
44 * <li>Bucket <i>i</i> holds the number of events that occurred in time range: [
45 * <i>basetime</i> + <i>i</i> * <i>d</i>, <i>basetime</i> + (<i>i</i> + 1) *
48 * Initially, the bucket duration is set to 1ns. As the events are read, they
49 * are tallied (using <i>countEvent()</i>) in the appropriate bucket (relative
50 * to the <i>basetime</i>).
52 * Eventually, an event will have a timestamp that exceeds the <i>timespan</i>
53 * high end (determined by <i>n</i>, the number of buckets, and <i>d</i>, the
54 * bucket duration). At this point, the histogram needs to be compacted. This is
55 * done by simply merging adjacent buckets by pair, in effect doubling the
56 * <i>timespan</i> (<i>timespan'</i> = <i>n</i> * <i>d'</i>, where <i>d'</i> = 2
57 * <i>d</i>). This compaction happens as needed as the trace is read.
59 * The model allows for timestamps that are not in increasing order. The timestamps can
60 * be fed to the model in any order. If an event has a timestamp less than the
61 * <i>basetime</i>, the buckets will be moved to the right to account for the
62 * new smaller timestamp. The new <i>basetime</i> is a multiple of the bucket
63 * duration smaller than the previous <i>basetime</i>. Note that the
64 * <i>basetime</i> might no longer be the timestamp of an event. If necessary,
65 * the buckets will be compacted before moving to the right. This might be
66 * necessary to not lose any event counts at the end of the buckets array.
68 * The mapping from the model to the UI is performed by the <i>scaleTo()</i>
69 * method. By keeping the number of buckets <i>n</i> relatively large with
70 * respect to the number of pixels in the actual histogram, we should achieve
71 * a nice result when visualizing the histogram.
75 * @author Francois Chouinard
77 public class HistogramDataModel
implements IHistogramDataModel
{
79 // ------------------------------------------------------------------------
81 // ------------------------------------------------------------------------
84 * The default number of buckets
86 public static final int DEFAULT_NUMBER_OF_BUCKETS
= 16 * 1000;
89 * Number of events after which listeners will be notified.
91 public static final int REFRESH_FREQUENCY
= DEFAULT_NUMBER_OF_BUCKETS
;
93 // ------------------------------------------------------------------------
95 // ------------------------------------------------------------------------
98 private ITmfTrace fTrace
= null;
99 private final Map
<ITmfTrace
, Integer
> fTraceMap
= new LinkedHashMap
<>();
102 private final int fNbBuckets
;
103 private final HistogramBucket
[] fBuckets
;
104 private final long[] fLostEventsBuckets
;
105 private long fBucketDuration
;
106 private long fNbEvents
;
107 private int fLastBucket
;
110 private long fFirstBucketTime
; // could be negative when analyzing events
111 // with descending order!!!
112 private long fFirstEventTime
;
113 private long fEndTime
;
114 private long fSelectionBegin
;
115 private long fSelectionEnd
;
116 private long fTimeLimit
;
118 // Private listener lists
119 private final ListenerList fModelListeners
;
121 // ------------------------------------------------------------------------
123 // ------------------------------------------------------------------------
/**
 * Default constructor with default number of buckets.
 */
public HistogramDataModel() {
    this(0, DEFAULT_NUMBER_OF_BUCKETS);
}
/**
 * Constructor with a given start time and the default number of buckets.
 *
 * @param startTime
 *            The histogram start time
 */
public HistogramDataModel(long startTime) {
    this(startTime, DEFAULT_NUMBER_OF_BUCKETS);
}
/**
 * Constructor with non-default number of buckets.
 *
 * @param nbBuckets
 *            A number of buckets.
 */
public HistogramDataModel(int nbBuckets) {
    // Delegate to the full constructor with a start time of 0,
    // mirroring the no-argument constructor; without this the final
    // fields would never be assigned.
    this(0, nbBuckets);
}
/**
 * Constructor with non-default number of buckets and a given start time.
 *
 * @param startTime
 *            the histogram start time
 * @param nbBuckets
 *            A number of buckets.
 */
public HistogramDataModel(long startTime, int nbBuckets) {
    fFirstBucketTime = fFirstEventTime = fEndTime = startTime;
    fNbBuckets = nbBuckets;
    fBuckets = new HistogramBucket[nbBuckets];
    fLostEventsBuckets = new long[nbBuckets];
    fModelListeners = new ListenerList();
    // Initialize counters, bucket duration (1 time unit) and the time
    // limit; without this fBucketDuration stays 0 and scaleTo() throws
    // an IllegalStateException.
    clear();
}
/**
 * Copy constructor: duplicates the bucket contents, counters and time
 * bounds of another model, and registers the same listeners.
 *
 * @param other
 *            the model to copy
 */
public HistogramDataModel(HistogramDataModel other) {
    fNbBuckets = other.fNbBuckets;
    fBuckets = new HistogramBucket[fNbBuckets];
    for (int bucket = 0; bucket < fNbBuckets; bucket++) {
        fBuckets[bucket] = new HistogramBucket(other.fBuckets[bucket]);
    }
    fLostEventsBuckets = Arrays.copyOf(other.fLostEventsBuckets, fNbBuckets);
    // A zero bucket duration is never valid; clamp to at least 1.
    fBucketDuration = Math.max(other.fBucketDuration, 1);
    fNbEvents = other.fNbEvents;
    fLastBucket = other.fLastBucket;
    fFirstBucketTime = other.fFirstBucketTime;
    fFirstEventTime = other.fFirstEventTime;
    fEndTime = other.fEndTime;
    fSelectionBegin = other.fSelectionBegin;
    fSelectionEnd = other.fSelectionEnd;
    fTimeLimit = other.fTimeLimit;
    fModelListeners = new ListenerList();
    // Carry the registered listeners over to the copy.
    for (Object listener : other.fModelListeners.getListeners()) {
        fModelListeners.add(listener);
    }
}
199 * Disposes the data model
201 public void dispose() {
206 // ------------------------------------------------------------------------
208 // ------------------------------------------------------------------------
211 * Returns the number of events in the data model.
213 * @return number of events.
215 public long getNbEvents() {
220 * Returns the number of buckets in the model.
222 * @return number of buckets.
224 public int getNbBuckets() {
229 * Returns the current bucket duration.
231 * @return bucket duration
233 public long getBucketDuration() {
234 return fBucketDuration
;
238 * Returns the time value of the first bucket in the model.
240 * @return time of first bucket.
242 public long getFirstBucketTime() {
243 return fFirstBucketTime
;
247 * Returns the time of the first event in the model.
249 * @return time of first event.
251 public long getStartTime() {
252 return fFirstEventTime
;
256 * Sets the trace of this model.
259 * - a {@link ITmfTrace}
261 public void setTrace(ITmfTrace trace
) {
265 for (ITmfTrace tr
: TmfTraceManager
.getTraceSet(fTrace
)) {
266 fTraceMap
.put(tr
, i
);
272 * Gets the trace of this model.
274 * @return a {@link ITmfTrace}
276 public ITmfTrace
getTrace() {
281 * Gets the traces names of this model.
283 * @return an array of trace names
285 public String
[] getTraceNames() {
286 FluentIterable
<ITmfTrace
> traces
= FluentIterable
.from(TmfTraceManager
.getTraceSet(fTrace
));
287 FluentIterable
<String
> traceNames
= traces
.transform(new Function
<ITmfTrace
, String
>() {
289 public String
apply(ITmfTrace input
) {
290 return input
.getName();
293 return traceNames
.toArray(String
.class);
297 * Gets the number of traces of this model.
299 * @return the number of traces of this model.
301 public int getNbTraces() {
302 Collection
<ITmfTrace
> traces
= TmfTraceManager
.getTraceSet(fTrace
);
303 if (traces
.isEmpty()) {
306 return traces
.size();
310 * Sets the model start time
313 * the histogram range start time
315 * the histogram range end time
317 public void setTimeRange(long startTime
, long endTime
) {
318 fFirstBucketTime
= fFirstEventTime
= fEndTime
= startTime
;
321 while (endTime
>= fTimeLimit
) {
327 * Set the end time. Setting this ensures that the corresponding bucket is
328 * displayed regardless of the event counts.
331 * the time of the last used bucket
333 public void setEndTime(long endTime
) {
335 fLastBucket
= (int) ((endTime
- fFirstBucketTime
) / fBucketDuration
);
339 * Returns the end time.
341 * @return the time of the last used bucket
343 public long getEndTime() {
348 * Returns the begin time of the current selection in the model.
350 * @return the begin time of the current selection.
352 public long getSelectionBegin() {
353 return fSelectionBegin
;
357 * Returns the end time of the current selection in the model.
359 * @return the end time of the current selection.
361 public long getSelectionEnd() {
362 return fSelectionEnd
;
366 * Returns the time limit with is: start time + nbBuckets * bucketDuration
368 * @return the time limit.
370 public long getTimeLimit() {
374 // ------------------------------------------------------------------------
376 // ------------------------------------------------------------------------
379 * Add a listener to the model to be informed about model changes.
384 public void addHistogramListener(IHistogramModelListener listener
) {
385 fModelListeners
.add(listener
);
389 * Remove a given model listener.
392 * A listener to remove.
394 public void removeHistogramListener(IHistogramModelListener listener
) {
395 fModelListeners
.remove(listener
);
398 // Notify listeners (always)
399 private void fireModelUpdateNotification() {
400 fireModelUpdateNotification(0);
403 // Notify listener on boundary
404 private void fireModelUpdateNotification(long count
) {
405 if ((count
% REFRESH_FREQUENCY
) == 0) {
406 Object
[] listeners
= fModelListeners
.getListeners();
407 for (Object listener2
: listeners
) {
408 IHistogramModelListener listener
= (IHistogramModelListener
) listener2
;
409 listener
.modelUpdated();
414 // ------------------------------------------------------------------------
416 // ------------------------------------------------------------------------
419 public void complete() {
420 fireModelUpdateNotification();
424 * Clear the histogram model.
426 * @see org.eclipse.tracecompass.tmf.ui.views.distribution.model.IBaseDistributionModel#clear()
429 public synchronized void clear() {
430 Arrays
.fill(fBuckets
, null);
431 Arrays
.fill(fLostEventsBuckets
, 0);
433 fFirstBucketTime
= 0;
440 fireModelUpdateNotification();
444 * Sets the current selection time range (no notification of listeners)
447 * The selection begin time.
449 * The selection end time.
451 public void setSelection(long beginTime
, long endTime
) {
452 fSelectionBegin
= beginTime
;
453 fSelectionEnd
= endTime
;
457 * Sets the current selection time range with notification of listeners
460 * The selection begin time.
462 * The selection end time.
464 public void setSelectionNotifyListeners(long beginTime
, long endTime
) {
465 fSelectionBegin
= beginTime
;
466 fSelectionEnd
= endTime
;
467 fireModelUpdateNotification();
471 * Add event to the correct bucket, compacting the if needed.
474 * The current event Count (for notification purposes)
476 * The timestamp of the event to count
481 public synchronized void countEvent(long eventCount
, long timestamp
, ITmfTrace trace
) {
488 // Set the start/end time if not already done
489 if ((fFirstBucketTime
== 0) && (fLastBucket
== -1) && (fBuckets
[0] == null) && (timestamp
> 0)) {
490 fFirstBucketTime
= timestamp
;
491 fFirstEventTime
= timestamp
;
495 if (timestamp
< fFirstEventTime
) {
496 fFirstEventTime
= timestamp
;
499 if (fEndTime
< timestamp
) {
500 fEndTime
= timestamp
;
503 if (timestamp
>= fFirstBucketTime
) {
506 while (timestamp
>= fTimeLimit
) {
512 // get offset for adjustment
513 long preMergeOffset
= getOffset(timestamp
);
516 while ((fLastBucket
+ preMergeOffset
) >= fNbBuckets
) {
518 preMergeOffset
= getOffset(timestamp
);
521 // after merging the offset should be less than number of buckets
522 int offset
= (int) preMergeOffset
;
525 fLastBucket
= fLastBucket
+ offset
;
527 fFirstBucketTime
= fFirstBucketTime
- (offset
* fBucketDuration
);
531 // Increment the right bucket
532 int index
= (int) ((timestamp
- fFirstBucketTime
) / fBucketDuration
);
533 if (fBuckets
[index
] == null) {
534 fBuckets
[index
] = new HistogramBucket(getNbTraces());
536 Integer traceIndex
= fTraceMap
.get(trace
);
537 if (traceIndex
== null) {
540 fBuckets
[index
].addEvent(traceIndex
);
542 if (fLastBucket
< index
) {
546 fireModelUpdateNotification(eventCount
);
550 * Add lost event to the correct bucket, compacting the if needed.
553 * time range of a lost event
554 * @param nbLostEvents
555 * the number of lost events
557 * Full range or time range for histogram request
559 public void countLostEvent(TmfTimeRange timeRange
, long nbLostEvents
, boolean fullRange
) {
561 long startTime
= timeRange
.getStartTime().getValue();
562 long endTime
= timeRange
.getEndTime().getValue();
565 if (startTime
< 0 || endTime
< 0) {
569 // Set the start/end time if not already done
570 if ((fFirstBucketTime
== 0) && (fLastBucket
== -1) && (fBuckets
[0] == null)) {
571 fFirstBucketTime
= startTime
;
572 fFirstEventTime
= startTime
;
578 while (endTime
>= fTimeLimit
) {
583 int indexStart
= (int) ((startTime
- fFirstBucketTime
) / fBucketDuration
);
584 int indexEnd
= (int) ((endTime
- fFirstBucketTime
) / fBucketDuration
);
585 int nbBucketRange
= (indexEnd
- indexStart
) + 1;
587 int lostEventPerBucket
= (int) Math
.ceil((double) nbLostEvents
/ nbBucketRange
);
588 long lastLostCol
= Math
.max(1, nbLostEvents
- lostEventPerBucket
* (nbBucketRange
- 1));
590 // Increment the right bucket, bear in mind that ranges make it almost
591 // certain that some lost events are out of range
592 for (int index
= indexStart
; index
<= indexEnd
&& index
< fLostEventsBuckets
.length
; index
++) {
593 if (index
== (indexStart
+ nbBucketRange
- 1)) {
594 fLostEventsBuckets
[index
] += lastLostCol
;
596 fLostEventsBuckets
[index
] += lostEventPerBucket
;
602 fireModelUpdateNotification(nbLostEvents
);
606 * Scale the model data to the width, height and bar width requested.
609 * A width of the histogram canvas
611 * A height of the histogram canvas
613 * A width (in pixel) of a histogram bar
614 * @return the result array of size [width] and where the highest value
615 * doesn't exceed [height]
617 * @see org.eclipse.tracecompass.tmf.ui.views.histogram.IHistogramDataModel#scaleTo(int,
621 public HistogramScaledData
scaleTo(int width
, int height
, int barWidth
) {
623 if ((width
<= 0) || (height
<= 0) || (barWidth
<= 0)) {
624 throw new AssertionError("Invalid histogram dimensions (" + width
+ "x" + height
+ ", barWidth=" + barWidth
+ ")"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
626 if (fBucketDuration
== 0) {
627 throw new IllegalStateException("Bucket width is 0, that should be impossible"); //$NON-NLS-1$
630 // The result structure
631 HistogramScaledData result
= new HistogramScaledData(width
, height
, barWidth
);
633 // Scale horizontally
634 result
.fMaxValue
= 0;
636 int nbBars
= width
/ barWidth
;
637 double bucketsPerBar
= ((double) fLastBucket
/ nbBars
);
638 final long modelBucketStartTime
= fFirstBucketTime
;
639 final long modelBucketEndTime
= fEndTime
;
641 * If there is only one model bucket, use a duration of 1 to spread the
642 * value over the scaled width, but store a scaled bucket duration of 0
643 * to prevent the half-bucket offset in the bucket time calculations.
645 double bucketDuration
= Math
.max(modelBucketEndTime
- modelBucketStartTime
, 1) / (double) nbBars
;
646 result
.fBucketDuration
= fLastBucket
== 0 ?
0 : bucketDuration
;
648 int scaledCountLostEvent
= 0;
649 int offset
= (int) (0.5 / bucketDuration
);
650 for (int i
= 0; i
< result
.fData
.length
; i
++) {
651 result
.fData
[i
] = new HistogramBucket(getNbTraces());
653 for (int modelIndex
= 0; modelIndex
<= fLastBucket
; modelIndex
++) {
654 double done
= (double) modelIndex
/ (double) (fLastBucket
);
655 double doneNext
= (double) (modelIndex
+ 1) / (double) (fLastBucket
);
656 final int scaledStart
= Math
.max((int) (done
* nbBars
) - offset
, 0);
657 final int scaledEnd
= Math
.min((int) (doneNext
* nbBars
) - offset
, nbBars
- 1);
658 int scaledIndex
= scaledStart
;
659 final HistogramBucket currentModelBucket
= fBuckets
[modelIndex
];
660 if (currentModelBucket
!= null) {
662 // Make sure last model bucket counted in last scaled index
663 scaledIndex
= Math
.min(scaledIndex
, nbBars
- 1);
664 if (result
.fData
[scaledIndex
].getNbEvents() == 0) {
666 scaledCountLostEvent
= 0;
668 result
.fData
[scaledIndex
].add(currentModelBucket
);
669 result
.fLostEventsData
[scaledIndex
] += fLostEventsBuckets
[modelIndex
];
670 scaledCountLostEvent
+= fLostEventsBuckets
[modelIndex
];
671 scaledCount
+= currentModelBucket
.getNbEvents();
672 if (!currentModelBucket
.isEmpty()) {
673 result
.fLastBucket
= scaledIndex
;
675 if (result
.fMaxValue
< scaledCount
) {
676 result
.fMaxValue
= scaledCount
;
678 if (result
.fMaxCombinedValue
< scaledCount
+ scaledCountLostEvent
) {
679 result
.fMaxCombinedValue
= scaledCount
+ scaledCountLostEvent
;
682 } while (scaledIndex
< scaledEnd
);
687 if (result
.fMaxValue
> 0) {
688 result
.fScalingFactor
= (double) height
/ result
.fMaxValue
;
690 if (result
.fMaxCombinedValue
> 0) {
691 result
.fScalingFactorCombined
= (double) height
/ result
.fMaxCombinedValue
;
694 fBucketDuration
= Math
.max(fBucketDuration
, 1);
696 // Set selection begin and end index in the scaled histogram
697 if (fSelectionBegin
== fEndTime
) {
698 // make sure selection is visible at the end
699 result
.fSelectionBeginBucket
= result
.fWidth
- 1;
701 result
.fSelectionBeginBucket
= (int) Math
.round(((fSelectionBegin
- fFirstBucketTime
) / (double) fBucketDuration
) / bucketsPerBar
);
704 if (fSelectionEnd
== fEndTime
) {
705 // make sure selection is visible at the end
706 result
.fSelectionEndBucket
= result
.fWidth
- 1;
708 result
.fSelectionEndBucket
= (int) Math
.round(((fSelectionEnd
- fFirstBucketTime
) / (double) fBucketDuration
) / bucketsPerBar
);
711 result
.fFirstBucketTime
= fFirstBucketTime
;
712 result
.fFirstEventTime
= fFirstEventTime
;
716 // ------------------------------------------------------------------------
718 // ------------------------------------------------------------------------
720 private void updateEndTime() {
721 fTimeLimit
= fFirstBucketTime
+ (fNbBuckets
* fBucketDuration
);
724 private void mergeBuckets() {
725 for (int i
= 0; i
< (fNbBuckets
/ 2); i
++) {
726 fBuckets
[i
] = new HistogramBucket(fBuckets
[2 * i
], fBuckets
[(2 * i
) + 1]);
727 fLostEventsBuckets
[i
] = fLostEventsBuckets
[2 * i
] + fLostEventsBuckets
[(2 * i
) + 1];
729 Arrays
.fill(fBuckets
, fNbBuckets
/ 2, fNbBuckets
, null);
730 Arrays
.fill(fLostEventsBuckets
, fNbBuckets
/ 2, fNbBuckets
, 0);
731 fBucketDuration
*= 2;
733 fLastBucket
= (fNbBuckets
/ 2) - 1;
736 private void moveBuckets(int offset
) {
737 for (int i
= fNbBuckets
- 1; i
>= offset
; i
--) {
738 fBuckets
[i
] = new HistogramBucket(fBuckets
[i
- offset
]);
739 fLostEventsBuckets
[i
] = fLostEventsBuckets
[i
- offset
];
742 for (int i
= 0; i
< offset
; i
++) {
744 fLostEventsBuckets
[i
] = 0;
748 private long getOffset(long timestamp
) {
749 long offset
= (fFirstBucketTime
- timestamp
) / fBucketDuration
;
750 if (((fFirstBucketTime
- timestamp
) % fBucketDuration
) != 0) {