/*******************************************************************************
 * Copyright (c) 2011 Ericsson
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Francois Chouinard - Initial API and implementation
 *******************************************************************************/
package org.eclipse.linuxtools.lttng.ui.views.histogram;

import java.util.Arrays;

import org.eclipse.linuxtools.lttng.exceptions.EventOutOfSequenceException;
import org.eclipse.linuxtools.lttng.ui.LTTngUILogger;
/**
 * <b><u>HistogramDataModel</u></b>
 * <p>
 * Histogram-independent data model with the following characteristics:
 * <ul>
 * <li>The <i>basetime</i> is the timestamp of the first event
 * <li>There is a fixed number (<i>n</i>) of buckets of uniform duration
 * (<i>d</i>)
 * <li>The <i>timespan</i> of the model is thus: <i>n</i> * <i>d</i> time units
 * <li>Bucket <i>i</i> holds the number of events that occurred in time range:
 * [<i>basetime</i> + <i>i</i> * <i>d</i>, <i>basetime</i> + (<i>i</i> + 1) *
 * <i>d</i>)
 * </ul>
 * Initially, the bucket duration is set to 1ns. As the events are read, they
 * are tallied (using <i>countEvent()</i>) in the appropriate bucket (relative
 * to the <i>basetime</i>).
 * <p>
 * Eventually, an event will have a timestamp that exceeds the <i>timespan</i>
 * high end (determined by <i>n</i>, the number of buckets, and <i>d</i>, the
 * bucket duration). At this point, the histogram needs to be compacted. This is
 * done by simply merging adjacent buckets by pair, in effect doubling the
 * <i>timespan</i> (<i>timespan'</i> = <i>n</i> * <i>d'</i>, where <i>d'</i> =
 * 2<i>d</i>). This compaction happens as needed as the trace is read.
 * <p>
 * The mapping from the model to the UI is performed by the <i>scaleTo()</i>
 * method. By keeping the number of buckets <i>n</i> relatively large with
 * respect to the number of pixels in the actual histogram, we should achieve
 * a nice result when visualizing the histogram.
 * <p>
 * TODO: Add filter support for more refined event counting (e.g. by trace,
 * event type, ...)
 * <p>
 * TODO: Cut-off eccentric values? TODO: Support for going back in time?
 */
54 public class HistogramDataModel
{
56 // ------------------------------------------------------------------------
58 // ------------------------------------------------------------------------
60 // The default number of buckets
61 public static final int DEFAULT_NUMBER_OF_BUCKETS
= 16 * 1000;
63 // // The ratio where an eccentric value will be truncated
64 // private static final int MAX_TO_AVERAGE_CUTOFF_RATIO = 5;
66 // ------------------------------------------------------------------------
68 // ------------------------------------------------------------------------
71 private final int fNbBuckets
;
72 private final long[] fBuckets
;
73 private long fBucketDuration
;
74 private long fNbEvents
;
75 private int fLastBucket
;
78 private long fFirstEventTime
;
79 private long fLastEventTime
;
80 private long fCurrentEventTime
;
81 private long fTimeLimit
;
83 // ------------------------------------------------------------------------
85 // ------------------------------------------------------------------------
87 public HistogramDataModel() {
88 this(DEFAULT_NUMBER_OF_BUCKETS
);
91 public HistogramDataModel(int nbBuckets
) {
92 fNbBuckets
= nbBuckets
;
93 fBuckets
= new long[nbBuckets
];
97 public HistogramDataModel(HistogramDataModel other
) {
98 fNbBuckets
= other
.fNbBuckets
;
99 fBuckets
= Arrays
.copyOf(other
.fBuckets
, fNbBuckets
);
100 fBucketDuration
= other
.fBucketDuration
;
101 fNbEvents
= other
.fNbEvents
;
102 fLastBucket
= other
.fLastBucket
;
103 fFirstEventTime
= other
.fFirstEventTime
;
104 fLastEventTime
= other
.fLastEventTime
;
105 fCurrentEventTime
= other
.fCurrentEventTime
;
106 fTimeLimit
= other
.fTimeLimit
;
109 // ------------------------------------------------------------------------
111 // ------------------------------------------------------------------------
113 public long getNbEvents() {
117 public int getNbBuckets() {
121 public long getBucketDuration() {
122 return fBucketDuration
;
125 public long getStartTime() {
126 return fFirstEventTime
;
129 public long getEndTime() {
130 return fLastEventTime
;
133 public long getCurrentEventTime() {
134 return fCurrentEventTime
;
137 public long getTimeLimit() {
141 // ------------------------------------------------------------------------
143 // ------------------------------------------------------------------------
146 * Clear the histogram model.
148 public void clear() {
149 Arrays
.fill(fBuckets
, 0);
153 fCurrentEventTime
= 0;
155 fBucketDuration
= 1; // 1ns
160 * Sets the current event time
164 public void setCurrentEvent(long timestamp
) {
165 fCurrentEventTime
= timestamp
;
169 * Add event to the correct bucket, compacting the if needed.
171 * @param timestamp the timestamp of the event to count
173 public void countEvent(long timestamp
) {
174 // Set the start/end time if not already done
175 if (fLastBucket
== 0 && fBuckets
[0] == 0 && timestamp
> 0) {
176 fFirstEventTime
= timestamp
;
179 if (fLastEventTime
< timestamp
) {
180 fLastEventTime
= timestamp
;
184 while (timestamp
>= fTimeLimit
) {
189 if (timestamp
< fFirstEventTime
) {
190 String message
= "Out of order timestamp. Going back in time?"; //$NON-NLS-1$
191 EventOutOfSequenceException exception
= new EventOutOfSequenceException(message
);
192 LTTngUILogger
.logError(message
, exception
);
196 // Increment the right bucket
197 int index
= (int) ((timestamp
- fFirstEventTime
) / fBucketDuration
);
200 if (fLastBucket
< index
)
205 * Scale the model data to the width and height requested.
209 * @return the result array of size [width] and where the highest value
210 * doesn't exceed [height]
212 public HistogramScaledData
scaleTo(int width
, int height
) {
214 if (width
<= 0 || height
<= 0)
215 throw new AssertionError("Invalid histogram dimensions (" + width
+ "x" + height
+ ")");
217 // The result structure
218 HistogramScaledData result
= new HistogramScaledData(width
, height
);
220 // Scale horizontally
221 result
.fMaxValue
= 0;
222 int bucketsPerBar
= fLastBucket
/ width
+ 1;
223 result
.fBucketDuration
= bucketsPerBar
* fBucketDuration
;
224 for (int i
= 0; i
< width
; i
++) {
226 for (int j
= i
* bucketsPerBar
; j
< (i
+ 1) * bucketsPerBar
; j
++) {
229 count
+= fBuckets
[j
];
231 result
.fData
[i
] = count
;
232 result
.fLastBucket
= i
;
233 if (result
.fMaxValue
< count
)
234 result
.fMaxValue
= count
;
238 if (result
.fMaxValue
> 0) {
239 result
.fScalingFactor
= (double) height
/ result
.fMaxValue
;
242 // Set the current event index in the scaled histogram
243 if (fCurrentEventTime
>= fFirstEventTime
&& fCurrentEventTime
<= fLastEventTime
)
244 result
.fCurrentBucket
= (int) ((fCurrentEventTime
- fFirstEventTime
) / fBucketDuration
) / bucketsPerBar
;
246 result
.fCurrentBucket
= HistogramScaledData
.OUT_OF_RANGE_BUCKET
;
251 // ------------------------------------------------------------------------
253 // ------------------------------------------------------------------------
255 private void updateEndTime() {
256 fTimeLimit
= fFirstEventTime
+ fNbBuckets
* fBucketDuration
;
259 private void mergeBuckets() {
260 for (int i
= 0; i
< fNbBuckets
/ 2; i
++) {
261 fBuckets
[i
] = fBuckets
[2 * i
] + fBuckets
[2 * i
+ 1];
263 Arrays
.fill(fBuckets
, fNbBuckets
/ 2, fNbBuckets
, 0);
264 fBucketDuration
*= 2;
266 fLastBucket
= fNbBuckets
/ 2 - 1;