1 /*******************************************************************************
2 * Copyright (c) 2011, 2015 Ericsson
3 *
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
8 *
9 * Contributors:
10 * Francois Chouinard - Initial API and implementation
11 * Bernd Hufmann - Implementation of new interfaces/listeners and support for
12 * time stamp in any order
13 * Francois Chouinard - Moved from LTTng to TMF
14 * Francois Chouinard - Added support for empty initial buckets
15 * Patrick Tasse - Support selection range
16 * Jean-Christian Kouamé, Simon Delisle - Added support to manage lost events
17 * Xavier Raynaud - Support multi-trace coloring
18 *******************************************************************************/
19
20 package org.eclipse.tracecompass.tmf.ui.views.histogram;
21
22 import java.util.Arrays;
23 import java.util.Collection;
24 import java.util.LinkedHashMap;
25 import java.util.Map;
26
27 import org.eclipse.core.runtime.ListenerList;
28 import org.eclipse.tracecompass.tmf.core.timestamp.TmfTimeRange;
29 import org.eclipse.tracecompass.tmf.core.trace.ITmfTrace;
30 import org.eclipse.tracecompass.tmf.core.trace.TmfTraceManager;
31
32 import com.google.common.base.Function;
33 import com.google.common.collect.FluentIterable;
34
35 /**
36 * Histogram-independent data model.
37 *
38 * It has the following characteristics:
39 * <ul>
40 * <li>The <i>basetime</i> is the timestamp of the first event
41 * <li>There is a fixed number (<i>n</i>) of buckets of uniform duration (
42 * <i>d</i>)
43 * <li>The <i>timespan</i> of the model is thus: <i>n</i> * <i>d</i> time units
44 * <li>Bucket <i>i</i> holds the number of events that occurred in time range: [
45 * <i>basetime</i> + <i>i</i> * <i>d</i>, <i>basetime</i> + (<i>i</i> + 1) *
46 * <i>d</i>)
47 * </ul>
48 * Initially, the bucket duration is set to 1 ns. As the events are read, they
49 * are tallied (using <i>countEvent()</i>) in the appropriate bucket (relative
50 * to the <i>basetime</i>).
51 * <p>
52 * Eventually, an event will have a timestamp that exceeds the <i>timespan</i>
53 * high end (determined by <i>n</i>, the number of buckets, and <i>d</i>, the
54 * bucket duration). At this point, the histogram needs to be compacted. This is
55 * done by simply merging adjacent buckets in pairs, in effect doubling the
56 * <i>timespan</i> (<i>timespan'</i> = <i>n</i> * <i>d'</i>, where <i>d'</i> = 2
57 * <i>d</i>). This compaction happens as needed as the trace is read.
58 * <p>
59 * The model does not require timestamps in increasing order: they can be
60 * fed to the model in any order. If an event has a timestamp less than the
61 * <i>basetime</i>, the buckets will be moved to the right to account for the
62 * new, smaller timestamp. The new <i>basetime</i> is a multiple of the bucket
63 * duration smaller than the previous <i>basetime</i>. Note that the
64 * <i>basetime</i> might no longer be the timestamp of an event. If necessary,
65 * the buckets will be compacted before being moved to the right, so that no
66 * event counts are lost at the end of the buckets array.
67 * <p>
68 * The mapping from the model to the UI is performed by the <i>scaleTo()</i>
69 * method. By keeping the number of buckets <i>n</i> relatively large with
70 * respect to the number of pixels in the actual histogram, we should achieve
71 * a nice result when visualizing the histogram.
72 * <p>
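 * As a small worked example of compaction: with <i>n</i> = 4 buckets and
 * <i>d</i> = 1 ns starting at <i>basetime</i> = 0, the timespan is 4 ns; an
 * event at t = 5 triggers one compaction, after which <i>d</i> = 2 ns and the
 * timespan is 8 ns.
 * <p>
 * The following sketch illustrates typical use of this class. It is not taken
 * from an actual caller; the event source, canvas dimensions and bar width are
 * placeholders:
 *
 * <pre>
 * HistogramDataModel model = new HistogramDataModel();
 * model.setTrace(trace);
 * long count = 0;
 * for (ITmfEvent event : events) {
 *     model.countEvent(count++, event.getTimestamp().getValue(), event.getTrace());
 * }
 * model.complete();
 * HistogramScaledData scaled = model.scaleTo(800, 200, 4);
 * </pre>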
73 *
74 * @version 2.0
75 * @author Francois Chouinard
76 */
77 public class HistogramDataModel implements IHistogramDataModel {
78
79 // ------------------------------------------------------------------------
80 // Constants
81 // ------------------------------------------------------------------------
82
83 /**
84 * The default number of buckets
85 */
86 public static final int DEFAULT_NUMBER_OF_BUCKETS = 16 * 1000;
87
88 /**
89 * Number of events after which listeners will be notified.
90 */
91 public static final int REFRESH_FREQUENCY = DEFAULT_NUMBER_OF_BUCKETS;
92
93 // ------------------------------------------------------------------------
94 // Attributes
95 // ------------------------------------------------------------------------
96
97 // Trace management
98 private ITmfTrace fTrace = null;
99 private final Map<ITmfTrace, Integer> fTraceMap = new LinkedHashMap<>();
100
101 // Bucket management
102 private final int fNbBuckets;
103 private final HistogramBucket[] fBuckets;
104 private final long[] fLostEventsBuckets;
105 private long fBucketDuration;
106 private long fNbEvents;
107 private int fLastBucket;
108
109 // Timestamps
110 private long fFirstBucketTime; // could be negative when analyzing events
111 // in descending order
112 private long fFirstEventTime;
113 private long fEndTime;
114 private long fSelectionBegin;
115 private long fSelectionEnd;
116 private long fTimeLimit;
117
118 // Private listener lists
119 private final ListenerList fModelListeners;
120
121 // ------------------------------------------------------------------------
122 // Constructors
123 // ------------------------------------------------------------------------
124
125 /**
126 * Default constructor with default number of buckets.
127 */
128 public HistogramDataModel() {
129 this(0, DEFAULT_NUMBER_OF_BUCKETS);
130 }
131
132 /**
133 * Constructor with a start time and the default number of buckets.
134 *
135 * @param startTime
136 * The histogram start time
137 */
138 public HistogramDataModel(long startTime) {
139 this(startTime, DEFAULT_NUMBER_OF_BUCKETS);
140 }
141
142 /**
143 * Constructor with non-default number of buckets.
144 *
145 * @param nbBuckets
146 * A number of buckets.
147 */
148 public HistogramDataModel(int nbBuckets) {
149 this(0, nbBuckets);
150 }
151
152 /**
153 * Constructor with a start time and a non-default number of buckets.
154 *
155 * @param startTime
156 * the histogram start time
157 * @param nbBuckets
158 * A number of buckets.
159 */
160 public HistogramDataModel(long startTime, int nbBuckets) {
161 fFirstBucketTime = fFirstEventTime = fEndTime = startTime;
162 fNbBuckets = nbBuckets;
163 fBuckets = new HistogramBucket[nbBuckets];
164 fLostEventsBuckets = new long[nbBuckets];
165 fModelListeners = new ListenerList();
166 clear();
167 }
168
169 /**
170 * Copy constructor.
171 *
172 * @param other
173 * A model to copy.
174 */
175 public HistogramDataModel(HistogramDataModel other) {
176 fNbBuckets = other.fNbBuckets;
177 fBuckets = new HistogramBucket[fNbBuckets];
178 for (int i = 0; i < fNbBuckets; i++) {
179 fBuckets[i] = new HistogramBucket(other.fBuckets[i]);
180 }
181 fLostEventsBuckets = Arrays.copyOf(other.fLostEventsBuckets, fNbBuckets);
182 fBucketDuration = Math.max(other.fBucketDuration, 1);
183 fNbEvents = other.fNbEvents;
184 fLastBucket = other.fLastBucket;
185 fFirstBucketTime = other.fFirstBucketTime;
186 fFirstEventTime = other.fFirstEventTime;
187 fEndTime = other.fEndTime;
188 fSelectionBegin = other.fSelectionBegin;
189 fSelectionEnd = other.fSelectionEnd;
190 fTimeLimit = other.fTimeLimit;
191 fModelListeners = new ListenerList();
192 Object[] listeners = other.fModelListeners.getListeners();
193 for (Object listener : listeners) {
194 fModelListeners.add(listener);
195 }
196 }
197
198 /**
199 * Disposes the data model
200 */
201 public void dispose() {
202 fTraceMap.clear();
203 fTrace = null;
204 }
205
206 // ------------------------------------------------------------------------
207 // Accessors
208 // ------------------------------------------------------------------------
209
210 /**
211 * Returns the number of events in the data model.
212 *
213 * @return number of events.
214 */
215 public long getNbEvents() {
216 return fNbEvents;
217 }
218
219 /**
220 * Returns the number of buckets in the model.
221 *
222 * @return number of buckets.
223 */
224 public int getNbBuckets() {
225 return fNbBuckets;
226 }
227
228 /**
229 * Returns the current bucket duration.
230 *
231 * @return bucket duration
232 */
233 public long getBucketDuration() {
234 return fBucketDuration;
235 }
236
237 /**
238 * Returns the time value of the first bucket in the model.
239 *
240 * @return time of first bucket.
241 */
242 public long getFirstBucketTime() {
243 return fFirstBucketTime;
244 }
245
246 /**
247 * Returns the time of the first event in the model.
248 *
249 * @return time of first event.
250 */
251 public long getStartTime() {
252 return fFirstEventTime;
253 }
254
255 /**
256 * Sets the trace of this model.
257 *
258 * @param trace
259 * - a {@link ITmfTrace}
260 */
261 public void setTrace(ITmfTrace trace) {
262 this.fTrace = trace;
263 fTraceMap.clear();
264 int i = 0;
265 for (ITmfTrace tr : TmfTraceManager.getTraceSet(fTrace)) {
266 fTraceMap.put(tr, i);
267 i++;
268 }
269 }
270
271 /**
272 * Gets the trace of this model.
273 *
274 * @return a {@link ITmfTrace}
275 */
276 public ITmfTrace getTrace() {
277 return this.fTrace;
278 }
279
280 /**
281 * Gets the trace names of this model.
282 *
283 * @return an array of trace names
284 */
285 public String[] getTraceNames() {
286 FluentIterable<ITmfTrace> traces = FluentIterable.from(TmfTraceManager.getTraceSet(fTrace));
287 FluentIterable<String> traceNames = traces.transform(new Function<ITmfTrace, String>() {
288 @Override
289 public String apply(ITmfTrace input) {
290 return input.getName();
291 }
292 });
293 return traceNames.toArray(String.class);
294 }
295
296 /**
297 * Gets the number of traces of this model.
298 *
299 * @return the number of traces of this model.
300 */
301 public int getNbTraces() {
302 Collection<ITmfTrace> traces = TmfTraceManager.getTraceSet(fTrace);
303 if (traces.isEmpty()) {
304 return 1; // no trace set yet: behave as if there were a single trace
305 }
306 return traces.size();
307 }
308
309 /**
310 * Sets the model time range
311 *
312 * @param startTime
313 * the histogram range start time
314 * @param endTime
315 * the histogram range end time
316 */
317 public void setTimeRange(long startTime, long endTime) {
318 fFirstBucketTime = fFirstEventTime = fEndTime = startTime;
319 fBucketDuration = 1;
320 updateEndTime();
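        // Grow the bucket duration by successive merges until the requested end time fits within the time limit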
321 while (endTime >= fTimeLimit) {
322 mergeBuckets();
323 }
324 }
325
326 /**
327 * Set the end time. Setting this ensures that the corresponding bucket is
328 * displayed regardless of the event counts.
329 *
330 * @param endTime
331 * the time of the last used bucket
332 */
333 public void setEndTime(long endTime) {
334 fEndTime = endTime;
335 fLastBucket = (int) ((endTime - fFirstBucketTime) / fBucketDuration);
336 }
337
338 /**
339 * Returns the end time.
340 *
341 * @return the time of the last used bucket
342 */
343 public long getEndTime() {
344 return fEndTime;
345 }
346
347 /**
348 * Returns the begin time of the current selection in the model.
349 *
350 * @return the begin time of the current selection.
351 */
352 public long getSelectionBegin() {
353 return fSelectionBegin;
354 }
355
356 /**
357 * Returns the end time of the current selection in the model.
358 *
359 * @return the end time of the current selection.
360 */
361 public long getSelectionEnd() {
362 return fSelectionEnd;
363 }
364
365 /**
366 * Returns the time limit, which is: start time + nbBuckets * bucketDuration
367 *
368 * @return the time limit.
369 */
370 public long getTimeLimit() {
371 return fTimeLimit;
372 }
373
374 // ------------------------------------------------------------------------
375 // Listener handling
376 // ------------------------------------------------------------------------
377
378 /**
379 * Add a listener to the model to be informed about model changes.
380 *
381 * @param listener
382 * A listener to add.
383 */
384 public void addHistogramListener(IHistogramModelListener listener) {
385 fModelListeners.add(listener);
386 }
387
388 /**
389 * Remove a given model listener.
390 *
391 * @param listener
392 * A listener to remove.
393 */
394 public void removeHistogramListener(IHistogramModelListener listener) {
395 fModelListeners.remove(listener);
396 }
397
398 // Notify listeners (always)
399 private void fireModelUpdateNotification() {
400 fireModelUpdateNotification(0);
401 }
402
403 // Notify listeners only on a refresh boundary
404 private void fireModelUpdateNotification(long count) {
405 if ((count % REFRESH_FREQUENCY) == 0) {
406 Object[] listeners = fModelListeners.getListeners();
407 for (Object listener2 : listeners) {
408 IHistogramModelListener listener = (IHistogramModelListener) listener2;
409 listener.modelUpdated();
410 }
411 }
412 }
413
414 // ------------------------------------------------------------------------
415 // Operations
416 // ------------------------------------------------------------------------
417
418 @Override
419 public void complete() {
420 fireModelUpdateNotification();
421 }
422
423 /**
424 * Clear the histogram model.
425 *
426 * @see org.eclipse.tracecompass.tmf.ui.views.distribution.model.IBaseDistributionModel#clear()
427 */
428 @Override
429 public synchronized void clear() {
430 Arrays.fill(fBuckets, null);
431 Arrays.fill(fLostEventsBuckets, 0);
432 fNbEvents = 0;
433 fFirstBucketTime = 0;
434 fEndTime = 0;
435 fSelectionBegin = 0;
436 fSelectionEnd = 0;
437 fLastBucket = -1;
438 fBucketDuration = 1;
439 updateEndTime();
440 fireModelUpdateNotification();
441 }
442
443 /**
444 * Sets the current selection time range (no notification of listeners)
445 *
446 * @param beginTime
447 * The selection begin time.
448 * @param endTime
449 * The selection end time.
450 */
451 public void setSelection(long beginTime, long endTime) {
452 fSelectionBegin = beginTime;
453 fSelectionEnd = endTime;
454 }
455
456 /**
457 * Sets the current selection time range with notification of listeners
458 *
459 * @param beginTime
460 * The selection begin time.
461 * @param endTime
462 * The selection end time.
463 */
464 public void setSelectionNotifyListeners(long beginTime, long endTime) {
465 fSelectionBegin = beginTime;
466 fSelectionEnd = endTime;
467 fireModelUpdateNotification();
468 }
469
470 /**
471 * Add an event to the correct bucket, compacting the buckets if needed.
472 *
473 * @param eventCount
474 * The current event Count (for notification purposes)
475 * @param timestamp
476 * The timestamp of the event to count
477 * @param trace
478 * The event trace
479 */
480 @Override
481 public synchronized void countEvent(long eventCount, long timestamp, ITmfTrace trace) {
482
483 // Validate
484 if (timestamp < 0) {
485 return;
486 }
487
488 // Set the start/end time if not already done
489 if ((fFirstBucketTime == 0) && (fLastBucket == -1) && (fBuckets[0] == null) && (timestamp > 0)) {
490 fFirstBucketTime = timestamp;
491 fFirstEventTime = timestamp;
492 updateEndTime();
493 }
494
495 if (timestamp < fFirstEventTime) {
496 fFirstEventTime = timestamp;
497 }
498
499 if (fEndTime < timestamp) {
500 fEndTime = timestamp;
501 }
502
503 if (timestamp >= fFirstBucketTime) {
504
505 // Compact as needed
506 while (timestamp >= fTimeLimit) {
507 mergeBuckets();
508 }
509
510 } else {
511
512 // get offset for adjustment
513 long preMergeOffset = getOffset(timestamp);
514
515 // Compact as needed
516 while ((fLastBucket + preMergeOffset) >= fNbBuckets) {
517 mergeBuckets();
518 preMergeOffset = getOffset(timestamp);
519 }
520
521 // after merging the offset should be less than number of buckets
522 int offset = (int) preMergeOffset;
523 moveBuckets(offset);
524
525 fLastBucket = fLastBucket + offset;
526
527 fFirstBucketTime = fFirstBucketTime - (offset * fBucketDuration);
528 updateEndTime();
529 }
530
531 // Increment the right bucket
532 int index = (int) ((timestamp - fFirstBucketTime) / fBucketDuration);
533 if (fBuckets[index] == null) {
534 fBuckets[index] = new HistogramBucket(getNbTraces());
535 }
536 Integer traceIndex = fTraceMap.get(trace);
537 if (traceIndex == null) {
538 traceIndex = 0;
539 }
540 fBuckets[index].addEvent(traceIndex);
541 fNbEvents++;
542 if (fLastBucket < index) {
543 fLastBucket = index;
544 }
545
546 fireModelUpdateNotification(eventCount);
547 }
548
549 /**
550 * Add lost events to the correct buckets, compacting the buckets if needed.
551 *
552 * @param timeRange
553 * time range of a lost event
554 * @param nbLostEvents
555 * the number of lost events
556 * @param fullRange
557 * true if the histogram request covers the full range, false if it covers a partial time range
558 */
559 public void countLostEvent(TmfTimeRange timeRange, long nbLostEvents, boolean fullRange) {
560
561 long startTime = timeRange.getStartTime().getValue();
562 long endTime = timeRange.getEndTime().getValue();
563
564 // Validate
565 if (startTime < 0 || endTime < 0) {
566 return;
567 }
568
569 // Set the start/end time if not already done
570 if ((fFirstBucketTime == 0) && (fLastBucket == -1) && (fBuckets[0] == null)) {
571 fFirstBucketTime = startTime;
572 fFirstEventTime = startTime;
573 updateEndTime();
574 }
575
576 // Compact as needed
577 if (fullRange) {
578 while (endTime >= fTimeLimit) {
579 mergeBuckets();
580 }
581 }
582
583 int indexStart = (int) ((startTime - fFirstBucketTime) / fBucketDuration);
584 int indexEnd = (int) ((endTime - fFirstBucketTime) / fBucketDuration);
585 int nbBucketRange = (indexEnd - indexStart) + 1;
586
587 int lostEventPerBucket = (int) Math.ceil((double) nbLostEvents / nbBucketRange);
588 long lastLostCol = Math.max(1, nbLostEvents - lostEventPerBucket * (nbBucketRange - 1));
589
590 // Increment the appropriate buckets, bearing in mind that, because of the
591 // time range, some lost events are almost certainly out of range
592 for (int index = indexStart; index <= indexEnd && index < fLostEventsBuckets.length; index++) {
593 if (index == (indexStart + nbBucketRange - 1)) {
594 fLostEventsBuckets[index] += lastLostCol;
595 } else {
596 fLostEventsBuckets[index] += lostEventPerBucket;
597 }
598 }
599
600 fNbEvents++;
601
602 fireModelUpdateNotification(nbLostEvents);
603 }
604
605 /**
606 * Scale the model data to the width, height and bar width requested.
607 *
608 * @param width
609 * The width of the histogram canvas
610 * @param height
611 * The height of the histogram canvas
612 * @param barWidth
613 * The width (in pixels) of a histogram bar
614 * @return the result array, of size [width], in which the highest value
615 * does not exceed [height]
616 *
617 * @see org.eclipse.tracecompass.tmf.ui.views.histogram.IHistogramDataModel#scaleTo(int,
618 * int, int)
619 */
620 @Override
621 public HistogramScaledData scaleTo(int width, int height, int barWidth) {
622 // Basic validation
623 if ((width <= 0) || (height <= 0) || (barWidth <= 0)) {
624 throw new AssertionError("Invalid histogram dimensions (" + width + "x" + height + ", barWidth=" + barWidth + ")"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
625 }
626 if (fBucketDuration == 0) {
627 throw new IllegalStateException("Bucket width is 0, that should be impossible"); //$NON-NLS-1$
628 }
629
630 // The result structure
631 HistogramScaledData result = new HistogramScaledData(width, height, barWidth);
632
633 // Scale horizontally
634 result.fMaxValue = 0;
635
636 int nbBars = width / barWidth;
637 double bucketsPerBar = ((double) fLastBucket / nbBars);
638 final long modelBucketStartTime = fFirstBucketTime;
639 final long modelBucketEndTime = fEndTime;
640 /*
641 * If there is only one model bucket, use a duration of 1 to spread the
642 * value over the scaled width, but store a scaled bucket duration of 0
643 * to prevent the half-bucket offset in the bucket time calculations.
644 */
645 double bucketDuration = Math.max(modelBucketEndTime - modelBucketStartTime, 1) / (double) nbBars;
646 result.fBucketDuration = fLastBucket == 0 ? 0 : bucketDuration;
647 int scaledCount = 0;
648 int scaledCountLostEvent = 0;
649 int offset = (int) (0.5 / bucketDuration);
650 for (int i = 0; i < result.fData.length; i++) {
651 result.fData[i] = new HistogramBucket(getNbTraces());
652 }
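        // Merge each model bucket into the scaled bar(s) it overlaps, accumulating per-bar event and lost-event counts and tracking the maxima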
653 for (int modelIndex = 0; modelIndex <= fLastBucket; modelIndex++) {
654 double done = (double) modelIndex / (double) (fLastBucket);
655 double doneNext = (double) (modelIndex + 1) / (double) (fLastBucket);
656 final int scaledStart = Math.max((int) (done * nbBars) - offset, 0);
657 final int scaledEnd = Math.min((int) (doneNext * nbBars) - offset, nbBars - 1);
658 int scaledIndex = scaledStart;
659 final HistogramBucket currentModelBucket = fBuckets[modelIndex];
660 if (currentModelBucket != null) {
661 do {
662 // Make sure the last model bucket is counted in the last scaled index
663 scaledIndex = Math.min(scaledIndex, nbBars - 1);
664 if (result.fData[scaledIndex].getNbEvents() == 0) {
665 scaledCount = 0;
666 scaledCountLostEvent = 0;
667 }
668 result.fData[scaledIndex].add(currentModelBucket);
669 result.fLostEventsData[scaledIndex] += fLostEventsBuckets[modelIndex];
670 scaledCountLostEvent += fLostEventsBuckets[modelIndex];
671 scaledCount += currentModelBucket.getNbEvents();
672 if (!currentModelBucket.isEmpty()) {
673 result.fLastBucket = scaledIndex;
674 }
675 if (result.fMaxValue < scaledCount) {
676 result.fMaxValue = scaledCount;
677 }
678 if (result.fMaxCombinedValue < scaledCount + scaledCountLostEvent) {
679 result.fMaxCombinedValue = scaledCount + scaledCountLostEvent;
680 }
681 scaledIndex++;
682 } while (scaledIndex < scaledEnd);
683 }
684 }
685
686 // Scale vertically
687 if (result.fMaxValue > 0) {
688 result.fScalingFactor = (double) height / result.fMaxValue;
689 }
690 if (result.fMaxCombinedValue > 0) {
691 result.fScalingFactorCombined = (double) height / result.fMaxCombinedValue;
692 }
693
694 fBucketDuration = Math.max(fBucketDuration, 1);
695
696 // Set selection begin and end index in the scaled histogram
697 if (fSelectionBegin == fEndTime) {
698 // make sure selection is visible at the end
699 result.fSelectionBeginBucket = result.fWidth - 1;
700 } else {
701 result.fSelectionBeginBucket = (int) Math.round(((fSelectionBegin - fFirstBucketTime) / (double) fBucketDuration) / bucketsPerBar);
702 }
703
704 if (fSelectionEnd == fEndTime) {
705 // make sure selection is visible at the end
706 result.fSelectionEndBucket = result.fWidth - 1;
707 } else {
708 result.fSelectionEndBucket = (int) Math.round(((fSelectionEnd - fFirstBucketTime) / (double) fBucketDuration) / bucketsPerBar);
709 }
710
711 result.fFirstBucketTime = fFirstBucketTime;
712 result.fFirstEventTime = fFirstEventTime;
713 return result;
714 }
715
716 // ------------------------------------------------------------------------
717 // Helper functions
718 // ------------------------------------------------------------------------
719
720 private void updateEndTime() {
721 fTimeLimit = fFirstBucketTime + (fNbBuckets * fBucketDuration);
722 }
723
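    // Merge adjacent buckets pairwise: this halves the number of used buckets and doubles the bucket duration (and hence the time span covered by the model)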
724 private void mergeBuckets() {
725 for (int i = 0; i < (fNbBuckets / 2); i++) {
726 fBuckets[i] = new HistogramBucket(fBuckets[2 * i], fBuckets[(2 * i) + 1]);
727 fLostEventsBuckets[i] = fLostEventsBuckets[2 * i] + fLostEventsBuckets[(2 * i) + 1];
728 }
729 Arrays.fill(fBuckets, fNbBuckets / 2, fNbBuckets, null);
730 Arrays.fill(fLostEventsBuckets, fNbBuckets / 2, fNbBuckets, 0);
731 fBucketDuration *= 2;
732 updateEndTime();
733 fLastBucket = (fNbBuckets / 2) - 1;
734 }
735
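    // Shift the bucket contents 'offset' positions to the right and clear the freed buckets at the start; used when an event older than the current base time is received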
736 private void moveBuckets(int offset) {
737 for (int i = fNbBuckets - 1; i >= offset; i--) {
738 fBuckets[i] = new HistogramBucket(fBuckets[i - offset]);
739 fLostEventsBuckets[i] = fLostEventsBuckets[i - offset];
740 }
741
742 for (int i = 0; i < offset; i++) {
743 fBuckets[i] = null;
744 fLostEventsBuckets[i] = 0;
745 }
746 }
747
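    // Returns the number of bucket durations (ceiling division) the base time must move back so that 'timestamp' falls within the first bucket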
748 private long getOffset(long timestamp) {
749 long offset = (fFirstBucketTime - timestamp) / fBucketDuration;
750 if (((fFirstBucketTime - timestamp) % fBucketDuration) != 0) {
751 offset++;
752 }
753 return offset;
754 }
755 }