-/*******************************************************************************\r
- * Copyright (c) 2011 Ericsson\r
- * \r
- * All rights reserved. This program and the accompanying materials are\r
- * made available under the terms of the Eclipse Public License v1.0 which\r
- * accompanies this distribution, and is available at\r
- * http://www.eclipse.org/legal/epl-v10.html\r
- * \r
- * Contributors:\r
- * Patrick Tasse - Initial API and implementation\r
- ******************************************************************************/\r
-\r
-package org.eclipse.linuxtools.tmf.ui.viewers.events;\r
-\r
-import java.util.ArrayList;\r
-\r
-import org.eclipse.core.runtime.IProgressMonitor;\r
-import org.eclipse.core.runtime.IStatus;\r
-import org.eclipse.core.runtime.Status;\r
-import org.eclipse.core.runtime.jobs.Job;\r
-import org.eclipse.linuxtools.tmf.core.component.ITmfDataProvider;\r
-import org.eclipse.linuxtools.tmf.core.event.TmfEvent;\r
-import org.eclipse.linuxtools.tmf.core.filter.ITmfFilter;\r
-import org.eclipse.linuxtools.tmf.core.request.TmfDataRequest;\r
-import org.eclipse.linuxtools.tmf.core.trace.ITmfTrace;\r
-\r
-public class TmfEventsCache {\r
-\r
- public class CachedEvent {\r
- TmfEvent event;\r
- long rank;\r
-\r
- public CachedEvent (TmfEvent event, long rank) {\r
- this.event = event;\r
- this.rank = rank;\r
- }\r
- }\r
-\r
- private CachedEvent[] fCache;\r
- private int fCacheStartIndex = 0;\r
- private int fCacheEndIndex = 0;\r
-\r
- private ITmfTrace<?> fTrace;\r
- private TmfEventsTable fTable;\r
- private ITmfFilter fFilter;\r
- private ArrayList<Integer> fFilterIndex = new ArrayList<Integer>(); // contains the event rank at each 'cache size' filtered events\r
-\r
- public TmfEventsCache(int cacheSize, TmfEventsTable table) {\r
- fCache = new CachedEvent[cacheSize];\r
- fTable = table;\r
- }\r
- \r
- public void setTrace(ITmfTrace<?> trace) {\r
- fTrace = trace;\r
- clear();\r
- }\r
- \r
- public void clear() {\r
- fCacheStartIndex = 0;\r
- fCacheEndIndex = 0;\r
- fFilterIndex.clear();\r
- }\r
-\r
- public void applyFilter(ITmfFilter filter) {\r
- fFilter = filter;\r
- clear();\r
- }\r
- \r
- public void clearFilter() {\r
- fFilter = null;\r
- clear();\r
- }\r
- \r
- public CachedEvent getEvent(int index) {\r
- if ((index >= fCacheStartIndex) && (index < fCacheEndIndex)) {\r
- int i = index - fCacheStartIndex;\r
- return fCache[i];\r
- }\r
- populateCache(index);\r
- return null;\r
- }\r
-\r
- public CachedEvent peekEvent(int index) {\r
- if ((index >= fCacheStartIndex) && (index < fCacheEndIndex)) {\r
- int i = index - fCacheStartIndex;\r
- return fCache[i];\r
- }\r
- return null;\r
- }\r
- \r
- public synchronized void storeEvent(TmfEvent event, long rank, int index) {\r
- if (fCacheStartIndex == fCacheEndIndex) {\r
- fCacheStartIndex = index;\r
- fCacheEndIndex = index;\r
- }\r
- if (index == fCacheEndIndex) {\r
- int i = index - fCacheStartIndex;\r
- if (i < fCache.length) {\r
- fCache[i] = new CachedEvent(event.clone(), rank);\r
- fCacheEndIndex++;\r
- }\r
- }\r
- if (fFilter != null && index % fCache.length == 0) {\r
- int i = index / fCache.length;\r
- fFilterIndex.add(i, new Integer((int) rank));\r
- }\r
- }\r
- \r
- @SuppressWarnings("unchecked")\r
- public int getFilteredEventIndex(final long rank) {\r
- int current;\r
- int startRank;\r
- TmfDataRequest<TmfEvent> request;\r
- synchronized (this) {\r
- int start = 0;\r
- int end = fFilterIndex.size();\r
- \r
- if (fCacheEndIndex - fCacheStartIndex > 1) {\r
- if (rank < fCache[0].rank) {\r
- end = fCacheStartIndex / fCache.length + 1;\r
- } else if (rank > fCache[fCacheEndIndex - fCacheStartIndex - 1].rank) {\r
- start = fCacheEndIndex / fCache.length;\r
- } else {\r
- for (int i = 0; i < fCacheEndIndex - fCacheStartIndex; i++) {\r
- if (fCache[i].rank >= rank) {\r
- return fCacheStartIndex + i;\r
- }\r
- }\r
- return fCacheEndIndex;\r
- }\r
- }\r
- \r
- current = (start + end) / 2;\r
- while (current != start) {\r
- if (rank < fFilterIndex.get(current)) {\r
- end = current;\r
- current = (start + end) / 2;\r
- } else {\r
- start = current;\r
- current = (start + end) / 2;\r
- }\r
- }\r
- startRank = fFilterIndex.get(current);\r
- }\r
- \r
- final int index = current * fCache.length;\r
- \r
- class DataRequest<T extends TmfEvent> extends TmfDataRequest<T> {\r
- int fRank;\r
- int fIndex;\r
- \r
- DataRequest(Class<T> dataType, int start, int nbRequested) {\r
- super(dataType, start, nbRequested);\r
- fRank = start;\r
- fIndex = index;\r
- }\r
- \r
- @Override\r
- public void handleData(T event) {\r
- super.handleData(event);\r
- if (isCancelled()) return;\r
- if (fRank >= rank) {\r
- cancel();\r
- return;\r
- }\r
- fRank++;\r
- if (fFilter.matches(event)) {\r
- fIndex++;\r
- }\r
- }\r
-\r
- public int getFilteredIndex() {\r
- return fIndex;\r
- }\r
- }\r
- \r
- request = new DataRequest<TmfEvent>(TmfEvent.class, startRank, TmfDataRequest.ALL_DATA);\r
- ((ITmfDataProvider<TmfEvent>) fTrace).sendRequest(request);\r
- try {\r
- request.waitForCompletion();\r
- return ((DataRequest<TmfEvent>) request).getFilteredIndex();\r
- } catch (InterruptedException e) {\r
- }\r
- return 0;\r
- }\r
- \r
- // ------------------------------------------------------------------------\r
- // Event cache population\r
- // ------------------------------------------------------------------------\r
- \r
- // The event fetching job\r
- private Job job;\r
- private synchronized void populateCache(final int index) {\r
-\r
- /* Check if the current job will fetch the requested event:\r
- * 1. The job must exist\r
- * 2. It must be running (i.e. not completed)\r
- * 3. The requested index must be within the cache range\r
- * \r
- * If the job meets these conditions, we simply exit.\r
- * Otherwise, we create a new job but we might have to cancel\r
- * an existing job for an obsolete range.\r
- */\r
- if (job != null) {\r
- if (job.getState() != Job.NONE) {\r
- if (index >= fCacheStartIndex && index < (fCacheStartIndex + fCache.length)) {\r
- return;\r
- }\r
- // The new index is out of the requested range\r
- // Kill the job and start a new one\r
- job.cancel();\r
- }\r
- }\r
- \r
- fCacheStartIndex = index;\r
- fCacheEndIndex = index;\r
-\r
- job = new Job("Fetching Events") { //$NON-NLS-1$\r
- private int startIndex = index;\r
- private int skipCount = 0;\r
- @Override\r
- @SuppressWarnings("unchecked")\r
- protected IStatus run(final IProgressMonitor monitor) {\r
-\r
- int nbRequested;\r
- if (fFilter == null) {\r
- nbRequested = fCache.length;\r
- } else {\r
- nbRequested = TmfDataRequest.ALL_DATA;\r
- int i = index / fCache.length;\r
- if (i < fFilterIndex.size()) {\r
- startIndex = fFilterIndex.get(i);\r
- skipCount = index - (i * fCache.length);\r
- }\r
- }\r
- \r
- TmfDataRequest<TmfEvent> request = new TmfDataRequest<TmfEvent>(TmfEvent.class, startIndex, nbRequested) {\r
- private int count = 0;\r
- private long rank = startIndex;\r
- @Override\r
- public void handleData(TmfEvent event) {\r
- // If the job is canceled, cancel the request so waitForCompletion() will unlock\r
- if (monitor.isCanceled()) {\r
- cancel();\r
- return;\r
- }\r
- super.handleData(event);\r
- if (event != null) {\r
- if ((fFilter == null || fFilter.matches(event)) && skipCount-- <= 0) {\r
- synchronized (TmfEventsCache.this) {\r
- fCache[count] = new CachedEvent(event.clone(), rank);\r
- count++;\r
- fCacheEndIndex++;\r
- }\r
- if (fFilter != null) {\r
- fTable.cacheUpdated(false);\r
- }\r
- }\r
- }\r
- if (count >= fCache.length) {\r
- cancel();\r
- } else if (fFilter != null && count >= fTable.getTable().getItemCount() - 3) { // -1 for header row, -2 for top and bottom filter status rows\r
- cancel();\r
- }\r
- rank++;\r
- }\r
- };\r
-\r
- ((ITmfDataProvider<TmfEvent>) fTrace).sendRequest(request);\r
- try {\r
- request.waitForCompletion();\r
- } catch (InterruptedException e) {\r
- e.printStackTrace();\r
- }\r
-\r
- fTable.cacheUpdated(true);\r
- \r
- // Flag the UI thread that the cache is ready\r
- if (monitor.isCanceled()) {\r
- return Status.CANCEL_STATUS;\r
- } else {\r
- return Status.OK_STATUS;\r
- }\r
- }\r
- };\r
- //job.setSystem(true);\r
- job.setPriority(Job.SHORT);\r
- job.schedule();\r
- }\r
-\r
-}\r
+/*******************************************************************************
+ * Copyright (c) 2011 Ericsson
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Patrick Tasse - Initial API and implementation
+ ******************************************************************************/
+
+package org.eclipse.linuxtools.tmf.ui.viewers.events;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.linuxtools.internal.tmf.ui.Activator;
+import org.eclipse.linuxtools.tmf.core.component.ITmfDataProvider;
+import org.eclipse.linuxtools.tmf.core.event.ITmfEvent;
+import org.eclipse.linuxtools.tmf.core.filter.ITmfFilter;
+import org.eclipse.linuxtools.tmf.core.request.TmfDataRequest;
+import org.eclipse.linuxtools.tmf.core.trace.ITmfTrace;
+
+/**
+ * The generic TMF Events table events cache
+ *
+ * This can help avoid re-reading the trace when the user scrolls a window,
+ * for example.
+ *
+ * @version 1.0
+ * @author Patrick Tasse
+ */
+public class TmfEventsCache {
+
+ /**
+ * The generic TMF Events table cached event
+ *
+ * @version 1.0
+ * @author Patrick Tasse
+ */
+ public static class CachedEvent {
+ // The cached event (a clone of the original, see storeEvent()/populateCache())
+ ITmfEvent event;
+ // The rank of this event in the trace
+ long rank;
+
+ /**
+ * Constructor for new cached events.
+ *
+ * @param iTmfEvent
+ * The original trace event
+ * @param rank
+ * The rank of this event in the trace
+ */
+ public CachedEvent (ITmfEvent iTmfEvent, long rank) {
+ this.event = iTmfEvent;
+ this.rank = rank;
+ }
+ }
+
+ // Fixed-size event cache; fCache[0] holds the event at table index fCacheStartIndex
+ private final CachedEvent[] fCache;
+ // Half-open range [fCacheStartIndex, fCacheEndIndex) of table indexes currently cached
+ private int fCacheStartIndex = 0;
+ private int fCacheEndIndex = 0;
+
+ private ITmfTrace fTrace;
+ private final TmfEventsTable fTable;
+ private ITmfFilter fFilter;
+ private final List<Integer> fFilterIndex = new ArrayList<Integer>(); // contains the event rank at each 'cache size' filtered events
+
+ /**
+ * Constructor for the event cache
+ *
+ * @param cacheSize
+ * The size of the cache, in number of events
+ * @param table
+ * The Events table this cache will cover
+ */
+ public TmfEventsCache(int cacheSize, TmfEventsTable table) {
+ fCache = new CachedEvent[cacheSize];
+ fTable = table;
+ }
+
+ /**
+ * Assign a new trace to this events cache. This clears the current
+ * contents.
+ *
+ * @param trace
+ * The trace to assign.
+ */
+ public void setTrace(ITmfTrace trace) {
+ fTrace = trace;
+ clear();
+ }
+
+ /**
+ * Clear the current contents of this cache.
+ */
+ public synchronized void clear() {
+ // Cancel any in-flight population job so it cannot repopulate the
+ // cleared cache with events from an obsolete request
+ if (job != null && job.getState() != Job.NONE) {
+ job.cancel();
+ }
+ Arrays.fill(fCache, null);
+ fCacheStartIndex = 0;
+ fCacheEndIndex = 0;
+ fFilterIndex.clear();
+ }
+
+ /**
+ * Apply a filter on this event cache. This clears the current cache
+ * contents.
+ *
+ * @param filter
+ * The ITmfFilter to apply.
+ */
+ public void applyFilter(ITmfFilter filter) {
+ fFilter = filter;
+ clear();
+ }
+
+ /**
+ * Clear the current filter on this cache. This also clears the current
+ * cache contents.
+ */
+ public void clearFilter() {
+ fFilter = null;
+ clear();
+ }
+
+ /**
+ * Get an event from the cache. If the event is not currently cached, a
+ * background job is started to populate the cache around the given index
+ * and 'null' is returned; the table is notified through
+ * TmfEventsTable.cacheUpdated() once the events become available.
+ *
+ * @param index
+ * The index of this event in the cache
+ * @return The cached event, or 'null' if the event is not cached yet
+ */
+ public synchronized CachedEvent getEvent(int index) {
+ if ((index >= fCacheStartIndex) && (index < fCacheEndIndex)) {
+ int i = index - fCacheStartIndex;
+ return fCache[i];
+ }
+ populateCache(index);
+ return null;
+ }
+
+ /**
+ * Read an event, but without triggering a cache population if it is
+ * absent.
+ *
+ * @param index
+ * Index of the event to peek
+ * @return A reference to the event, or 'null' if there is no event at this
+ * index
+ */
+ public synchronized CachedEvent peekEvent(int index) {
+ if ((index >= fCacheStartIndex) && (index < fCacheEndIndex)) {
+ int i = index - fCacheStartIndex;
+ return fCache[i];
+ }
+ return null;
+ }
+
+ /**
+ * Add a trace event to the cache.
+ *
+ * @param event
+ * The original trace event to be cached
+ * @param rank
+ * The rank of this event in the trace
+ * @param index
+ * The index this event will occupy in the cache
+ */
+ public synchronized void storeEvent(ITmfEvent event, long rank, int index) {
+ // Only append at the current end of the cached range, and only while
+ // the fixed-size cache array has room
+ if (index == fCacheEndIndex) {
+ int i = index - fCacheStartIndex;
+ if (i < fCache.length) {
+ // Clone so the cache keeps its own copy of the event
+ fCache[i] = new CachedEvent(event.clone(), rank);
+ fCacheEndIndex++;
+ }
+ }
+ // Record a checkpoint (trace rank) every 'cache size' filtered events;
+ // getFilteredEventIndex() and populateCache() use these to avoid
+ // re-filtering the trace from the beginning
+ if ((fFilter != null) && ((index % fCache.length) == 0)) {
+ int i = index / fCache.length;
+ fFilterIndex.add(i, Integer.valueOf((int) rank));
+ }
+ }
+
+ /**
+ * Get the cache index of an event from his rank in the trace. This will
+ * take in consideration any filter that might be applied.
+ *
+ * @param rank
+ * The rank of the event in the trace
+ * @return The position (index) this event should use once cached
+ */
+ public int getFilteredEventIndex(final long rank) {
+ int current;
+ int startRank;
+ TmfDataRequest request;
+ // Snapshot the filter so a concurrent applyFilter()/clearFilter()
+ // cannot change it while the request below is running
+ final ITmfFilter filter = fFilter;
+ synchronized (this) {
+ int start = 0;
+ int end = fFilterIndex.size();
+
+ if ((fCacheEndIndex - fCacheStartIndex) > 1) {
+ // Use the cached events to narrow the checkpoint search range,
+ // or answer directly if the rank falls inside the cached range
+ if (rank < fCache[0].rank) {
+ end = (fCacheStartIndex / fCache.length) + 1;
+ } else if (rank > fCache[fCacheEndIndex - fCacheStartIndex - 1].rank) {
+ start = fCacheEndIndex / fCache.length;
+ } else {
+ // Rank is within the cached range: return the index of the
+ // first cached event at or after the requested rank
+ for (int i = 0; i < (fCacheEndIndex - fCacheStartIndex); i++) {
+ if (fCache[i].rank >= rank) {
+ return fCacheStartIndex + i;
+ }
+ }
+ return fCacheEndIndex;
+ }
+ }
+
+ // Binary search for the last filter checkpoint whose recorded rank
+ // is at or before the requested rank
+ current = (start + end) / 2;
+ while (current != start) {
+ if (rank < fFilterIndex.get(current)) {
+ end = current;
+ current = (start + end) / 2;
+ } else {
+ start = current;
+ current = (start + end) / 2;
+ }
+ }
+ startRank = fFilterIndex.size() > 0 ? fFilterIndex.get(current) : 0;
+ }
+
+ // Filtered index corresponding to the checkpoint found above
+ final int index = current * fCache.length;
+
+ // Replays events from the checkpoint rank up to the requested rank,
+ // counting filter matches to compute the exact filtered index
+ class DataRequest extends TmfDataRequest {
+ ITmfFilter requestFilter;
+ int requestRank; // rank of the next event to be handled
+ int requestIndex; // running count of matches, i.e. the filtered index
+
+ DataRequest(Class<? extends ITmfEvent> dataType, ITmfFilter reqFilter, int start, int nbRequested) {
+ super(dataType, start, nbRequested);
+ requestFilter = reqFilter;
+ requestRank = start;
+ requestIndex = index;
+ }
+
+ @Override
+ public void handleData(ITmfEvent event) {
+ super.handleData(event);
+ if (isCancelled()) {
+ return;
+ }
+ // Stop once the requested rank is reached; cancel unblocks
+ // the waitForCompletion() below
+ if (requestRank >= rank) {
+ cancel();
+ return;
+ }
+ requestRank++;
+ if (requestFilter.matches(event)) {
+ requestIndex++;
+ }
+ }
+
+ public int getFilteredIndex() {
+ return requestIndex;
+ }
+ }
+
+ request = new DataRequest(ITmfEvent.class, filter, startRank, TmfDataRequest.ALL_DATA);
+ ((ITmfDataProvider) fTrace).sendRequest(request);
+ try {
+ request.waitForCompletion();
+ return ((DataRequest) request).getFilteredIndex();
+ } catch (InterruptedException e) {
+ Activator.getDefault().logError("Filter request interrupted!", e); //$NON-NLS-1$
+ }
+ return 0;
+ }
+
+ // ------------------------------------------------------------------------
+ // Event cache population
+ // ------------------------------------------------------------------------
+
+ // The event fetching job
+ private Job job;
+ /**
+ * Start a background job that fills the cache with a page of events
+ * beginning at the given table index. The table is notified through
+ * cacheUpdated(false) as filtered events arrive and cacheUpdated(true)
+ * when the request completes.
+ */
+ private synchronized void populateCache(final int index) {
+
+ /* Check if the current job will fetch the requested event:
+ * 1. The job must exist
+ * 2. It must be running (i.e. not completed)
+ * 3. The requested index must be within the cache range
+ *
+ * If the job meets these conditions, we simply exit.
+ * Otherwise, we create a new job but we might have to cancel
+ * an existing job for an obsolete range.
+ */
+ if (job != null) {
+ if (job.getState() != Job.NONE) {
+ if ((index >= fCacheStartIndex) && (index < (fCacheStartIndex + fCache.length))) {
+ return;
+ }
+ // The new index is out of the requested range
+ // Kill the job and start a new one
+ job.cancel();
+ }
+ }
+
+ // Mark the cache as empty at the new start index; the request below
+ // repopulates it
+ fCacheStartIndex = index;
+ fCacheEndIndex = index;
+
+ job = new Job("Fetching Events") { //$NON-NLS-1$
+ private int startIndex = index;
+ private int skipCount = 0;
+ @Override
+ protected IStatus run(final IProgressMonitor monitor) {
+
+ int nbRequested;
+ if (fFilter == null) {
+ // No filter: table index == trace rank, request one cache page
+ nbRequested = fCache.length;
+ } else {
+ // Filtered: the rank of the index-th matching event is unknown,
+ // so start from the nearest checkpoint at or before it, skip
+ // the first 'skipCount' matches, and cancel once the page fills
+ nbRequested = TmfDataRequest.ALL_DATA;
+ int i = index / fCache.length;
+ if (i < fFilterIndex.size()) {
+ startIndex = fFilterIndex.get(i);
+ skipCount = index - (i * fCache.length);
+ }
+ }
+
+ TmfDataRequest request = new TmfDataRequest(ITmfEvent.class, startIndex, nbRequested) {
+ private int count = 0;
+ private long rank = startIndex;
+ @Override
+ public void handleData(ITmfEvent event) {
+ // If the job is canceled, cancel the request so waitForCompletion() will unlock
+ if (monitor.isCanceled()) {
+ cancel();
+ return;
+ }
+ super.handleData(event);
+ if (event != null) {
+ if (((fFilter == null) || fFilter.matches(event)) && (skipCount-- <= 0)) {
+ synchronized (TmfEventsCache.this) {
+ // Re-check under the lock: clear() may have
+ // cancelled this job and wiped the cache
+ if (monitor.isCanceled()) {
+ return;
+ }
+ fCache[count] = new CachedEvent(event.clone(), rank);
+ count++;
+ fCacheEndIndex++;
+ }
+ if (fFilter != null) {
+ fTable.cacheUpdated(false);
+ }
+ }
+ }
+ if (count >= fCache.length) {
+ cancel();
+ } else if ((fFilter != null) && (count >= (fTable.getTable().getItemCount() - 3))) { // -1 for header row, -2 for top and bottom filter status rows
+ cancel();
+ }
+ rank++;
+ }
+ };
+
+ ((ITmfDataProvider) fTrace).sendRequest(request);
+ try {
+ request.waitForCompletion();
+ } catch (InterruptedException e) {
+ Activator.getDefault().logError("Wait for completion interrupted for populateCache ", e); //$NON-NLS-1$
+ }
+
+ fTable.cacheUpdated(true);
+
+ // Flag the UI thread that the cache is ready
+ if (monitor.isCanceled()) {
+ return Status.CANCEL_STATUS;
+ }
+ return Status.OK_STATUS;
+ }
+ };
+ //job.setSystem(true);
+ job.setPriority(Job.SHORT);
+ job.schedule();
+ }
+
+}