ss: History trees can define their own node types
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HT_IO.java
index 66cd434ec58d7baa488e93b15bc8b4a7a5beae8c..b26225b733b99ea620d8f30183963be6d81a635d 100644
 
 package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
 
+import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull;
+
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.nio.channels.FileChannel;
+import java.util.logging.Logger;
 
 import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.NonNullByDefault;
+import java.util.Objects;
+import java.util.concurrent.ExecutionException;
+
+import org.eclipse.jdt.annotation.Nullable;
+import org.eclipse.tracecompass.common.core.log.TraceCompassLog;
 import org.eclipse.tracecompass.internal.statesystem.core.Activator;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree.IHTNodeFactory;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 
 /**
  * This class abstracts inputs/outputs of the HistoryTree nodes.
@@ -33,29 +45,72 @@ import org.eclipse.tracecompass.internal.statesystem.core.Activator;
  * HistoryTree must contain 1 and only 1 HT_IO element.
  *
  * @author Alexandre Montplaisir
- *
  */
-class HT_IO {
+public class HT_IO {
+
+    private static final Logger LOGGER = TraceCompassLog.getLogger(HT_IO.class);
+
+    // ------------------------------------------------------------------------
+    // Global cache of nodes
+    // ------------------------------------------------------------------------
 
-    @NonNullByDefault
-    private static final class CacheElement {
-        private final HTNode value;
-        private final HT_IO key;
+    private static final class CacheKey {
 
-        public CacheElement(HT_IO ss, HTNode node) {
-            key = ss;
-            value = node;
+        public final HT_IO fStateHistory;
+        public final int fSeqNumber;
+
+        public CacheKey(HT_IO stateHistory, int seqNumber) {
+            fStateHistory = stateHistory;
+            fSeqNumber = seqNumber;
         }
 
-        public HT_IO getKey() {
-            return key;
+        @Override
+        public int hashCode() {
+            return Objects.hash(fStateHistory, fSeqNumber);
         }
 
-        public HTNode getValue() {
-            return value;
+        @Override
+        public boolean equals(@Nullable Object obj) {
+            if (this == obj) {
+                return true;
+            }
+            if (obj == null) {
+                return false;
+            }
+            if (getClass() != obj.getClass()) {
+                return false;
+            }
+            CacheKey other = (CacheKey) obj;
+            return (fStateHistory.equals(other.fStateHistory) &&
+                    fSeqNumber == other.fSeqNumber);
         }
     }
 
+    private static final int CACHE_SIZE = 256;
+
+    private static final LoadingCache<CacheKey, HTNode> NODE_CACHE =
+        checkNotNull(CacheBuilder.newBuilder()
+            .maximumSize(CACHE_SIZE)
+            .build(new CacheLoader<CacheKey, HTNode>() {
+                @Override
+                public HTNode load(CacheKey key) throws IOException {
+                    HT_IO io = key.fStateHistory;
+                    int seqNb = key.fSeqNumber;
+
+                    LOGGER.finest(() -> "[HtIo:CacheMiss] seqNum=" + seqNb); //$NON-NLS-1$
+
+                    synchronized (io) {
+                        io.seekFCToNodePos(io.fFileChannelIn, seqNb);
+                        return HTNode.readNode(io.fConfig, io.fFileChannelIn, key.fStateHistory.fNodeFactory);
+                    }
+                }
+            }));
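
For readers unfamiliar with Guava caches: the block above builds a size-bounded LoadingCache whose values are produced on demand by the CacheLoader whenever get() misses. A minimal standalone sketch of the same pattern, with purely illustrative names and values:

    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class LoadingCacheSketch {
        /* Bounded cache: once maximumSize is exceeded, old entries are evicted. */
        private static final LoadingCache<Integer, String> CACHE = CacheBuilder.newBuilder()
                .maximumSize(256)
                .build(new CacheLoader<Integer, String>() {
                    @Override
                    public String load(Integer key) {
                        /* Runs only on a cache miss; the result is then kept in the cache. */
                        return "value-" + key;
                    }
                });

        public static void main(String[] args) throws ExecutionException {
            System.out.println(CACHE.get(42)); /* miss: load() is invoked */
            System.out.println(CACHE.get(42)); /* hit: served from the cache */
        }
    }
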
+
+    // ------------------------------------------------------------------------
+    // Instance fields
+    // ------------------------------------------------------------------------
+
     /* Configuration of the History Tree */
     private final HTConfig fConfig;
 
@@ -65,13 +120,13 @@ class HT_IO {
     private final FileChannel fFileChannelIn;
     private final FileChannel fFileChannelOut;
 
-    // TODO test/benchmark optimal cache size
-    /**
-     * Cache size, must be a power of 2
-     */
-    private static final int CACHE_SIZE = 256;
-    private static final int CACHE_MASK = CACHE_SIZE - 1;
-    private static final CacheElement NODE_CACHE[] = new CacheElement[CACHE_SIZE];
+    private final IHTNodeFactory fNodeFactory;
+
+    // ------------------------------------------------------------------------
+    // Methods
+    // ------------------------------------------------------------------------
+
 
     /**
      * Standard constructor
@@ -80,11 +135,13 @@ class HT_IO {
      *            The configuration object for the StateHistoryTree
      * @param newFile
      *            Flag indicating that the file must be created from scratch
+     * @param nodeFactory
+     *            The factory to create new nodes for this tree
      *
      * @throws IOException
      *             An exception can be thrown when file cannot be accessed
      */
-    public HT_IO(HTConfig config, boolean newFile) throws IOException {
+    public HT_IO(HTConfig config, boolean newFile, IHTNodeFactory nodeFactory) throws IOException {
         fConfig = config;
 
         File historyTreeFile = config.getStateFile();
@@ -112,6 +169,7 @@ class HT_IO {
         }
         fFileChannelIn = fFileInputStream.getChannel();
         fFileChannelOut = fFileOutputStream.getChannel();
+        fNodeFactory = nodeFactory;
     }
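
The nodeFactory parameter stored above is the heart of the change named in the commit title: HT_IO keeps an IHTNodeFactory and hands it to HTNode.readNode(), so each history-tree implementation can deserialize its own node types instead of relying on one hard-coded node class. The exact IHTNodeFactory signature lives in IHistoryTree and is not shown in this diff; the following is only a rough, hypothetical sketch of the idea:

    /* Hypothetical, simplified factory interface; the real IHistoryTree.IHTNodeFactory may differ. */
    interface NodeFactory<N> {
        N createNode(int seqNumber);
    }

    /* A reader parameterized by the factory instead of a concrete node class. */
    class NodeReader<N> {
        private final NodeFactory<N> fFactory;

        NodeReader(NodeFactory<N> factory) {
            fFactory = factory;
        }

        N readNode(int seqNumber) {
            /* ...parse the on-disk header here, then let the factory build the object... */
            return fFactory.createNode(seqNumber);
        }
    }

    class FactoryDemo {
        static final class CustomNode {
            final int fSeq;
            CustomNode(int seq) {
                fSeq = seq;
            }
        }

        public static void main(String[] args) {
            /* A tree implementation plugs in its own node type via the factory. */
            NodeReader<CustomNode> reader = new NodeReader<>(CustomNode::new);
            System.out.println(reader.readNode(7).fSeq); /* prints 7 */
        }
    }
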
 
     /**
@@ -125,55 +183,69 @@ class HT_IO {
      *             reading. Instead of using a big reader-writer lock, we'll
      *             just catch this exception.
      */
-    public synchronized @NonNull HTNode readNode(int seqNumber) throws ClosedChannelException {
-        /* Do a cache lookup */
-        int offset = (seqNumber + hashCode()) & CACHE_MASK;
-        CacheElement cachedNode = NODE_CACHE[offset];
-
-        if (cachedNode != null && cachedNode.getKey() == this && cachedNode.getValue().getSequenceNumber() == seqNumber) {
-            return cachedNode.getValue();
-        }
-
-        /* Lookup on disk */
+    public @NonNull HTNode readNode(int seqNumber) throws ClosedChannelException {
+        /* Do a cache lookup. If the node is not in the cache, it will be loaded from disk. */
+        LOGGER.finest(() -> "[HtIo:CacheLookup] seqNum=" + seqNumber); //$NON-NLS-1$
+        CacheKey key = new CacheKey(this, seqNumber);
         try {
-            seekFCToNodePos(fFileChannelIn, seqNumber);
-            HTNode readNode = HTNode.readNode(fConfig, fFileChannelIn);
-
-            /* Put the node in the cache. */
-            NODE_CACHE[offset] = new CacheElement(this, readNode);
-            return readNode;
+            return checkNotNull(NODE_CACHE.get(key));
 
-        } catch (ClosedChannelException e) {
-            throw e;
-        } catch (IOException e) {
+        } catch (ExecutionException e) {
+            /* Get the inner exception that was generated */
+            Throwable cause = e.getCause();
+            if (cause instanceof ClosedChannelException) {
+                throw (ClosedChannelException) cause;
+            }
             /*
-             * Other types of IOExceptions shouldn't happen at this point though
+             * Other types of IOExceptions shouldn't happen at this point though.
              */
             Activator.getDefault().logError(e.getMessage(), e);
             throw new IllegalStateException();
         }
     }
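
One detail of the Guava API worth spelling out: when the CacheLoader throws a checked exception (here an IOException or ClosedChannelException), LoadingCache.get() wraps it in an ExecutionException, which is why the catch block above inspects getCause(). A tiny illustration with made-up names:

    import java.io.IOException;
    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class UnwrapSketch {
        private static final LoadingCache<Integer, String> CACHE = CacheBuilder.newBuilder()
                .build(new CacheLoader<Integer, String>() {
                    @Override
                    public String load(Integer key) throws IOException {
                        /* A checked exception thrown here reaches the caller wrapped. */
                        throw new IOException("simulated disk error for key " + key);
                    }
                });

        public static void main(String[] args) {
            try {
                CACHE.get(1);
            } catch (ExecutionException e) {
                /* getCause() recovers the original checked exception. */
                System.out.println(e.getCause().getClass()); /* class java.io.IOException */
            }
        }
    }
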
 
-    public synchronized void writeNode(HTNode node) {
+    /**
+     * Write the given node to disk.
+     *
+     * @param node
+     *            The node to write.
+     */
+    public void writeNode(HTNode node) {
         try {
-            /* Insert the node into the cache. */
             int seqNumber = node.getSequenceNumber();
-            int offset = (seqNumber + hashCode()) & CACHE_MASK;
-            NODE_CACHE[offset] = new CacheElement(this, node);
+
+            /* "Write-back" the node into the cache */
+            CacheKey key = new CacheKey(this, seqNumber);
+            NODE_CACHE.put(key, node);
 
             /* Position ourselves at the start of the node and write it */
-            seekFCToNodePos(fFileChannelOut, seqNumber);
-            node.writeSelf(fFileChannelOut);
+            synchronized (this) {
+                seekFCToNodePos(fFileChannelOut, seqNumber);
+                node.writeSelf(fFileChannelOut);
+            }
         } catch (IOException e) {
             /* If we were able to open the file, we should be fine now... */
             Activator.getDefault().logError(e.getMessage(), e);
         }
     }
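
The put() above acts as a write-back: the node that was just persisted is stored in the shared cache under its (HT_IO, sequence number) key, so a subsequent readNode() for the same sequence number is answered from memory without going through the loader. In isolation, with illustrative names:

    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class WriteBackSketch {
        public static void main(String[] args) throws ExecutionException {
            LoadingCache<Integer, String> cache = CacheBuilder.newBuilder()
                    .maximumSize(16)
                    .build(new CacheLoader<Integer, String>() {
                        @Override
                        public String load(Integer key) {
                            System.out.println("loader invoked for key " + key);
                            return "loaded-" + key;
                        }
                    });

            /* A value inserted explicitly, as writeNode() does for freshly written nodes... */
            cache.put(5, "written-5");
            /* ...is returned directly; the loader never runs for key 5. */
            System.out.println(cache.get(5)); /* prints "written-5" */
        }
    }
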
 
+    /**
+     * Get the output file channel, used for writing.
+     *
+     * @return The output file channel
+     */
     public FileChannel getFcOut() {
         return fFileChannelOut;
     }
 
+    /**
+     * Retrieve the input stream with which to read the attribute tree.
+     *
+     * @param nodeOffset
+     *            The offset in the file, in number of nodes. This should be
+     *            after all the nodes.
+     * @return The input stream, positioned at the given node offset
+     */
     public FileInputStream supplyATReader(int nodeOffset) {
         try {
             /*
@@ -187,6 +259,9 @@ class HT_IO {
         return fFileInputStream;
     }
 
+    /**
+     * Close all file channels and streams.
+     */
     public synchronized void closeFile() {
         try {
             fFileInputStream.close();
@@ -196,6 +271,9 @@ class HT_IO {
         }
     }
 
+    /**
+     * Delete the history tree file.
+     */
     public synchronized void deleteFile() {
         closeFile();
 
@@ -223,7 +301,7 @@ class HT_IO {
          * Cast to (long) is needed to make sure the result is a long too and
          * doesn't get truncated
          */
-        fc.position(HistoryTree.TREE_HEADER_SIZE
+        fc.position(IHistoryTree.TREE_HEADER_SIZE
                 + ((long) seqNumber) * fConfig.getBlockSize());
     }
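
The comment about the (long) cast deserves a concrete number: seqNumber and getBlockSize() are both ints, so without the cast the multiplication is done in 32-bit arithmetic and silently wraps once the file grows past 2 GiB. A quick sketch with made-up sizes (these are not the real TREE_HEADER_SIZE or block size):

    public class SeekMathSketch {
        public static void main(String[] args) {
            final int headerSize = 4096;      /* illustrative only */
            final int blockSize = 64 * 1024;  /* illustrative only */
            final int seqNumber = 40000;      /* 40000 blocks of 64 KiB is about 2.5 GiB */

            long wrong = headerSize + seqNumber * blockSize;          /* 32-bit multiply overflows */
            long right = headerSize + ((long) seqNumber) * blockSize; /* widened before multiplying */

            System.out.println(wrong); /* -1673523200: the product wrapped around */
            System.out.println(right); /* 2621444096 */
        }
    }
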
 