ss: Revert "Switch the statesystem to the datastore HT"
author Geneviève Bastien <gbastien+lttng@versatic.net>
Wed, 8 Feb 2017 14:46:11 +0000 (09:46 -0500)
committer Genevieve Bastien <gbastien+lttng@versatic.net>
Wed, 8 Feb 2017 18:31:39 +0000 (13:31 -0500)
This reverts commit cf8efcef276dab1aff04b8ddbfedb50ae8a09dab.

The new code path seems to introduce performance issues, as the benchmarks
show. Some investigation is needed, but reverting it is the safest route
for now.

Change-Id: I54dc7c4e297e9b633115145770648145030cc39b
Signed-off-by: Geneviève Bastien <gbastien+lttng@versatic.net>
Reviewed-on: https://git.eclipse.org/r/90640
Reviewed-by: Hudson CI
24 files changed:
statesystem/org.eclipse.tracecompass.datastore.core/META-INF/MANIFEST.MF
statesystem/org.eclipse.tracecompass.statesystem.core.tests/META-INF/MANIFEST.MF
statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/HistoryTreeBackendTest.java
statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HTIntervalStringReadWriteTest.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HistoryTreeTest.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HistoryTreeWithBackendTest.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/SSIntervalStringReadWriteTest.java [deleted file]
statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/HistoryTreeBackendStub.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/HistoryTreeClassicStub.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/package-info.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/META-INF/MANIFEST.MF
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTConfig.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTInterval.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTNode.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HT_IO.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HistoryTreeBackend.java
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HistoryTreeFactory.java
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/IHistoryTree.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/LeafNode.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/ParentNode.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/StateSystemInterval.java [deleted file]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/ThreadedHistoryTreeBackend.java
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/classic/CoreNode.java [new file with mode: 0644]
statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/classic/HistoryTreeClassic.java [new file with mode: 0644]

index 0f5636223cf574eb5de588417f9f7495cf3c9870..ac403ae469ad4df2c12a14516ab4098a5b2c0a73 100644 (file)
@@ -14,12 +14,12 @@ Export-Package: org.eclipse.tracecompass.internal.datastore.core;x-internal:=tru
  org.eclipse.tracecompass.internal.datastore.core.condition;x-internal:=true,
  org.eclipse.tracecompass.internal.datastore.core.historytree;x-internal:=true,
  org.eclipse.tracecompass.internal.datastore.core.serialization;x-internal:=true,
- org.eclipse.tracecompass.internal.provisional.datastore.core.condition;x-friends:="org.eclipse.tracecompass.statesystem.core,org.eclipse.tracecompass.statesystem.core.tests",
+ org.eclipse.tracecompass.internal.provisional.datastore.core.condition;x-internal:=true,
  org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions,
- org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;x-friends:="org.eclipse.tracecompass.statesystem.core,org.eclipse.tracecompass.statesystem.core.tests",
- org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.classic;x-friends:="org.eclipse.tracecompass.statesystem.core,org.eclipse.tracecompass.statesystem.core.tests",
+ org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;x-internal:=true,
+ org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.classic;x-internal:=true,
  org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.overlapping;x-internal:=true,
- org.eclipse.tracecompass.internal.provisional.datastore.core.interval;x-friends:="org.eclipse.tracecompass.statesystem.core,org.eclipse.tracecompass.statesystem.core.tests",
+ org.eclipse.tracecompass.internal.provisional.datastore.core.interval;x-internal:=true,
  org.eclipse.tracecompass.internal.provisional.datastore.core.serialization;x-friends:="org.eclipse.tracecompass.statesystem.core,org.eclipse.tracecompass.statesystem.core.tests"
 Import-Package: com.google.common.annotations,
  com.google.common.base,
index 13a6a41f88d60a422fd865a70aa46228cb9167b4..b4ad6120a8ecdc3ef36c30beef3f62fd55de35b9 100644 (file)
@@ -20,6 +20,7 @@ Export-Package: org.eclipse.tracecompass.statesystem.core.tests,
  org.eclipse.tracecompass.statesystem.core.tests.perf.historytree,
  org.eclipse.tracecompass.statesystem.core.tests.shared.utils,
  org.eclipse.tracecompass.statesystem.core.tests.statevalue,
+ org.eclipse.tracecompass.statesystem.core.tests.stubs.backend,
  org.eclipse.tracecompass.statesystem.core.tests.stubs.statevalues
 Import-Package: com.google.common.base,
  com.google.common.collect,
index 7c98eb80732620264b8b8add40a63c499e9b7e36..a33715ad6bbc2b51b2d84ff033250920d94390f1 100644 (file)
@@ -20,11 +20,9 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
 import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HistoryTreeBackend;
 import org.eclipse.tracecompass.statesystem.core.backend.IStateHistoryBackend;
 import org.junit.After;
-import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -117,13 +115,4 @@ public class HistoryTreeBackendTest extends StateHistoryBackendTestBase {
         fBackendMap.put(reOpenedBackend, historyTreeFile);
         return reOpenedBackend;
     }
-
-    /**
-     * This backend throws a different exception.
-     */
-    @Override
-    @Test(expected = RangeException.class)
-    public void testIntervalBeforeStart() {
-        super.testIntervalBeforeStart();
-    }
 }
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HTIntervalStringReadWriteTest.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HTIntervalStringReadWriteTest.java
new file mode 100644 (file)
index 0000000..1f1b7c8
--- /dev/null
@@ -0,0 +1,184 @@
+/*******************************************************************************
+ * Copyright (c) 2016 EfficiOS Inc., Alexandre Montplaisir
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.statesystem.core.tests.backend.historytree;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.nio.charset.Charset;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTInterval;
+import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+/**
+ * Test the reading/writing logic in {@link HTInterval}, particularly regarding
+ * the string length limitations.
+ *
+ * @author Alexandre Montplaisir
+ */
+@RunWith(Parameterized.class)
+@NonNullByDefault({})
+public class HTIntervalStringReadWriteTest {
+
+    private static final Charset CHARSET = Charset.forName("UTF-8");
+
+    private int fNbChars;
+    private int fCharLength;
+    private char fChar;
+
+    /**
+     * Parameter generator.
+     *
+     * Generates a combination of all possible string lengths and all possible
+     * character lengths.
+     *
+     * @return The test parameters
+     */
+    @Parameters(name = "nb of chars: {0}, char length: {1}")
+    public static Iterable<Object[]> getTestParams() {
+        Set<List<Integer>> set = Sets.cartesianProduct(ImmutableList.of(
+                ImmutableSet.of(
+                        0,
+                        10,
+                        Byte.MAX_VALUE - 1,
+                        (int) Byte.MAX_VALUE,
+                        Byte.MAX_VALUE + 1,
+
+                        Short.MAX_VALUE / 2 - 1,
+                        Short.MAX_VALUE / 2,
+                        Short.MAX_VALUE / 2 + 1,
+
+                        Short.MAX_VALUE / 3 - 1,
+                        Short.MAX_VALUE / 3,
+                        Short.MAX_VALUE / 3 + 1,
+
+                        Short.MAX_VALUE - 1,
+                        (int) Short.MAX_VALUE,
+                        Short.MAX_VALUE + 1),
+
+                ImmutableSet.of(1, 2, 3)
+                ));
+
+        return set.stream()
+                .map(List::toArray)
+                .collect(Collectors.toList());
+    }
+
+    /**
+     * Test constructor, take the generated parameters as parameters.
+     *
+     * @param nbChars
+     *            The number of characters in the test string.
+     * @param charLength
+     *            The length (in bytes) of the UTF-8-encoded form of the
+     *            character being used.
+     */
+    public HTIntervalStringReadWriteTest(Integer nbChars, Integer charLength) {
+        fNbChars = nbChars.intValue();
+        fCharLength = charLength.intValue();
+        switch (charLength) {
+        case 1:
+            fChar = 'a';
+            break;
+        case 2:
+            fChar = 'é';
+            break;
+        case 3:
+            fChar = '长'; // "chang" means long / length in Chinese!
+            break;
+        default:
+            throw new IllegalArgumentException();
+        }
+    }
+
+    /**
+     * Test method
+     *
+     * @throws IOException
+     *             Fails the test
+     */
+    @Test
+    public void testStringWithChars() throws IOException {
+        StringBuilder sb = new StringBuilder();
+        IntStream.range(0, fNbChars).forEach(i -> sb.append(fChar));
+        String str = sb.toString();
+        assertEquals(fNbChars, str.length());
+        assertEquals(fNbChars * fCharLength, str.getBytes(CHARSET).length);
+
+        TmfStateValue value = TmfStateValue.newValueString(str);
+        if (fNbChars * fCharLength > Short.MAX_VALUE) {
+            /* For sizes that are known to be too long, expect an exception */
+            try {
+                new HTInterval(0, 10, 1, value);
+            } catch (IllegalArgumentException e) {
+                return;
+            }
+            fail();
+        }
+        HTInterval interval = new HTInterval(0, 10, 1, value);
+        writeAndReadInterval(interval);
+    }
+
+    private static void writeAndReadInterval(HTInterval interval) throws IOException {
+        int sizeOnDisk = interval.getSizeOnDisk();
+
+        /* Write the interval to a file */
+        File tempFile = File.createTempFile("test-interval", ".interval");
+        try (FileOutputStream fos = new FileOutputStream(tempFile, false);
+                FileChannel fc = fos.getChannel();) {
+
+            ByteBuffer bb = ByteBuffer.allocate(sizeOnDisk);
+            bb.order(ByteOrder.LITTLE_ENDIAN);
+            bb.clear();
+
+            interval.writeInterval(bb);
+            bb.flip();
+            int written = fc.write(bb);
+            assertEquals(sizeOnDisk, written);
+        }
+
+        /* Read the interval from the file */
+        HTInterval readInterval;
+        try (FileInputStream fis = new FileInputStream(tempFile);
+                FileChannel fc = fis.getChannel();) {
+
+            ByteBuffer bb = ByteBuffer.allocate(sizeOnDisk);
+            bb.order(ByteOrder.LITTLE_ENDIAN);
+            bb.clear();
+
+            int read = fc.read(bb);
+            assertEquals(sizeOnDisk, read);
+            bb.flip();
+            readInterval = HTInterval.readFrom(bb);
+        }
+
+        assertEquals(interval, readInterval);
+    }
+}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HistoryTreeTest.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HistoryTreeTest.java
new file mode 100644 (file)
index 0000000..13b759c
--- /dev/null
@@ -0,0 +1,323 @@
+/*******************************************************************************
+ * Copyright (c) 2015 École Polytechnique de Montréal
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.statesystem.core.tests.backend.historytree;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
+import java.util.List;
+
+import org.eclipse.jdt.annotation.Nullable;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTConfig;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTInterval;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree;
+import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
+import org.eclipse.tracecompass.statesystem.core.tests.stubs.backend.HistoryTreeClassicStub;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests the history tree
+ *
+ * @author Geneviève Bastien
+ */
+public class HistoryTreeTest {
+
+
+    /* Minimal allowed blocksize */
+    private static final int BLOCK_SIZE = HistoryTreeClassicStub.MINIMUM_BLOCK_SIZE;
+
+    private static final HTInterval NULL_INTERVAL = new HTInterval(10, 20, 1, TmfStateValue.nullValue());
+
+    /* String with 23 characters, interval in file will be 25 bytes long */
+    private static final String TEST_STRING = "abcdefghifklmnopqrstuvw";
+    private static final TmfStateValue STRING_VALUE = TmfStateValue.newValueString(TEST_STRING);
+    private static final HTInterval STRING_INTERVAL = new HTInterval(10, 20, 1, STRING_VALUE);
+
+    private static final TmfStateValue LONG_VALUE = TmfStateValue.newValueLong(10L);
+    private static final HTInterval LONG_INTERVAL = new HTInterval(10, 20, 1, LONG_VALUE);
+
+    private static final TmfStateValue INT_VALUE = TmfStateValue.newValueInt(1);
+    private static final HTInterval INT_INTERVAL = new HTInterval(10, 20, 1, INT_VALUE);
+
+    private @Nullable File fTempFile;
+
+    /**
+     * Create the temporary file for this history tree
+     */
+    @Before
+    public void setupTest() {
+        try {
+            fTempFile = File.createTempFile("tmpStateSystem", null);
+        } catch (IOException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Delete the temporary history tree file after the test
+     */
+    @After
+    public void cleanup() {
+        if (fTempFile != null) {
+            fTempFile.delete();
+        }
+    }
+
+    /**
+     * Setup a history tree.
+     *
+     * @param maxChildren
+     *            The max number of children per node in the tree (tree config
+     *            option)
+     */
+    private HistoryTreeClassicStub setupSmallTree(int maxChildren) {
+        HistoryTreeClassicStub ht = null;
+        try {
+            File newFile = fTempFile;
+            assertNotNull(newFile);
+            HTConfig config = new HTConfig(newFile,
+                    BLOCK_SIZE,
+                    maxChildren, /* Number of children */
+                    1, /* Provider version */
+                    1); /* Start time */
+            ht = new HistoryTreeClassicStub(config);
+
+        } catch (IOException e) {
+            fail(e.getMessage());
+        }
+
+        assertNotNull(ht);
+        return ht;
+    }
+
+    /**
+     * Setup a history tree with config MAX_CHILDREN = 3.
+     */
+    private HistoryTreeClassicStub setupSmallTree() {
+        return setupSmallTree(3);
+    }
+
+    private static long fillValues(IHistoryTree ht, TmfStateValue value, int nbValues, long start) {
+        for (int i = 0; i < nbValues; i++) {
+            ht.insertInterval(new HTInterval(start + i, start + i + 1, 1, value));
+        }
+        return start + nbValues;
+    }
+
+    /**
+     * Insert intervals in the tree to fill the current leaf node to capacity,
+     * without exceeding it.
+     *
+     * This guarantees that the following insertion will create new nodes.
+     *
+     * @param ht
+     *            The history tree in which to insert
+     * @return Start time of the current leaf node. Future insertions should be
+     *         greater than or equal to this to make sure the intervals go in
+     *         the leaf node.
+     */
+    private static long fillNextLeafNode(HistoryTreeClassicStub ht, long leafNodeStart) {
+        int prevCount = ht.getNodeCount();
+        int prevDepth = ht.getDepth();
+
+        /* Fill the following leaf node */
+        HTNode node = ht.getLatestLeaf();
+        int intervalSize = STRING_INTERVAL.getSizeOnDisk();
+        int nodeFreeSpace = node.getNodeFreeSpace();
+        int nbIntervals = nodeFreeSpace / intervalSize;
+        long ret = fillValues(ht, STRING_VALUE, nbIntervals, leafNodeStart);
+
+        /* Make sure we haven't changed the depth or node count */
+        assertEquals(prevCount, ht.getNodeCount());
+        assertEquals(prevDepth, ht.getDepth());
+
+        return ret;
+    }
+
+    /**
+     * Test that nodes are filled
+     *
+     * It fills nodes with sequential intervals from one attribute only, so that
+     * leafs should be filled.
+     */
+    @Test
+    public void testSequentialFill() {
+        HistoryTreeClassicStub ht = setupSmallTree();
+
+        HTNode node = ht.getLatestLeaf();
+        assertEquals(0, node.getNodeUsagePercent());
+
+        /* Add null intervals up to ~10% */
+        int nodeFreeSpace = node.getNodeFreeSpace();
+        int intervalSize = NULL_INTERVAL.getSizeOnDisk();
+        int nbIntervals = nodeFreeSpace / 10 / intervalSize;
+        long start = fillValues(ht, TmfStateValue.nullValue(), nbIntervals, 1);
+        assertEquals(nodeFreeSpace - nbIntervals * intervalSize , node.getNodeFreeSpace());
+
+        /* Add integer intervals up to ~20% */
+        nodeFreeSpace = node.getNodeFreeSpace();
+        intervalSize = INT_INTERVAL.getSizeOnDisk();
+        nbIntervals = nodeFreeSpace / 10 / intervalSize;
+        start = fillValues(ht, INT_VALUE, nbIntervals, start);
+        assertEquals(nodeFreeSpace - nbIntervals * intervalSize , node.getNodeFreeSpace());
+
+        /* Add long intervals up to ~30% */
+        nodeFreeSpace = node.getNodeFreeSpace();
+        intervalSize = LONG_INTERVAL.getSizeOnDisk();
+        nbIntervals = nodeFreeSpace / 10 / intervalSize;
+        start = fillValues(ht, LONG_VALUE, nbIntervals, start);
+        assertEquals(nodeFreeSpace - nbIntervals * intervalSize , node.getNodeFreeSpace());
+
+        /* Add string intervals up to ~40% */
+        nodeFreeSpace = node.getNodeFreeSpace();
+        intervalSize = STRING_INTERVAL.getSizeOnDisk();
+        nbIntervals = nodeFreeSpace / 10 / intervalSize;
+        start = fillValues(ht, STRING_VALUE, nbIntervals, start);
+        assertEquals(nodeFreeSpace - nbIntervals * intervalSize , node.getNodeFreeSpace());
+
+    }
+
+    /**
+     * Test the addition of new nodes to the tree and make sure the tree is
+     * built with the right structure
+     */
+    @Test
+    public void testDepth() {
+        HistoryTreeClassicStub ht = setupSmallTree();
+
+        /* Fill a first node */
+        HTNode node = ht.getLatestLeaf();
+        int nodeFreeSpace = node.getNodeFreeSpace();
+        int nbIntervals = nodeFreeSpace / (STRING_INTERVAL.getSizeOnDisk());
+        long start = fillValues(ht, STRING_VALUE, nbIntervals, 1);
+
+        /* Add intervals that should add a sibling to the node */
+        assertEquals(1, ht.getNodeCount());
+        assertEquals(1, ht.getDepth());
+        start = fillValues(ht, STRING_VALUE, 1, start);
+        assertEquals(3, ht.getNodeCount());
+        assertEquals(2, ht.getDepth());
+
+        /* Fill the latest leaf node (2nd child) */
+        node = ht.getLatestLeaf();
+        nodeFreeSpace = node.getNodeFreeSpace();
+        nbIntervals = nodeFreeSpace / (STRING_INTERVAL.getSizeOnDisk());
+        start = fillValues(ht, STRING_VALUE, nbIntervals, start);
+
+        /*
+         * Add an interval that should add another sibling to the previous nodes
+         */
+        start = fillValues(ht, STRING_VALUE, 1, start);
+        assertEquals(4, ht.getNodeCount());
+        assertEquals(2, ht.getDepth());
+
+        /* Fill the latest leaf node (3rd and last child) */
+        node = ht.getLatestLeaf();
+        nodeFreeSpace = node.getNodeFreeSpace();
+        nbIntervals = nodeFreeSpace / (STRING_INTERVAL.getSizeOnDisk());
+        start = fillValues(ht, STRING_VALUE, nbIntervals, start);
+
+        /* The new node created here should generate a new branch */
+        start = fillValues(ht, STRING_VALUE, 1, start);
+        assertEquals(7, ht.getNodeCount());
+        assertEquals(3, ht.getDepth());
+    }
+
+    /**
+     * Make sure the node sequence numbers and parent pointers are set correctly
+     * when new nodes are created.
+     *
+     * <p>
+     * We are building a tree whose node sequence numbers will look like this at
+     * the end:
+     * </p>
+     *
+     * <pre>
+     *     3
+     *    / \
+     *   1   4
+     *  / \   \
+     * 0   2   5
+     * </pre>
+     *
+     * <p>
+     * However while building, the parent pointers may be different.
+     * </p>
+     *
+     * @throws ClosedChannelException
+     *             If the test fails
+     */
+    @Test
+    public void testNodeSequenceNumbers() throws ClosedChannelException {
+        /* Represents the start time of the current leaf node */
+        long start = 1;
+
+        HistoryTreeClassicStub ht = setupSmallTree(2);
+        start = fillNextLeafNode(ht, start);
+
+        List<HTNode> branch = ht.getLatestBranch();
+        assertEquals(1, branch.size());
+        assertEquals( 0, branch.get(0).getSequenceNumber());
+        assertEquals(-1, branch.get(0).getParentSequenceNumber());
+
+        /* Create a new branch */
+        start = fillValues(ht, STRING_VALUE, 1, start);
+        start = fillNextLeafNode(ht, start);
+        assertEquals(3, ht.getNodeCount());
+        assertEquals(2, ht.getDepth());
+
+        /* Make sure the first node's parent was updated */
+        HTNode node = ht.readNode(0);
+        assertEquals(0, node.getSequenceNumber());
+        assertEquals(1, node.getParentSequenceNumber());
+
+        /* Make sure the new branch is alright */
+        branch = ht.getLatestBranch();
+        assertEquals(2, branch.size());
+        assertEquals( 1, branch.get(0).getSequenceNumber());
+        assertEquals(-1, branch.get(0).getParentSequenceNumber());
+        assertEquals( 2, branch.get(1).getSequenceNumber());
+        assertEquals( 1, branch.get(1).getParentSequenceNumber());
+
+        /* Create a third branch */
+        start = fillValues(ht, STRING_VALUE, 1, start);
+        start = fillNextLeafNode(ht, start);
+        assertEquals(6, ht.getNodeCount());
+        assertEquals(3, ht.getDepth());
+
+        /* Make sure all previous nodes are still correct */
+        node = ht.readNode(0);
+        assertEquals(0, node.getSequenceNumber());
+        assertEquals(1, node.getParentSequenceNumber());
+        node = ht.readNode(1);
+        assertEquals(1, node.getSequenceNumber());
+        assertEquals(3, node.getParentSequenceNumber());
+        node = ht.readNode(2);
+        assertEquals(2, node.getSequenceNumber());
+        assertEquals(1, node.getParentSequenceNumber());
+
+        /* Verify the contents of the new latest branch */
+        branch = ht.getLatestBranch();
+        assertEquals(3, branch.size());
+        assertEquals( 3, branch.get(0).getSequenceNumber());
+        assertEquals(-1, branch.get(0).getParentSequenceNumber());
+        assertEquals( 4, branch.get(1).getSequenceNumber());
+        assertEquals( 3, branch.get(1).getParentSequenceNumber());
+        assertEquals( 5, branch.get(2).getSequenceNumber());
+        assertEquals( 4, branch.get(2).getParentSequenceNumber());
+    }
+}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HistoryTreeWithBackendTest.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/HistoryTreeWithBackendTest.java
new file mode 100644 (file)
index 0000000..85ca8dc
--- /dev/null
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ * Copyright (c) 2016 École Polytechnique de Montréal
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.statesystem.core.tests.backend.historytree;
+
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.eclipse.tracecompass.common.core.NonNullUtils;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTInterval;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HistoryTreeBackend;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree;
+import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
+import org.eclipse.tracecompass.statesystem.core.tests.stubs.backend.HistoryTreeBackendStub;
+import org.eclipse.tracecompass.statesystem.core.tests.stubs.backend.HistoryTreeBackendStub.HistoryTreeType;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Test the {@link HistoryTreeBackend}-specific behavior and its interactions
+ * with the {@link IHistoryTree} classes.
+ *
+ * @author Geneviève Bastien
+ */
+@RunWith(Parameterized.class)
+public class HistoryTreeWithBackendTest {
+
+    /** State system ID */
+    private static final String SSID = "test";
+    /** Provider version */
+    private static final int PROVIDER_VERSION = 0;
+
+    /** Default maximum number of children nodes */
+    private static final int MAX_CHILDREN = 3;
+    /** Default block size */
+    private static final int BLOCK_SIZE = 4096;
+
+    private final HistoryTreeType fHtType;
+
+    /**
+     * @return The arrays of parameters
+     */
+    @Parameters(name = "{0}")
+    public static Iterable<Object[]> getParameters() {
+        return Arrays.asList(new Object[][] {
+                { HistoryTreeType.CLASSIC }
+        });
+    }
+
+    /**
+     * Constructor
+     *
+     * @param htType
+     *            The type of history tree to use
+     */
+    public HistoryTreeWithBackendTest(HistoryTreeType htType) {
+        fHtType = htType;
+    }
+
+    /**
+     * Test the behavior of the history tree after at least a depth of 3
+     */
+    @Test
+    public void testFillNodes() {
+        try {
+            // Test case parameters
+            final int nbAttr = 5;
+            final int depthToRead = 3;
+
+            long startTime = 1;
+            File historyTreeFile = NonNullUtils.checkNotNull(File.createTempFile("HistoryTreeBackendTest", ".ht"));
+            HistoryTreeBackendStub.setTreeType(fHtType);
+            HistoryTreeBackendStub backend = new HistoryTreeBackendStub(SSID, historyTreeFile, PROVIDER_VERSION, startTime, BLOCK_SIZE, MAX_CHILDREN);
+
+            int duration = nbAttr;
+            int quarkTest = nbAttr;
+            long time = startTime + duration;
+
+            HTInterval interval = new HTInterval(startTime, time, quarkTest, TmfStateValue.newValueLong(time));
+            // Insert a first interval for the test attribute
+            backend.insertPastState(interval.getStartTime(), interval.getEndTime(), interval.getAttribute(), interval.getStateValue());
+
+            /*
+             * insert cascading intervals to fill 2 levels of history tree, so
+             * that we start another branch
+             */
+            while (backend.getHistoryTree().getDepth() < depthToRead) {
+                backend.insertPastState(
+                        Math.max(startTime, time - duration),
+                        time - 1,
+                        (int) time % nbAttr,
+                        TmfStateValue.newValueLong(time));
+                time++;
+            }
+
+            // entirely fill the latest leaf with cascading intervals
+            HTNode latestLeaf = backend.getHistoryTree().getLatestLeaf();
+            /*
+             * Add an interval while there is still room for it or make sure the
+             * node does not get written on disk in the meantime.
+             */
+            while (interval.getSizeOnDisk() <= latestLeaf.getNodeFreeSpace() || latestLeaf.isOnDisk()) {
+                backend.insertPastState(
+                        Math.max(startTime, time - duration),
+                        time - 1,
+                        (int) time % nbAttr,
+                        TmfStateValue.newValueLong(time));
+                time++;
+            }
+
+            // Add an interval that does not fit in latest leaf, but starts
+            // before the current branch
+            backend.insertPastState(interval.getEndTime() + 1, time, quarkTest, TmfStateValue.newValueLong(time));
+
+            backend.getHistoryTree().assertIntegrity();
+
+        } catch (IOException e) {
+            fail(e.getMessage());
+        }
+    }
+
+}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/SSIntervalStringReadWriteTest.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/src/org/eclipse/tracecompass/statesystem/core/tests/backend/historytree/SSIntervalStringReadWriteTest.java
deleted file mode 100644 (file)
index 5ceac60..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2016 EfficiOS Inc., Alexandre Montplaisir
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * which accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *******************************************************************************/
-
-package org.eclipse.tracecompass.statesystem.core.tests.backend.historytree;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.FileChannel;
-import java.nio.charset.Charset;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import org.eclipse.jdt.annotation.NonNullByDefault;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferWriter;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.SafeByteBufferFactory;
-import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.StateSystemInterval;
-import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Sets;
-
-/**
- * Test the reading/writing logic in {@link StateSystemInterval}, particularly regarding
- * the string length limitations.
- *
- * @author Alexandre Montplaisir
- */
-@RunWith(Parameterized.class)
-@NonNullByDefault({})
-public class SSIntervalStringReadWriteTest {
-
-    private static final Charset CHARSET = Charset.forName("UTF-8");
-
-    private int fNbChars;
-    private int fCharLength;
-    private char fChar;
-
-    /**
-     * Parameter generator.
-     *
-     * Generates a combination of all possible string lengths and all possible
-     * character lengths.
-     *
-     * @return The test parameters
-     */
-    @Parameters(name = "nb of chars: {0}, char length: {1}")
-    public static Iterable<Object[]> getTestParams() {
-        Set<List<Integer>> set = Sets.cartesianProduct(ImmutableList.of(
-                ImmutableSet.of(
-                        0,
-                        10,
-                        Byte.MAX_VALUE - 1,
-                        (int) Byte.MAX_VALUE,
-                        Byte.MAX_VALUE + 1,
-
-                        Short.MAX_VALUE / 2 - 1,
-                        Short.MAX_VALUE / 2,
-                        Short.MAX_VALUE / 2 + 1,
-
-                        Short.MAX_VALUE / 3 - 1,
-                        Short.MAX_VALUE / 3,
-                        Short.MAX_VALUE / 3 + 1,
-
-                        Short.MAX_VALUE - 1,
-                        (int) Short.MAX_VALUE,
-                        Short.MAX_VALUE + 1),
-
-                ImmutableSet.of(1, 2, 3)
-                ));
-
-        return set.stream()
-                .map(List::toArray)
-                .collect(Collectors.toList());
-    }
-
-    /**
-     * Test constructor, take the generated parameters as parameters.
-     *
-     * @param nbChars
-     *            The number of characters in the test string.
-     * @param charLength
-     *            The length (in bytes) of the UTF-8-encoded form of the
-     *            character being used.
-     */
-    public SSIntervalStringReadWriteTest(Integer nbChars, Integer charLength) {
-        fNbChars = nbChars.intValue();
-        fCharLength = charLength.intValue();
-        switch (charLength) {
-        case 1:
-            fChar = 'a';
-            break;
-        case 2:
-            fChar = 'é';
-            break;
-        case 3:
-            fChar = '长'; // "chang" means long / length in Chinese!
-            break;
-        default:
-            throw new IllegalArgumentException();
-        }
-    }
-
-    /**
-     * Test method
-     *
-     * @throws IOException
-     *             Fails the test
-     */
-    @Test
-    public void testStringWithChars() throws IOException {
-        StringBuilder sb = new StringBuilder();
-        IntStream.range(0, fNbChars).forEach(i -> sb.append(fChar));
-        String str = sb.toString();
-        assertEquals(fNbChars, str.length());
-        assertEquals(fNbChars * fCharLength, str.getBytes(CHARSET).length);
-
-        TmfStateValue value = TmfStateValue.newValueString(str);
-        if (fNbChars * fCharLength > Short.MAX_VALUE) {
-            /* For sizes that are known to be too long, expect an exception */
-            try {
-                new StateSystemInterval(0, 10, 1, value);
-            } catch (IllegalArgumentException e) {
-                return;
-            }
-            fail();
-        }
-        StateSystemInterval interval = new StateSystemInterval(0, 10, 1, value);
-        writeAndReadInterval(interval);
-    }
-
-    private static void writeAndReadInterval(StateSystemInterval interval) throws IOException {
-        int sizeOnDisk = interval.getSizeOnDisk();
-
-        /* Write the interval to a file */
-        File tempFile = File.createTempFile("test-interval", ".interval");
-        try (FileOutputStream fos = new FileOutputStream(tempFile, false);
-                FileChannel fc = fos.getChannel();) {
-
-            ByteBuffer bb = ByteBuffer.allocate(sizeOnDisk);
-            bb.order(ByteOrder.LITTLE_ENDIAN);
-            bb.clear();
-
-            ISafeByteBufferWriter sbbw = SafeByteBufferFactory.wrapWriter(bb, sizeOnDisk);
-            interval.writeSegment(sbbw);
-
-            bb.flip();
-            int written = fc.write(bb);
-            assertEquals(sizeOnDisk, written);
-        }
-
-        /* Read the interval from the file */
-        StateSystemInterval readInterval;
-        try (FileInputStream fis = new FileInputStream(tempFile);
-                FileChannel fc = fis.getChannel();) {
-
-            ByteBuffer bb = ByteBuffer.allocate(sizeOnDisk);
-            bb.order(ByteOrder.LITTLE_ENDIAN);
-            bb.clear();
-
-            int read = fc.read(bb);
-            assertEquals(sizeOnDisk, read);
-            bb.flip();
-
-            ISafeByteBufferReader sbbr = SafeByteBufferFactory.wrapReader(bb, sizeOnDisk);
-            readInterval = StateSystemInterval.DESERIALISER.readInterval(sbbr);
-        }
-
-        assertEquals(interval, readInterval);
-    }
-}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/HistoryTreeBackendStub.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/HistoryTreeBackendStub.java
new file mode 100644 (file)
index 0000000..ede7014
--- /dev/null
@@ -0,0 +1,170 @@
+/*******************************************************************************
+ * Copyright (c) 2016 École Polytechnique de Montréal
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.statesystem.core.tests.stubs.backend;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTConfig;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HistoryTreeBackend;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree;
+
+/**
+ * Stub class for the {@link HistoryTreeBackend}. It creates a
+ * {@link HistoryTreeClassicStub} to grant access to some protected methods.
+ *
+ * @author Geneviève Bastien
+ */
+public class HistoryTreeBackendStub extends HistoryTreeBackend {
+
+    private static HistoryTreeType HT_TYPE = HistoryTreeType.CLASSIC;
+
+    /**
+     * Sets the type of tree to build. Since the history tree is initialized in
+     * the parent's constructor, this stub class needs to know the type of tree
+     * to build.
+     *
+     * @param htType
+     *            The type of history tree to build for this backend
+     */
+    public static void setTreeType(HistoryTreeType htType) {
+        HT_TYPE = htType;
+    }
+
+    /**
+     * Enumeration of all history tree types implemented. This will be used to
+     * create the right type of history tree
+     */
+    public enum HistoryTreeType {
+        /**
+         * The classic history tree
+         */
+        CLASSIC
+    }
+
+    /**
+     * Constructor for new history files. Use this when creating a new history
+     * from scratch.
+     *
+     * @param ssid
+     *            The state system's ID
+     * @param newStateFile
+     *            The filename/location where to store the state history (Should
+     *            end in .ht)
+     * @param providerVersion
+     *            Version of of the state provider. We will only try to reopen
+     *            existing files if this version matches the one in the
+     *            framework.
+     * @param startTime
+     *            The earliest time stamp that will be stored in the history
+     * @param blockSize
+     *            The size of the blocks in the history file. This should be a
+     *            multiple of 4096.
+     * @param maxChildren
+     *            The maximum number of children each core node can have
+     * @throws IOException
+     *             Thrown if we can't create the file for some reason
+     */
+    public HistoryTreeBackendStub(String ssid,
+            File newStateFile,
+            int providerVersion,
+            long startTime,
+            int blockSize,
+            int maxChildren) throws IOException {
+        super(ssid, newStateFile, providerVersion, startTime, blockSize, maxChildren);
+    }
+
+    /**
+     * Existing history constructor. Use this to open an existing state-file.
+     *
+     * @param ssid
+     *            The state system's id
+     * @param existingStateFile
+     *            Filename/location of the history we want to load
+     * @param providerVersion
+     *            Expected version of of the state provider plugin.
+     * @throws IOException
+     *             If we can't read the file, if it doesn't exist, is not
+     *             recognized, or if the version of the file does not match the
+     *             expected providerVersion.
+     */
+    public HistoryTreeBackendStub(String ssid, File existingStateFile, int providerVersion)
+            throws IOException {
+        super(ssid, existingStateFile, providerVersion);
+    }
+
+    @Override
+    protected IHistoryTree initializeSHT(HTConfig conf) throws IOException {
+        switch (HT_TYPE) {
+        case CLASSIC:
+            return new HistoryTreeClassicStub(conf);
+        default:
+            return new HistoryTreeClassicStub(conf);
+        }
+    }
+
+    @Override
+    protected IHistoryTree initializeSHT(File existingStateFile, int providerVersion) throws IOException {
+        switch (HT_TYPE) {
+        case CLASSIC:
+            return new HistoryTreeClassicStub(existingStateFile, providerVersion);
+        default:
+            return new HistoryTreeClassicStub(existingStateFile, providerVersion);
+        }
+    }
+
+    /**
+     * Get the History Tree built by this backend.
+     *
+     * @return The history tree
+     */
+    public HistoryTreeClassicStub getHistoryTree() {
+        return (HistoryTreeClassicStub) super.getSHT();
+    }
+
+    /**
+     * Debug method to print the contents of the history backend.
+     *
+     * @param writer
+     *            The PrintWriter where to write the output
+     */
+    public void debugPrint(PrintWriter writer) {
+        /* By default don't print out all the intervals */
+        debugPrint(writer, false, -1);
+    }
+
+    /**
+     * The basic debugPrint method will print the tree structure, but not their
+     * contents.
+     *
+     * This method here print the contents (the intervals) as well.
+     *
+     * @param writer
+     *            The PrintWriter to which the debug info will be written
+     * @param printIntervals
+     *            Should we also print every contained interval individually?
+     * @param ts
+     *            The timestamp that nodes have to intersect for intervals to be
+     *            printed. A negative value will print intervals for all nodes.
+     *            The timestamp only applies if printIntervals is true.
+     */
+    public void debugPrint(PrintWriter writer, boolean printIntervals, long ts) {
+        /* Only used for debugging, shouldn't be externalized */
+        writer.println("------------------------------"); //$NON-NLS-1$
+        writer.println("State History Tree:\n"); //$NON-NLS-1$
+        writer.println(getSHT().toString());
+        writer.println("Average node utilization: " //$NON-NLS-1$
+                + getAverageNodeUsage());
+        writer.println(""); //$NON-NLS-1$
+
+        getHistoryTree().debugPrintFullTree(writer, printIntervals, ts);
+    }
+}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/HistoryTreeClassicStub.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/HistoryTreeClassicStub.java
new file mode 100644 (file)
index 0000000..2508db5
--- /dev/null
@@ -0,0 +1,280 @@
+/*******************************************************************************
+ * Copyright (c) 2015 École Polytechnique de Montréal
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.statesystem.core.tests.stubs.backend;
+
+import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.channels.ClosedChannelException;
+import java.util.Collection;
+import java.util.List;
+
+import org.eclipse.tracecompass.internal.statesystem.core.Activator;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTConfig;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.ParentNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.classic.CoreNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.classic.HistoryTreeClassic;
+
+import com.google.common.collect.Iterables;
+
+/**
+ * Stub class to unit test the history tree. You can set the size of the
+ * interval section before using the tree, in order to fine-tune the test.
+ *
+ * Note to developers: This tree is not meant to be used with a backend. It just
+ * exposes some info from the history tree.
+ *
+ * @author Geneviève Bastien
+ */
+public class HistoryTreeClassicStub extends HistoryTreeClassic {
+
+    /**
+     * Minimum size a block of this tree should have
+     */
+    public static final int MINIMUM_BLOCK_SIZE = IHistoryTree.TREE_HEADER_SIZE;
+
+    /**
+     * Constructor for this history tree stub
+     *
+     * @param conf
+     *            The config to use for this History Tree.
+     * @throws IOException
+     *             If an error happens trying to open/write to the file
+     *             specified in the config
+     */
+    public HistoryTreeClassicStub(HTConfig conf) throws IOException {
+        super(conf);
+    }
+
+    /**
+     * "Reader" constructor : instantiate a SHTree from an existing tree file on
+     * disk
+     *
+     * @param existingStateFile
+     *            Path/filename of the history-file we are to open
+     * @param expProviderVersion
+     *            The expected version of the state provider
+     * @throws IOException
+     *             If an error happens reading the file
+     */
+    public HistoryTreeClassicStub(File existingStateFile, int expProviderVersion) throws IOException {
+        super(existingStateFile, expProviderVersion);
+    }
+
+    // ------------------------------------------------------------------------
+    // Extra test accessors
+    // ------------------------------------------------------------------------
+
+    @Override
+    public List<HTNode> getLatestBranch() {
+        /* Super method is not public */
+        return checkNotNull(super.getLatestBranch());
+    }
+
+    /**
+     * Get the latest leaf of the tree
+     *
+     * @return The current leaf node of the tree
+     */
+    public HTNode getLatestLeaf() {
+        List<HTNode> latest = getLatestBranch();
+        return Iterables.getLast(latest);
+    }
+
+    /**
+     * Get the node from the latest branch at a given position, 0 being the root
+     * and <size of latest branch - 1> being a leaf node.
+     *
+     * @param pos
+     *            The position at which to return the node
+     * @return The node at position pos
+     */
+    public HTNode getNodeAt(int pos) {
+        List<HTNode> latest = getLatestBranch();
+        return latest.get(pos);
+    }
+
+    /**
+     * Get the depth of the tree
+     *
+     * @return The depth of the tree
+     */
+    public int getDepth() {
+        return getLatestBranch().size();
+    }
+
+    // ------------------------------------------------------------------------
+    // Debug printing methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Print out the full tree for debugging purposes
+     *
+     * @param writer
+     *            PrintWriter in which to write the output
+     * @param printIntervals
+     *            Flag to enable full output of the interval information
+     * @param ts
+     *            The timestamp that nodes have to intersect for intervals to be
+     *            printed. A negative value will print intervals for all nodes.
+     *            The timestamp only applies if printIntervals is true.
+     */
+    public void debugPrintFullTree(PrintWriter writer, boolean printIntervals, long ts) {
+        preOrderPrint(writer, false, getLatestBranch().get(0), 0, ts);
+
+        if (printIntervals) {
+            preOrderPrint(writer, true, getLatestBranch().get(0), 0, ts);
+        }
+        writer.println('\n');
+    }
+
+    /**
+     * Start at currentNode and print the contents of all its children, in
+     * pre-order. Give the root node in parameter to visit the whole tree, and
+     * have a nice overview.
+     */
+    private void preOrderPrint(PrintWriter writer, boolean printIntervals,
+            HTNode currentNode, int curDepth, long ts) {
+
+        writer.println(currentNode.toString());
+        /*
+         * Print intervals only if timestamp is negative or within the range of
+         * the node
+         */
+        if (printIntervals &&
+                (ts <= 0 ||
+                        (ts >= currentNode.getNodeStart() && ts <= currentNode.getNodeEnd()))) {
+            currentNode.debugPrintIntervals(writer);
+        }
+
+        switch (currentNode.getNodeType()) {
+        case LEAF:
+            /* Stop if it's the leaf node */
+            return;
+
+        case CORE:
+            try {
+                final CoreNode node = (CoreNode) currentNode;
+                /* If node extensions were used, they would be printed here. */
+
+                /* Print the child nodes */
+                for (int i = 0; i < node.getNbChildren(); i++) {
+                    HTNode nextNode = getTreeIO().readNode(node.getChild(i));
+                    for (int j = 0; j < curDepth; j++) {
+                        writer.print("  ");
+                    }
+                    writer.print("+-");
+                    preOrderPrint(writer, printIntervals, nextNode, curDepth + 1, ts);
+                }
+            } catch (ClosedChannelException e) {
+                Activator.getDefault().logError(e.getMessage());
+            }
+            break;
+
+        default:
+            break;
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Assertion methods, for use with JUnit tests
+    // ------------------------------------------------------------------------
+
+    /**
+     * Check the integrity of all the nodes in the tree. Calls
+     * {@link #assertNodeIntegrity} for every node in the tree.
+     */
+    public void assertIntegrity() {
+        try {
+            for (int i = 0; i < getNodeCount(); i++) {
+                assertNodeIntegrity(getNode(i));
+            }
+        } catch (ClosedChannelException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Debugging method to make sure all intervals contained in the given node
+     * have valid start and end times.
+     *
+     * @param node
+     *            The node to check
+     */
+    private void assertNodeIntegrity(HTNode node) {
+        if (node instanceof ParentNode) {
+            assertChildrenIntegrity((ParentNode) node);
+        }
+
+        /* Check that all intervals are within the node's range */
+        // TODO: Get the intervals of a node
+
+    }
+
+    private void assertChildrenIntegrity(ParentNode node) {
+        try {
+            /*
+             * Test that this node's start and end times match the start of the
+             * first child and the end of the last child, respectively
+             */
+            if (node.getNbChildren() > 0) {
+                HTNode childNode = getNode(node.getChild(0));
+                assertEquals("Equals start time of parent " + node.getSequenceNumber() + " and first child " + childNode.getSequenceNumber(),
+                        node.getNodeStart(), childNode.getNodeStart());
+                if (node.isOnDisk()) {
+                    childNode = getNode(node.getLatestChild());
+                    assertEquals("Equals end time of parent " + node.getSequenceNumber() + " and last child " + childNode.getSequenceNumber(),
+                            node.getNodeEnd(), childNode.getNodeEnd());
+                }
+            }
+
+            /*
+             * Test that the childStartTimes[] array matches the real nodes'
+             * start times
+             *
+             * Also test that children range is within the parent's range
+             */
+            for (int i = 0; i < node.getNbChildren(); i++) {
+                HTNode childNode = getNode(node.getChild(i));
+                assertEquals("Start time in parent " + node.getSequenceNumber() + " of child at index " + i,
+                        childNode.getNodeStart(), node.getChildStart(i));
+                assertTrue("Child at index " + i + " of parent " + node.getSequenceNumber() + " has correct start time",
+                        node.getNodeStart() <= childNode.getNodeStart());
+                if (node.isOnDisk() && childNode.isOnDisk()) {
+                    assertTrue("Child at index " + i + " of parent " + node.getSequenceNumber() + " has correct start time",
+                            childNode.getNodeEnd() <= childNode.getNodeEnd());
+                }
+                testIntersectingChildren(node, childNode);
+            }
+
+        } catch (ClosedChannelException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    private static void testIntersectingChildren(ParentNode parent, HTNode child) {
+        int childSequence = child.getSequenceNumber();
+        boolean shouldBeInCollection;
+        Collection<Integer> nextChildren;
+        for (long t = parent.getNodeStart(); t < parent.getNodeEnd(); t++) {
+            shouldBeInCollection = (t >= child.getNodeStart() && t <= child.getNodeEnd());
+            nextChildren = parent.selectNextChildren(t);
+            assertEquals(shouldBeInCollection, nextChildren.contains(childSequence));
+        }
+    }
+
+}
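For reference, here is a minimal sketch of how a test could drive the stub's checks above. Only assertIntegrity() appears verbatim in this patch; the debugPrintFullTree(PrintWriter, boolean, long) wrapper and the way the stub instance is obtained are assumptions made for illustration.

    import java.io.PrintWriter;

    /* Hypothetical JUnit-style helper; method names and wiring are assumed. */
    final class HistoryTreeStubExample {
        static void checkAndDump(HistoryTreeClassicStub tree) {
            /* Verify parent/child start and end times for every node in the tree */
            tree.assertIntegrity();

            /* Pre-order dump of the whole tree, printing intervals that contain t = 100 */
            PrintWriter writer = new PrintWriter(System.out);
            tree.debugPrintFullTree(writer, true, 100L);
            writer.flush();
        }
    }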
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/package-info.java b/statesystem/org.eclipse.tracecompass.statesystem.core.tests/stubs/org/eclipse/tracecompass/statesystem/core/tests/stubs/backend/package-info.java
new file mode 100644 (file)
index 0000000..6d73c86
--- /dev/null
@@ -0,0 +1,11 @@
+/*******************************************************************************
+ * Copyright (c) 2015 École Polytechnique de Montréal
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+@org.eclipse.jdt.annotation.NonNullByDefault
+package org.eclipse.tracecompass.statesystem.core.tests.stubs.backend;
index 444f4fc9416ab1c1d11337218341c4ec6482055c..81f3d3cb3f8c92793f25e4a179750e0902aeb6ae 100644 (file)
@@ -16,6 +16,7 @@ Export-Package: org.eclipse.tracecompass.internal.provisional.statesystem.core.s
  org.eclipse.tracecompass.internal.statesystem.core;x-friends:="org.eclipse.tracecompass.statesystem.core.tests",
  org.eclipse.tracecompass.internal.statesystem.core.backend;x-internal:=true,
  org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;x-friends:="org.eclipse.tracecompass.statesystem.core.tests",
+ org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.classic;x-friends:="org.eclipse.tracecompass.statesystem.core.tests",
  org.eclipse.tracecompass.statesystem.core,
  org.eclipse.tracecompass.statesystem.core.backend,
  org.eclipse.tracecompass.statesystem.core.exceptions,
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTConfig.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTConfig.java
new file mode 100644 (file)
index 0000000..597df81
--- /dev/null
@@ -0,0 +1,125 @@
+/*******************************************************************************
+ * Copyright (c) 2012, 2014 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import java.io.File;
+
+/**
+ * Configuration object for the {@link IHistoryTree}.
+ *
+ * @author Alexandre Montplaisir
+ */
+public final class HTConfig {
+
+    private static final int DEFAULT_BLOCKSIZE = 64 * 1024;
+    private static final int DEFAULT_MAXCHILDREN = 50;
+
+    private final File stateFile;
+    private final int blockSize;
+    private final int maxChildren;
+    private final int providerVersion;
+    private final long treeStart;
+
+    /**
+     * Full constructor.
+     *
+     * @param newStateFile
+     *            The name of the history file
+     * @param blockSize
+     *            The size of each "block" on disk. One node will always fit in
+     *            one block.
+     * @param maxChildren
+     *            The maximum number of children allowed per core (non-leaf)
+     *            node.
+     * @param providerVersion
+     *            The version of the state provider. If a file already exists,
+     *            and their versions match, the history file will not be rebuilt
+     *            uselessly.
+     * @param startTime
+     *            The start time of the history
+     */
+    public HTConfig(File newStateFile, int blockSize, int maxChildren,
+            int providerVersion, long startTime) {
+        this.stateFile = newStateFile;
+        this.blockSize = blockSize;
+        this.maxChildren = maxChildren;
+        this.providerVersion = providerVersion;
+        this.treeStart = startTime;
+    }
+
+    /**
+     * Version of the constructor using default values for 'blockSize' and
+     * 'maxChildren'.
+     *
+     * @param newStateFile
+     *            The name of the history file
+     * @param providerVersion
+     *            The version of the state provider. If a file already exists,
+     *            and their versions match, the history file will not be rebuilt
+     *            uselessly.
+     * @param startTime
+     *            The start time of the history
+     */
+    public HTConfig(File newStateFile, int providerVersion, long startTime) {
+        this(newStateFile, DEFAULT_BLOCKSIZE, DEFAULT_MAXCHILDREN, providerVersion, startTime);
+    }
+
+    // ------------------------------------------------------------------------
+    // Getters
+    // ------------------------------------------------------------------------
+
+    /**
+     * Get the history file.
+     *
+     * @return The history file
+     */
+    public File getStateFile() {
+        return stateFile;
+    }
+
+    /**
+     * Get the configured block size.
+     *
+     * @return The block size
+     */
+    public int getBlockSize() {
+        return blockSize;
+    }
+
+    /**
+     * Get the maximum number of children allowed per core node.
+     *
+     * @return The maximum number of children
+     */
+    public int getMaxChildren() {
+        return maxChildren;
+    }
+
+    /**
+     * Get the state provider's version.
+     *
+     * @return The state provider's version
+     */
+    public int getProviderVersion() {
+        return providerVersion;
+    }
+
+    /**
+     * Get the start time of the history.
+     *
+     * @return The start time
+     */
+    public long getTreeStart() {
+        return treeStart;
+    }
+}
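A quick illustration of the two constructors above. The file path, provider version and start time are arbitrary example values; the short form simply falls back to the 64 KiB block size and 50-children defaults.

    import java.io.File;

    final class HTConfigExample {
        static void example() {
            File stateFile = new File("/tmp/example.ht"); // arbitrary example path
            int providerVersion = 1;
            long startTime = 0L;

            /* Short form: uses DEFAULT_BLOCKSIZE (64 * 1024) and DEFAULT_MAXCHILDREN (50) */
            HTConfig defaults = new HTConfig(stateFile, providerVersion, startTime);

            /* Full form: equivalent to the above, with the defaults spelled out */
            HTConfig explicit = new HTConfig(stateFile, 64 * 1024, 50, providerVersion, startTime);
        }
    }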
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTInterval.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTInterval.java
new file mode 100644 (file)
index 0000000..e0ab848
--- /dev/null
@@ -0,0 +1,411 @@
+/*******************************************************************************
+ * Copyright (c) 2012, 2015 Ericsson, École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Alexandre Montplaisir - Initial API and implementation
+ *    Florian Wininger - Allow changing the size of an interval
+ *    Patrick Tasse - Add message to exceptions
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.Objects;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader;
+import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferWriter;
+import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.SafeByteBufferFactory;
+import org.eclipse.tracecompass.internal.provisional.statesystem.core.statevalue.CustomStateValue;
+import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
+import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
+import org.eclipse.tracecompass.statesystem.core.statevalue.ITmfStateValue;
+import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
+
+/**
+ * The interval component, which will be contained in a node of the History
+ * Tree.
+ *
+ * @author Alexandre Montplaisir
+ */
+public final class HTInterval implements ITmfStateInterval {
+
+    private static final Charset CHARSET = Charset.forName("UTF-8"); //$NON-NLS-1$
+
+    private static final String errMsg = "Invalid interval data. Maybe your file is corrupt?"; //$NON-NLS-1$
+
+    /* 'Byte' equivalent for state values types */
+    private static final byte TYPE_NULL = -1;
+    private static final byte TYPE_INTEGER = 0;
+    private static final byte TYPE_STRING = 1;
+    private static final byte TYPE_LONG = 2;
+    private static final byte TYPE_DOUBLE = 3;
+    private static final byte TYPE_CUSTOM = 20;
+
+    private final long start;
+    private final long end;
+    private final int attribute;
+    private final @NonNull TmfStateValue sv;
+
+    /** Number of bytes used by this interval when it is written to disk */
+    private final int fSizeOnDisk;
+
+    /**
+     * Standard constructor
+     *
+     * @param intervalStart
+     *            Start time of the interval
+     * @param intervalEnd
+     *            End time of the interval
+     * @param attribute
+     *            Attribute (quark) to which the state represented by this
+     *            interval belongs
+     * @param value
+     *            State value represented by this interval
+     * @throws TimeRangeException
+     *             If the start time or end time are invalid
+     */
+    public HTInterval(long intervalStart, long intervalEnd, int attribute,
+            @NonNull TmfStateValue value) throws TimeRangeException {
+        if (intervalStart > intervalEnd) {
+            throw new TimeRangeException("Start:" + intervalStart + ", End:" + intervalEnd); //$NON-NLS-1$ //$NON-NLS-2$
+        }
+
+        this.start = intervalStart;
+        this.end = intervalEnd;
+        this.attribute = attribute;
+        this.sv = value;
+        this.fSizeOnDisk = computeSizeOnDisk(sv);
+    }
+
+    /**
+     * Compute how much space (in bytes) an interval will take in its serialized
+     * form on disk. This is dependent on its state value.
+     */
+    private static int computeSizeOnDisk(ITmfStateValue sv) {
+        /*
+         * Minimum size is 2x long (start and end), 1x int (attribute) and 1x
+         * byte (value type).
+         */
+        int minSize = Long.BYTES + Long.BYTES + Integer.BYTES + Byte.BYTES;
+
+        switch (sv.getType()) {
+        case NULL:
+            return minSize;
+        case INTEGER:
+            return (minSize + Integer.BYTES);
+        case LONG:
+            return (minSize + Long.BYTES);
+        case DOUBLE:
+            return (minSize + Double.BYTES);
+        case STRING:
+            String str = sv.unboxStr();
+            int strLength = str.getBytes(CHARSET).length;
+
+            if (strLength > Short.MAX_VALUE) {
+                throw new IllegalArgumentException("String is too long to be stored in state system: " + str); //$NON-NLS-1$
+            }
+
+            /*
+             * String's length + 3 (2 bytes for size, 1 byte for \0 at the end)
+             */
+            return (minSize + strLength + 3);
+        case CUSTOM:
+            /* Length of serialized value (short) + state value */
+            return (minSize + Short.BYTES + ((CustomStateValue) sv).getSerializedSize());
+        default:
+            /*
+             * It's very important that we know how to write the state value in
+             * the file!!
+             */
+            throw new IllegalStateException();
+        }
+    }
+
+    /**
+     * "Faster" constructor for inner use only. When we build an interval when
+     * reading it from disk (with {@link #readFrom}), we already know the size
+     * of the strings entry, so there is no need to call
+     * {@link #computeStringsEntrySize()} and do an extra copy.
+     */
+    private HTInterval(long intervalStart, long intervalEnd, int attribute,
+            @NonNull TmfStateValue value, int size) throws TimeRangeException {
+        if (intervalStart > intervalEnd) {
+            throw new TimeRangeException("Start:" + intervalStart + ", End:" + intervalEnd); //$NON-NLS-1$ //$NON-NLS-2$
+        }
+
+        this.start = intervalStart;
+        this.end = intervalEnd;
+        this.attribute = attribute;
+        this.sv = value;
+        this.fSizeOnDisk = size;
+    }
+
+    /**
+     * Reader factory method. Builds the interval using an already-allocated
+     * ByteBuffer, which normally comes from a NIO FileChannel.
+     *
+     * The interval is just a start, end, attribute and value; this is the
+     * layout of the HTInterval on disk:
+     * <ul>
+     * <li>start (8 bytes)</li>
+     * <li>end (8 bytes)</li>
+     * <li>attribute (4 bytes)</li>
+     * <li>sv type (1 byte)</li>
+     * <li>sv (0 bytes for null, 4 for int, 8 for long and double, and the
+     * length of the string + 3 for strings (it's variable))</li>
+     * </ul>
+     *
+     * @param buffer
+     *            The ByteBuffer from which to read the information
+     * @return The interval object
+     * @throws IOException
+     *             If there was an error reading from the buffer
+     */
+    public static final HTInterval readFrom(ByteBuffer buffer) throws IOException {
+        TmfStateValue value;
+
+        int posStart = buffer.position();
+        /* Read the Data Section entry */
+        long intervalStart = buffer.getLong();
+        long intervalEnd = buffer.getLong();
+        int attribute = buffer.getInt();
+
+        /* Read the 'type' of the value, then react accordingly */
+        byte valueType = buffer.get();
+        switch (valueType) {
+
+        case TYPE_NULL:
+            value = TmfStateValue.nullValue();
+            break;
+
+        case TYPE_INTEGER:
+            value = TmfStateValue.newValueInt(buffer.getInt());
+            break;
+
+        case TYPE_STRING: {
+            /* the first short = the size to read */
+            int valueSize = buffer.getShort();
+
+            byte[] array = new byte[valueSize];
+            buffer.get(array);
+            value = TmfStateValue.newValueString(new String(array, CHARSET));
+
+            /* Confirm the 0'ed byte at the end */
+            byte res = buffer.get();
+            if (res != 0) {
+                throw new IOException(errMsg);
+            }
+            break;
+        }
+
+        case TYPE_LONG:
+            /* Read the long value directly from the buffer */
+            value = TmfStateValue.newValueLong(buffer.getLong());
+            break;
+
+        case TYPE_DOUBLE:
+            /* Read the double value directly from the buffer */
+            value = TmfStateValue.newValueDouble(buffer.getDouble());
+            break;
+
+        case TYPE_CUSTOM: {
+            short valueSize = buffer.getShort();
+            ISafeByteBufferReader safeBuffer = SafeByteBufferFactory.wrapReader(buffer, valueSize);
+            value = CustomStateValue.readSerializedValue(safeBuffer);
+            break;
+        }
+        default:
+            /* Unknown data, better to not make anything up... */
+            throw new IOException(errMsg);
+        }
+
+        try {
+            return new HTInterval(intervalStart, intervalEnd, attribute, value, buffer.position() - posStart);
+        } catch (TimeRangeException e) {
+            throw new IOException(errMsg);
+        }
+    }
+
+    /**
+     * Counterpart of {@link #readFrom}: write the Data entry corresponding to
+     * this interval in a ByteBuffer (mapped to a block in the history-file,
+     * hopefully).
+     *
+     * The interval is just a start, end, attribute and value; this is the
+     * layout of the HTInterval on disk:
+     * <ul>
+     * <li>start (8 bytes)</li>
+     * <li>end (8 bytes)</li>
+     * <li>attribute (4 bytes)</li>
+     * <li>sv type (1 byte)</li>
+     * <li>sv (0 bytes for null, 4 for int, 8 for long and double, and the
+     * length of the string + 3 for strings (it's variable))</li>
+     * </ul>
+     *
+     * @param buffer
+     *            The already-allocated ByteBuffer corresponding to a SHT Node
+     */
+    public void writeInterval(ByteBuffer buffer) {
+        final byte byteFromType = getByteFromType(sv.getType());
+
+        buffer.putLong(start);
+        buffer.putLong(end);
+        buffer.putInt(attribute);
+        buffer.put(byteFromType);
+
+        switch (byteFromType) {
+        case TYPE_NULL:
+            break;
+        case TYPE_INTEGER:
+            buffer.putInt(sv.unboxInt());
+            break;
+
+        case TYPE_STRING: {
+            String string = sv.unboxStr();
+            byte[] strArray = string.getBytes(CHARSET);
+
+            /*
+             * Write the Strings entry (2-byte size, then the bytes, then the
+             * terminating 0). The string length was checked in the constructor.
+             */
+            buffer.putShort((short) strArray.length);
+            buffer.put(strArray);
+            buffer.put((byte) 0);
+            break;
+        }
+
+        case TYPE_LONG:
+            buffer.putLong(sv.unboxLong());
+            break;
+
+        case TYPE_DOUBLE:
+            buffer.putDouble(sv.unboxDouble());
+            break;
+
+        case TYPE_CUSTOM: {
+            int size = ((CustomStateValue) sv).getSerializedSize();
+            buffer.putShort((short) size);
+            ISafeByteBufferWriter safeBuffer = SafeByteBufferFactory.wrapWriter(buffer, size);
+            ((CustomStateValue) sv).serialize(safeBuffer);
+            break;
+        }
+
+        default:
+            break;
+        }
+    }
+
+    @Override
+    public long getStartTime() {
+        return start;
+    }
+
+    @Override
+    public long getEndTime() {
+        return end;
+    }
+
+    @Override
+    public int getAttribute() {
+        return attribute;
+    }
+
+    @Override
+    public ITmfStateValue getStateValue() {
+        return sv;
+    }
+
+    @Override
+    public boolean intersects(long timestamp) {
+        if (start <= timestamp) {
+            if (end >= timestamp) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Total serialized size of this interval
+     *
+     * @return The interval size
+     */
+    public int getSizeOnDisk() {
+        return fSizeOnDisk;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        HTInterval other = (HTInterval) obj;
+        return (start == other.start &&
+                end == other.end &&
+                attribute == other.attribute &&
+                sv.equals(other.sv));
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(start, end, attribute, sv);
+    }
+
+    @Override
+    public String toString() {
+        /* Only for debug, should not be externalized */
+        StringBuilder sb = new StringBuilder();
+        sb.append('[');
+        sb.append(start);
+        sb.append(", "); //$NON-NLS-1$
+        sb.append(end);
+        sb.append(']');
+
+        sb.append(", attribute = "); //$NON-NLS-1$
+        sb.append(attribute);
+
+        sb.append(", value = "); //$NON-NLS-1$
+        sb.append(sv.toString());
+
+        return sb.toString();
+    }
+
+    /**
+     * Here we determine how state value types are written in the 8-bit field
+     * that indicates the value type in the file.
+     */
+    private static byte getByteFromType(ITmfStateValue.Type type) {
+        switch (type) {
+        case NULL:
+            return TYPE_NULL;
+        case INTEGER:
+            return TYPE_INTEGER;
+        case STRING:
+            return TYPE_STRING;
+        case LONG:
+            return TYPE_LONG;
+        case DOUBLE:
+            return TYPE_DOUBLE;
+        case CUSTOM:
+            return TYPE_CUSTOM;
+        default:
+            /* Should not happen if the switch is fully covered */
+            throw new IllegalStateException();
+        }
+    }
+}
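The disk layout documented in readFrom() and writeInterval() can be exercised as a simple in-memory round trip. This is only a sketch: the 64-byte buffer and the little-endian order are illustrative choices (the real backend writes into a block-sized buffer owned by HTNode), and the 25-byte figure is just 8 + 8 + 4 + 1 + 4 for an integer value.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
    import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

    final class HTIntervalRoundTrip {
        static void roundTrip() throws IOException, TimeRangeException {
            ByteBuffer buf = ByteBuffer.allocate(64).order(ByteOrder.LITTLE_ENDIAN);

            HTInterval original = new HTInterval(0L, 10L, 3 /* attribute quark */,
                    TmfStateValue.newValueInt(42));
            original.writeInterval(buf); // start + end + attribute + type byte + int payload
            buf.flip();

            HTInterval copy = HTInterval.readFrom(buf);
            assert original.equals(copy) && copy.getSizeOnDisk() == 25;
        }
    }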
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTNode.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HTNode.java
new file mode 100644 (file)
index 0000000..776dd44
--- /dev/null
@@ -0,0 +1,674 @@
+/*******************************************************************************
+ * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *   Alexandre Montplaisir - Initial API and implementation
+ *   Florian Wininger - Add Extension and Leaf Node
+ *   Patrick Tasse - Keep interval list sorted on insert
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
+import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
+import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
+
+import com.google.common.collect.Iterables;
+
+/**
+ * The base class for all the types of nodes that go in the History Tree.
+ *
+ * @author Alexandre Montplaisir
+ */
+public abstract class HTNode {
+
+    // ------------------------------------------------------------------------
+    // Class fields
+    // ------------------------------------------------------------------------
+
+    /**
+     * The type of node
+     */
+    public static enum NodeType {
+        /**
+         * Core node, which is a "front" node, at any level of the tree except
+         * the bottom-most one. It has children, and may have extensions.
+         */
+        CORE,
+        /**
+         * Leaf node, which is a node at the last bottom level of the tree. It
+         * cannot have any children or extensions.
+         */
+        LEAF;
+
+        /**
+         * Determine a node type by reading a serialized byte.
+         *
+         * @param rep
+         *            The byte representation of the node type
+         * @return The corresponding NodeType
+         * @throws IOException
+         *             If the NodeType is unrecognized
+         */
+        public static NodeType fromByte(byte rep) throws IOException {
+            switch (rep) {
+            case 1:
+                return CORE;
+            case 2:
+                return LEAF;
+            default:
+                throw new IOException();
+            }
+        }
+
+        /**
+         * Get the byte representation of this node type. It can then be read
+         * with {@link #fromByte}.
+         *
+         * @return The byte matching this node type
+         */
+        public byte toByte() {
+            switch (this) {
+            case CORE:
+                return 1;
+            case LEAF:
+                return 2;
+            default:
+                throw new IllegalStateException();
+            }
+        }
+    }
+
+    /**
+     * <pre>
+     *  1 - byte (type)
+     * 16 - 2x long (start time, end time)
+     * 12 - 3x int (seq number, parent seq number, interval count)
+     *  1 - byte (done or not)
+     * </pre>
+     */
+    private static final int COMMON_HEADER_SIZE = Byte.BYTES
+            + 2 * Long.BYTES
+            + 3 * Integer.BYTES
+            + Byte.BYTES;
+
+    // ------------------------------------------------------------------------
+    // Attributes
+    // ------------------------------------------------------------------------
+
+    /* Configuration of the History Tree to which belongs this node */
+    private final HTConfig fConfig;
+
+    /* Time range of this node */
+    private final long fNodeStart;
+    private long fNodeEnd;
+
+    /* Sequence number = position in the node section of the file */
+    private final int fSequenceNumber;
+    private int fParentSequenceNumber; /* = -1 if this node is the root node */
+
+    /* Sum of bytes of all intervals in the node */
+    private int fSizeOfIntervalSection;
+
+    /* True if this node was read from disk (meaning its end time is now fixed) */
+    private volatile boolean fIsOnDisk;
+
+    /* Vector containing all the intervals contained in this node */
+    private final List<HTInterval> fIntervals;
+
+    /* Lock used to protect the accesses to intervals, nodeEnd and such */
+    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);
+
+    /** Order of intervals in an HTNode: sorted by end times, then by start times, then by attribute. */
+    private static final Comparator<ITmfStateInterval> NODE_ORDER = Comparator
+            .comparingLong(ITmfStateInterval::getEndTime)
+            .thenComparingLong(ITmfStateInterval::getStartTime)
+            .thenComparingInt(ITmfStateInterval::getAttribute);
+
+    /**
+     * Constructor
+     *
+     * @param config
+     *            Configuration of the History Tree
+     * @param seqNumber
+     *            The (unique) sequence number assigned to this particular node
+     * @param parentSeqNumber
+     *            The sequence number of this node's parent node
+     * @param start
+     *            The earliest timestamp stored in this node
+     */
+    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
+        fConfig = config;
+        fNodeStart = start;
+        fSequenceNumber = seqNumber;
+        fParentSequenceNumber = parentSeqNumber;
+
+        fSizeOfIntervalSection = 0;
+        fIsOnDisk = false;
+        fIntervals = new ArrayList<>();
+    }
+
+    /**
+     * Reader factory method. Build a Node object (of the right type) by reading
+     * a block in the file.
+     *
+     * @param config
+     *            Configuration of the History Tree
+     * @param fc
+     *            FileChannel to the history file, ALREADY SEEKED at the start
+     *            of the node.
+     * @param nodeFactory
+     *            The factory to create the nodes for this tree
+     * @return The node object
+     * @throws IOException
+     *             If there was an error reading from the file channel
+     */
+    public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc, IHistoryTree.IHTNodeFactory nodeFactory)
+            throws IOException {
+        HTNode newNode = null;
+        int res, i;
+
+        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
+        buffer.order(ByteOrder.LITTLE_ENDIAN);
+        buffer.clear();
+        res = fc.read(buffer);
+        assert (res == config.getBlockSize());
+        buffer.flip();
+
+        /* Read the common header part */
+        byte typeByte = buffer.get();
+        NodeType type = NodeType.fromByte(typeByte);
+        long start = buffer.getLong();
+        long end = buffer.getLong();
+        int seqNb = buffer.getInt();
+        int parentSeqNb = buffer.getInt();
+        int intervalCount = buffer.getInt();
+        buffer.get(); // TODO Used to be "isDone", to be removed from the header
+
+        /* Now the rest of the header depends on the node type */
+        switch (type) {
+        case CORE:
+            /* Core nodes */
+            newNode = nodeFactory.createCoreNode(config, seqNb, parentSeqNb, start);
+            newNode.readSpecificHeader(buffer);
+            break;
+
+        case LEAF:
+            /* Leaf nodes */
+            newNode = nodeFactory.createLeafNode(config, seqNb, parentSeqNb, start);
+            newNode.readSpecificHeader(buffer);
+            break;
+
+        default:
+            /* Unrecognized node type */
+            throw new IOException();
+        }
+
+        /*
+         * At this point, we should be done reading the header and 'buffer'
+         * should only have the intervals left
+         */
+        for (i = 0; i < intervalCount; i++) {
+            HTInterval interval = HTInterval.readFrom(buffer);
+            newNode.fIntervals.add(interval);
+            newNode.fSizeOfIntervalSection += interval.getSizeOnDisk();
+        }
+
+        /* Assign the node's other information we have read previously */
+        newNode.fNodeEnd = end;
+        newNode.fIsOnDisk = true;
+
+        return newNode;
+    }
+
+    /**
+     * Write this node to the given file channel.
+     *
+     * @param fc
+     *            The file channel to write to (should already be positioned
+     *            correctly)
+     * @throws IOException
+     *             If there was an error writing
+     */
+    public final void writeSelf(FileChannel fc) throws IOException {
+        /*
+         * Yes, we are taking the *read* lock here, because we are reading the
+         * information in the node to write it to disk.
+         */
+        fRwl.readLock().lock();
+        try {
+            final int blockSize = fConfig.getBlockSize();
+
+            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
+            buffer.order(ByteOrder.LITTLE_ENDIAN);
+            buffer.clear();
+
+            /* Write the common header part */
+            buffer.put(getNodeType().toByte());
+            buffer.putLong(fNodeStart);
+            buffer.putLong(fNodeEnd);
+            buffer.putInt(fSequenceNumber);
+            buffer.putInt(fParentSequenceNumber);
+            buffer.putInt(fIntervals.size());
+            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header
+
+            /* Now call the inner method to write the specific header part */
+            writeSpecificHeader(buffer);
+
+            /* Back to us, we write the intervals */
+            fIntervals.forEach(i -> i.writeInterval(buffer));
+            if (blockSize - buffer.position() != getNodeFreeSpace()) {
+                throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$
+            }
+            /*
+             * Fill the rest with zeros
+             */
+            while (buffer.position() < blockSize) {
+                buffer.put((byte) 0);
+            }
+
+            /* Finally, write everything in the Buffer to disk */
+            buffer.flip();
+            int res = fc.write(buffer);
+            if (res != blockSize) {
+                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
+            }
+
+        } finally {
+            fRwl.readLock().unlock();
+        }
+        fIsOnDisk = true;
+    }
+
+    // ------------------------------------------------------------------------
+    // Accessors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Retrieve the history tree configuration used for this node.
+     *
+     * @return The history tree config
+     */
+    protected HTConfig getConfig() {
+        return fConfig;
+    }
+
+    /**
+     * Get the start time of this node.
+     *
+     * @return The start time of this node
+     */
+    public long getNodeStart() {
+        return fNodeStart;
+    }
+
+    /**
+     * Get the end time of this node.
+     *
+     * @return The end time of this node
+     */
+    public long getNodeEnd() {
+        if (fIsOnDisk) {
+            return fNodeEnd;
+        }
+        return 0;
+    }
+
+    /**
+     * Get the sequence number of this node.
+     *
+     * @return The sequence number of this node
+     */
+    public int getSequenceNumber() {
+        return fSequenceNumber;
+    }
+
+    /**
+     * Get the sequence number of this node's parent.
+     *
+     * @return The parent sequence number
+     */
+    public int getParentSequenceNumber() {
+        return fParentSequenceNumber;
+    }
+
+    /**
+     * Change this node's parent. Used when we create a new root node for
+     * example.
+     *
+     * @param newParent
+     *            The sequence number of the node that is the new parent
+     */
+    public void setParentSequenceNumber(int newParent) {
+        fParentSequenceNumber = newParent;
+    }
+
+    /**
+     * Return if this node is "done" (full and written to disk).
+     *
+     * @return If this node is done or not
+     */
+    public boolean isOnDisk() {
+        return fIsOnDisk;
+    }
+
+    /**
+     * Add an interval to this node
+     *
+     * @param newInterval
+     *            Interval to add to this node
+     */
+    public void addInterval(HTInterval newInterval) {
+        fRwl.writeLock().lock();
+        try {
+            /* Just in case, should be checked before even calling this function */
+            assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace());
+
+            /* Find the insert position to keep the list sorted */
+            int index = 0;
+            if (!fIntervals.isEmpty()) {
+                index = Collections.binarySearch(fIntervals, newInterval, NODE_ORDER);
+                /*
+                 * Interval should not already be in the node, binarySearch will
+                 * return (-insertionPoint - 1).
+                 */
+                index = -index - 1;
+            }
+
+            fIntervals.add(index, newInterval);
+            fSizeOfIntervalSection += newInterval.getSizeOnDisk();
+
+        } finally {
+            fRwl.writeLock().unlock();
+        }
+    }
+
+    /**
+     * We've received word from the containerTree that newer nodes now exist to
+     * our right. (Sets this node's end time.)
+     *
+     * @param endtime
+     *            The nodeEnd time that the node will have
+     */
+    public void closeThisNode(long endtime) {
+        fRwl.writeLock().lock();
+        try {
+            /**
+             * FIXME: was assert (endtime >= fNodeStart); but that exception
+             * is reached with an empty node that has start time endtime + 1
+             */
+//            if (endtime < fNodeStart) {
+//                throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
+//            }
+
+            if (!fIntervals.isEmpty()) {
+                /*
+                 * Make sure there are no intervals in this node with their
+                 * EndTime > the one requested. Only need to check the last one
+                 * since they are sorted
+                 */
+                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
+                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
+                }
+            }
+
+            fNodeEnd = endtime;
+        } finally {
+            fRwl.writeLock().unlock();
+        }
+    }
+
+    /**
+     * The method to fill up the stateInfo (passed on from the Current State
+     * Tree when it does a query on the SHT). We'll replace the data in that
+     * vector with whatever relevant information we can find from this node.
+     *
+     * @param stateInfo
+     *            The same stateInfo that comes from SHT's doQuery()
+     * @param t
+     *            The timestamp for which the query is for. Only return
+     *            intervals that intersect t.
+     * @throws TimeRangeException
+     *             If 't' is invalid
+     */
+    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
+            throws TimeRangeException {
+        /* This is from a state system query, we are "reading" this node */
+        fRwl.readLock().lock();
+        try {
+            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
+                /*
+                 * Now we only have to compare the Start times, since we know the
+                 * End times necessarily fit.
+                 *
+                 * Second condition is to ignore new attributes that might have
+                 * been created after stateInfo was instantiated (they would be
+                 * null anyway).
+                 */
+                ITmfStateInterval interval = fIntervals.get(i);
+                if (t >= interval.getStartTime() &&
+                        interval.getAttribute() < stateInfo.size()) {
+                    stateInfo.set(interval.getAttribute(), interval);
+                }
+            }
+        } finally {
+            fRwl.readLock().unlock();
+        }
+    }
+
+    /**
+     * Get a single Interval from the information in this node. If the
+     * key/timestamp pair cannot be found, we return null.
+     *
+     * @param key
+     *            The attribute quark to look for
+     * @param t
+     *            The timestamp
+     * @return The Interval containing the information we want, or null if it
+     *         wasn't found
+     * @throws TimeRangeException
+     *             If 't' is invalid
+     */
+    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
+        fRwl.readLock().lock();
+        try {
+            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
+                HTInterval curInterval = fIntervals.get(i);
+                if (curInterval.getAttribute() == key
+                        && curInterval.getStartTime() <= t) {
+                    return curInterval;
+                }
+            }
+
+            /* We didn't find the relevant information in this node */
+            return null;
+
+        } finally {
+            fRwl.readLock().unlock();
+        }
+    }
+
+    private int getStartIndexFor(long t) throws TimeRangeException {
+        /* Should only be called by methods with the readLock taken */
+
+        if (fIntervals.isEmpty()) {
+            return 0;
+        }
+
+        /*
+         * Since the intervals are sorted by end time then by start time, we can
+         * skip all the ones at the beginning whose end times are smaller than
+         * 't'. We search for a dummy interval from [Long.MIN_VALUE, t], which
+         * will return the first interval that ends with a time >= t.
+         */
+        HTInterval dummy = new HTInterval(Long.MIN_VALUE, t, 0, TmfStateValue.nullValue());
+        int index = Collections.binarySearch(fIntervals, dummy, NODE_ORDER);
+
+        /* Handle negative binarySearch return */
+        return (index >= 0 ? index : -index - 1);
+    }
+
+    /**
+     * Return the total header size of this node (will depend on the node type).
+     *
+     * @return The total header size
+     */
+    public final int getTotalHeaderSize() {
+        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
+    }
+
+    /**
+     * @return The offset, within the node, where the Data section ends
+     */
+    private int getDataSectionEndOffset() {
+        return getTotalHeaderSize() + fSizeOfIntervalSection;
+    }
+
+    /**
+     * Returns the free space left in the node, which is, simply put, the block
+     * size minus the offset at which the data section ends.
+     *
+     * @return The amount of free space in the node (in bytes)
+     */
+    public int getNodeFreeSpace() {
+        fRwl.readLock().lock();
+        int ret = fConfig.getBlockSize() - getDataSectionEndOffset();
+        fRwl.readLock().unlock();
+
+        return ret;
+    }
+
+    /**
+     * Returns the current space utilization of this node, as a percentage.
+     * (used space / total usable space, which excludes the header)
+     *
+     * @return The percentage (value between 0 and 100) of space utilization
+     *         in this node.
+     */
+    public long getNodeUsagePercent() {
+        fRwl.readLock().lock();
+        try {
+            final int blockSize = fConfig.getBlockSize();
+            float freePercent = (float) getNodeFreeSpace()
+                    / (float) (blockSize - getTotalHeaderSize())
+                    * 100F;
+            return (long) (100L - freePercent);
+
+        } finally {
+            fRwl.readLock().unlock();
+        }
+    }
+
+    /**
+     * @name Debugging functions
+     */
+
+    @SuppressWarnings("nls")
+    @Override
+    public String toString() {
+        /* Only used for debugging, shouldn't be externalized */
+        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
+                fSequenceNumber,
+                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
+                toStringSpecific(),
+                fIntervals.size(),
+                getNodeUsagePercent(),
+                fNodeStart,
+                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
+    }
+
+    /**
+     * Debugging function that prints out the contents of this node
+     *
+     * @param writer
+     *            PrintWriter in which we will print the debug output
+     */
+    @SuppressWarnings("nls")
+    public void debugPrintIntervals(PrintWriter writer) {
+        /* Only used for debugging, shouldn't be externalized */
+        writer.println("Intervals for node #" + fSequenceNumber + ":");
+
+        /* Array of children */
+        if (getNodeType() != NodeType.LEAF) { /* Only Core Nodes can have children */
+            ParentNode thisNode = (ParentNode) this;
+            writer.print("  " + thisNode.getNbChildren() + " children");
+            if (thisNode.getNbChildren() >= 1) {
+                writer.print(": [ " + thisNode.getChild(0));
+                for (int i = 1; i < thisNode.getNbChildren(); i++) {
+                    writer.print(", " + thisNode.getChild(i));
+                }
+                writer.print(']');
+            }
+            writer.print('\n');
+        }
+
+        /* List of intervals in the node */
+        writer.println("  Intervals contained:");
+        for (int i = 0; i < fIntervals.size(); i++) {
+            writer.println(fIntervals.get(i).toString());
+        }
+        writer.println('\n');
+    }
+
+    // ------------------------------------------------------------------------
+    // Abstract methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Get the byte value representing the node type.
+     *
+     * @return The node type
+     */
+    public abstract NodeType getNodeType();
+
+    /**
+     * Return the specific header size of this node. This means the size
+     * occupied by the type-specific section of the header (not counting the
+     * common part).
+     *
+     * @return The specific header size
+     */
+    protected abstract int getSpecificHeaderSize();
+
+    /**
+     * Read the type-specific part of the node header from a byte buffer.
+     *
+     * @param buffer
+     *            The byte buffer to read from. It should be already positioned
+     *            correctly.
+     */
+    protected abstract void readSpecificHeader(ByteBuffer buffer);
+
+    /**
+     * Write the type-specific part of the header in a byte buffer.
+     *
+     * @param buffer
+     *            The buffer to write to. It should already be at the correct
+     *            position.
+     */
+    protected abstract void writeSpecificHeader(ByteBuffer buffer);
+
+    /**
+     * Node-type-specific toString method. Used for debugging.
+     *
+     * @return A string representing the node
+     */
+    protected abstract String toStringSpecific();
+}
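To make the header and free-space bookkeeping concrete, here is a back-of-the-envelope sketch under the default configuration. It assumes a leaf node whose type-specific header is empty (LeafNode is not shown in this hunk), so the numbers are illustrative only.

    final class NodeSpaceSketch {
        static void sketch() {
            int blockSize = 64 * 1024;                // DEFAULT_BLOCKSIZE from HTConfig
            int commonHeader = 1 + 2 * 8 + 3 * 4 + 1; // 30 bytes, see COMMON_HEADER_SIZE
            int intervalSize = 25;                    // one integer-valued HTInterval on disk

            int freeSpace = blockSize - commonHeader; // getNodeFreeSpace() before any insert
            int capacity = freeSpace / intervalSize;  // 65506 / 25 = 2620 such intervals
            System.out.println(freeSpace + " bytes free, room for " + capacity + " intervals");
        }
    }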
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HT_IO.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HT_IO.java
new file mode 100644 (file)
index 0000000..b26225b
--- /dev/null
@@ -0,0 +1,308 @@
+/*******************************************************************************
+ * Copyright (c) 2012, 2014 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+import java.util.Objects;
+import java.util.concurrent.ExecutionException;
+import java.util.logging.Logger;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.eclipse.tracecompass.common.core.log.TraceCompassLog;
+import org.eclipse.tracecompass.internal.statesystem.core.Activator;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree.IHTNodeFactory;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+/**
+ * This class abstracts inputs/outputs of the HistoryTree nodes.
+ *
+ * It contains all the methods and descriptors to handle reading/writing nodes
+ * to the tree-file on disk and all the caching mechanisms.
+ *
+ * This abstraction is mainly for code isolation/clarification purposes. Every
+ * HistoryTree must contain 1 and only 1 HT_IO element.
+ *
+ * @author Alexandre Montplaisir
+ */
+public class HT_IO {
+
+    private static final Logger LOGGER = TraceCompassLog.getLogger(HT_IO.class);
+
+    // ------------------------------------------------------------------------
+    // Global cache of nodes
+    // ------------------------------------------------------------------------
+
+    private static final class CacheKey {
+
+        public final HT_IO fStateHistory;
+        public final int fSeqNumber;
+
+        public CacheKey(HT_IO stateHistory,  int seqNumber) {
+            fStateHistory = stateHistory;
+            fSeqNumber = seqNumber;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(fStateHistory, fSeqNumber);
+        }
+
+        @Override
+        public boolean equals(@Nullable Object obj) {
+            if (this == obj) {
+                return true;
+            }
+            if (obj == null) {
+                return false;
+            }
+            if (getClass() != obj.getClass()) {
+                return false;
+            }
+            CacheKey other = (CacheKey) obj;
+            return (fStateHistory.equals(other.fStateHistory) &&
+                    fSeqNumber == other.fSeqNumber);
+        }
+    }
+
+    private static final int CACHE_SIZE = 256;
+
+    private static final LoadingCache<CacheKey, HTNode> NODE_CACHE =
+        checkNotNull(CacheBuilder.newBuilder()
+            .maximumSize(CACHE_SIZE)
+            .build(new CacheLoader<CacheKey, HTNode>() {
+                @Override
+                public HTNode load(CacheKey key) throws IOException {
+                    HT_IO io = key.fStateHistory;
+                    int seqNb = key.fSeqNumber;
+
+                    LOGGER.finest(() -> "[HtIo:CacheMiss] seqNum=" + seqNb); //$NON-NLS-1$
+
+                    synchronized (io) {
+                        io.seekFCToNodePos(io.fFileChannelIn, seqNb);
+                        return HTNode.readNode(io.fConfig, io.fFileChannelIn, key.fStateHistory.fNodeFactory);
+                    }
+                }
+            }));
+
+
+    // ------------------------------------------------------------------------
+    // Instance fields
+    // ------------------------------------------------------------------------
+
+    /* Configuration of the History Tree */
+    private final HTConfig fConfig;
+
+    /* Fields related to the file I/O */
+    private final FileInputStream fFileInputStream;
+    private final FileOutputStream fFileOutputStream;
+    private final FileChannel fFileChannelIn;
+    private final FileChannel fFileChannelOut;
+
+    private final IHTNodeFactory fNodeFactory;
+
+    // ------------------------------------------------------------------------
+    // Methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Standard constructor
+     *
+     * @param config
+     *            The configuration object for the StateHistoryTree
+     * @param newFile
+     *            Flag indicating that the file must be created from scratch
+     * @param nodeFactory
+     *            The factory to create new nodes for this tree
+     *
+     * @throws IOException
+     *             An exception can be thrown when the file cannot be accessed
+     */
+    public HT_IO(HTConfig config, boolean newFile, IHTNodeFactory nodeFactory) throws IOException {
+        fConfig = config;
+
+        File historyTreeFile = config.getStateFile();
+        if (newFile) {
+            boolean success1 = true;
+            /* Create a new empty History Tree file */
+            if (historyTreeFile.exists()) {
+                success1 = historyTreeFile.delete();
+            }
+            boolean success2 = historyTreeFile.createNewFile();
+            if (!(success1 && success2)) {
+                /* It seems we do not have permission to create the new file */
+                throw new IOException("Cannot create new file at " + //$NON-NLS-1$
+                        historyTreeFile.getName());
+            }
+            fFileInputStream = new FileInputStream(historyTreeFile);
+            fFileOutputStream = new FileOutputStream(historyTreeFile, false);
+        } else {
+            /*
+             * We want to open an existing file, make sure we don't squash the
+             * existing content when opening the fos!
+             */
+            fFileInputStream = new FileInputStream(historyTreeFile);
+            fFileOutputStream = new FileOutputStream(historyTreeFile, true);
+        }
+        fFileChannelIn = fFileInputStream.getChannel();
+        fFileChannelOut = fFileOutputStream.getChannel();
+        fNodeFactory = nodeFactory;
+    }
+
+    /**
+     * Read a node from the file on disk.
+     *
+     * @param seqNumber
+     *            The sequence number of the node to read.
+     * @return The object representing the node
+     * @throws ClosedChannelException
+     *             Usually happens because the file was closed while we were
+     *             reading. Instead of using a big reader-writer lock, we'll
+     *             just catch this exception.
+     */
+    public @NonNull HTNode readNode(int seqNumber) throws ClosedChannelException {
+        /* Do a cache lookup. If it's not present it will be loaded from disk */
+        LOGGER.finest(() -> "[HtIo:CacheLookup] seqNum=" + seqNumber); //$NON-NLS-1$
+        CacheKey key = new CacheKey(this, seqNumber);
+        try {
+            return checkNotNull(NODE_CACHE.get(key));
+
+        } catch (ExecutionException e) {
+            /* Get the inner exception that was generated */
+            Throwable cause = e.getCause();
+            if (cause instanceof ClosedChannelException) {
+                throw (ClosedChannelException) cause;
+            }
+            /*
+             * Other types of IOExceptions shouldn't happen at this point though.
+             */
+            Activator.getDefault().logError(e.getMessage(), e);
+            throw new IllegalStateException();
+        }
+    }
+
+    /**
+     * Write the given node to disk.
+     *
+     * @param node
+     *            The node to write.
+     */
+    public void writeNode(HTNode node) {
+        try {
+            int seqNumber = node.getSequenceNumber();
+
+            /* "Write-back" the node into the cache */
+            CacheKey key = new CacheKey(this, seqNumber);
+            NODE_CACHE.put(key, node);
+
+            /* Position ourselves at the start of the node and write it */
+            synchronized (this) {
+                seekFCToNodePos(fFileChannelOut, seqNumber);
+                node.writeSelf(fFileChannelOut);
+            }
+        } catch (IOException e) {
+            /* If we were able to open the file, we should be fine now... */
+            Activator.getDefault().logError(e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Get the output file channel, used for writing.
+     *
+     * @return The output file channel
+     */
+    public FileChannel getFcOut() {
+        return fFileChannelOut;
+    }
+
+    /**
+     * Retrieve the input stream from which to read the attribute tree.
+     *
+     * @param nodeOffset
+     *            The offset in the file, in number of nodes. This should be
+     *            after all the nodes.
+     * @return The correctly-seeked input stream
+     */
+    public FileInputStream supplyATReader(int nodeOffset) {
+        try {
+            /*
+             * Position ourselves at the start of the Mapping section in the
+             * file (which is right after the Blocks)
+             */
+            seekFCToNodePos(fFileChannelIn, nodeOffset);
+        } catch (IOException e) {
+            Activator.getDefault().logError(e.getMessage(), e);
+        }
+        return fFileInputStream;
+    }
+
+    /**
+     * Close all file channels and streams.
+     */
+    public synchronized void closeFile() {
+        try {
+            fFileInputStream.close();
+            fFileOutputStream.close();
+        } catch (IOException e) {
+            Activator.getDefault().logError(e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Delete the history tree file
+     */
+    public synchronized void deleteFile() {
+        closeFile();
+
+        File historyTreeFile = fConfig.getStateFile();
+        if (!historyTreeFile.delete()) {
+            /* We didn't succeed in deleting the file */
+            Activator.getDefault().logError("Failed to delete" + historyTreeFile.getName()); //$NON-NLS-1$
+        }
+    }
+
+    /**
+     * Seek the given FileChannel to the position corresponding to the node
+     * with the given sequence number.
+     *
+     * @param fc
+     *            the channel to seek
+     * @param seqNumber
+     *            the node sequence number to seek the channel to
+     * @throws IOException
+     *             If some other I/O error occurs
+     */
+    private void seekFCToNodePos(FileChannel fc, int seqNumber)
+            throws IOException {
+        /*
+         * Cast to (long) is needed to make sure the result is a long too and
+         * doesn't get truncated
+         */
+        fc.position(IHistoryTree.TREE_HEADER_SIZE
+                + ((long) seqNumber) * fConfig.getBlockSize());
+    }
+
+}
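For reference, the only arithmetic HT_IO relies on to locate a node is the offset computed in seekFCToNodePos() above: the header size plus the sequence number times the block size. A minimal, self-contained sketch of that calculation; the constants simply mirror the defaults visible elsewhere in this patch and are not new API:

    public final class NodeOffsetSketch {
        private static final int TREE_HEADER_SIZE = 4096;  // IHistoryTree.TREE_HEADER_SIZE
        private static final int BLOCK_SIZE = 64 * 1024;   // default block size used by HistoryTreeBackend

        /** Byte offset in the history file of the node with the given sequence number. */
        static long nodeOffset(int seqNumber) {
            /* The cast to long matters: seqNumber * BLOCK_SIZE overflows int past ~2 GiB */
            return TREE_HEADER_SIZE + ((long) seqNumber) * BLOCK_SIZE;
        }

        public static void main(String[] args) {
            System.out.println(nodeOffset(0));      // 4096, right after the tree header
            System.out.println(nodeOffset(40_000)); // 2621444096, beyond what an int could hold
        }
    }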
index 2a6b19d9b396b6a242dea24b97be942eec125b42..5e752deeb98b3d09ed35c390aa329a776c25517b 100644 (file)
@@ -18,13 +18,15 @@ package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
+import java.util.Deque;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.logging.Logger;
 
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.tracecompass.common.core.log.TraceCompassLog;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHistoryTree;
+import org.eclipse.tracecompass.internal.statesystem.core.Activator;
 import org.eclipse.tracecompass.statesystem.core.backend.IStateHistoryBackend;
 import org.eclipse.tracecompass.statesystem.core.exceptions.StateSystemDisposedException;
 import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
@@ -33,7 +35,6 @@ import org.eclipse.tracecompass.statesystem.core.statevalue.ITmfStateValue;
 import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
 
 /**
  * History Tree backend for storing a state history. This is the basic version
@@ -50,7 +51,7 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
     /**
      * The history tree that sits underneath.
      */
-    private final @NonNull IHistoryTree<@NonNull StateSystemInterval> fSht;
+    private final @NonNull IHistoryTree fSht;
 
     /** Indicates if the history tree construction is done */
     private volatile boolean fFinishedBuilding = false;
@@ -98,18 +99,15 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
      *             Thrown if we can't create the file for some reason
      */
     public HistoryTreeBackend(@NonNull String ssid,
-            @NonNull File newStateFile,
+            File newStateFile,
             int providerVersion,
             long startTime,
             int blockSize,
             int maxChildren) throws IOException {
-
         fSsid = ssid;
-        fSht = initializeSHT(newStateFile,
-                blockSize,
-                maxChildren,
-                providerVersion,
-                startTime);
+        final HTConfig conf = new HTConfig(newStateFile, blockSize, maxChildren,
+                providerVersion, startTime);
+        fSht = initializeSHT(conf);
     }
 
     /**
@@ -132,8 +130,7 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
      *             Thrown if we can't create the file for some reason
      * @since 1.0
      */
-    public HistoryTreeBackend(@NonNull String ssid, @NonNull File newStateFile,
-            int providerVersion, long startTime)
+    public HistoryTreeBackend(@NonNull String ssid, File newStateFile, int providerVersion, long startTime)
             throws IOException {
         this(ssid, newStateFile, providerVersion, startTime, 64 * 1024, 50);
     }
@@ -160,40 +157,18 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
     }
 
     /**
-     * Instantiate the new history tree to be used.
+     * New-tree initializer for the History Tree wrapped by this backend. Can be
+     * overridden to use different implementations.
      *
-     * @param stateHistoryFile
-     *            The name of the history file
-     * @param blockSize
-     *            The size of each "block" on disk in bytes. One node will
-     *            always fit in one block. It should be at least 4096.
-     * @param maxChildren
-     *            The maximum number of children allowed per core (non-leaf)
-     *            node.
-     * @param providerVersion
-     *            The version of the state provider. If a file already exists,
-     *            and their versions match, the history file will not be rebuilt
-     *            uselessly.
-     * @param treeStart
-     *            The start time of the history
+     * @param conf
+     *            The HTConfig configuration object
      * @return The new history tree
      * @throws IOException
-     *             If an error happens trying to open/write to the file
-     *             specified in the config
+     *             If there was a problem during creation
      */
     @VisibleForTesting
-    protected @NonNull IHistoryTree<@NonNull StateSystemInterval> initializeSHT(
-            @NonNull File stateHistoryFile,
-            int blockSize,
-            int maxChildren,
-            int providerVersion,
-            long treeStart) throws IOException {
-
-        return HistoryTreeFactory.createHistoryTree(stateHistoryFile,
-                blockSize,
-                maxChildren,
-                providerVersion,
-                treeStart);
+    protected @NonNull IHistoryTree initializeSHT(@NonNull HTConfig conf) throws IOException {
+        return HistoryTreeFactory.createHistoryTree(conf);
     }
 
     /**
@@ -209,8 +184,7 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
      *             If there was a problem during creation
      */
     @VisibleForTesting
-    protected @NonNull IHistoryTree<@NonNull StateSystemInterval>initializeSHT(
-            @NonNull File existingStateFile, int providerVersion) throws IOException {
+    protected @NonNull IHistoryTree initializeSHT(@NonNull File existingStateFile, int providerVersion) throws IOException {
         return HistoryTreeFactory.createFromFile(existingStateFile.toPath(), providerVersion);
     }
 
@@ -223,7 +197,7 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
      *
      * @return The history tree
      */
-    protected final @NonNull IHistoryTree<@NonNull StateSystemInterval> getSHT() {
+    protected final @NonNull IHistoryTree getSHT() {
         return fSht;
     }
 
@@ -245,11 +219,11 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
     @Override
     public void insertPastState(long stateStartTime, long stateEndTime,
             int quark, ITmfStateValue value) throws TimeRangeException {
-        StateSystemInterval interval = new StateSystemInterval(stateStartTime, stateEndTime,
+        HTInterval interval = new HTInterval(stateStartTime, stateEndTime,
                 quark, (TmfStateValue) value);
 
         /* Start insertions at the "latest leaf" */
-        getSHT().insert(interval);
+        getSHT().insertInterval(interval);
     }
 
     @Override
@@ -298,24 +272,41 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
             throws TimeRangeException, StateSystemDisposedException {
         checkValidTime(t);
 
-        RangeCondition<@NonNull Long> rc = RangeCondition.singleton(t);
+        /* Queue containing the sequence numbers of the nodes that intersect t */
+        Deque<Integer> queue = new LinkedList<>();
+
+        /* We start by reading the information in the root node */
+        queue.add(getSHT().getRootNode().getSequenceNumber());
+
+        /* Then we follow down into the relevant children */
+        try {
+            while (!queue.isEmpty()) {
+                int sequenceNumber = queue.pop();
+                HTNode currentNode = getSHT().readNode(sequenceNumber);
+                if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
+                    /* Here we add the relevant child nodes for BFS */
+                    queue.addAll(((ParentNode) currentNode).selectNextChildren(t));
+                }
+                currentNode.writeInfoFromNode(stateInfo, t);
+            }
+        } catch (ClosedChannelException e) {
+            throw new StateSystemDisposedException(e);
+        }
 
-        Iterable<StateSystemInterval> intervals = getSHT().getMatchingIntervals(rc, e -> true);
-        Iterables.filter(intervals, e -> e.getAttribute() < stateInfo.size())
-                .forEach(e -> stateInfo.set(e.getAttribute(), e));
+        /*
+         * The stateInfo should now be filled with everything needed; we pass
+         * control back to the State System.
+         */
     }
 
     @Override
     public ITmfStateInterval doSingularQuery(long t, int attributeQuark)
             throws TimeRangeException, StateSystemDisposedException {
-            checkValidTime(t);
-
-            RangeCondition<@NonNull Long> rc = RangeCondition.singleton(t);
-
-            Iterable<StateSystemInterval> intervals = getSHT()
-                    .getMatchingIntervals(rc, e -> e.getAttribute() == attributeQuark);
-
-            return Iterables.getFirst(intervals, null);
+        try {
+            return getRelevantInterval(t, attributeQuark);
+        } catch (ClosedChannelException e) {
+            throw new StateSystemDisposedException(e);
+        }
     }
 
     private void checkValidTime(long t) {
@@ -327,6 +318,29 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
         }
     }
 
+    /**
+     * Inner method to find the interval in the tree containing the requested
+     * key/timestamp pair, regardless of which node contains it.
+     */
+    private HTInterval getRelevantInterval(long t, int key)
+            throws TimeRangeException, ClosedChannelException {
+        checkValidTime(t);
+
+        Deque<Integer> queue = new LinkedList<>();
+        queue.add(getSHT().getRootNode().getSequenceNumber());
+        HTInterval interval = null;
+        while (interval == null && !queue.isEmpty()) {
+            int sequenceNumber = queue.pop();
+            HTNode currentNode = getSHT().readNode(sequenceNumber);
+            if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
+                /* Here we add the relevant child nodes for BFS */
+                queue.addAll(((ParentNode) currentNode).selectNextChildren(t));
+            }
+            interval = currentNode.getRelevantInterval(key, t);
+        }
+        return interval;
+    }
+
     /**
      * Return the size of the tree history file
      *
@@ -342,28 +356,25 @@ public class HistoryTreeBackend implements IStateHistoryBackend {
      * @return Average node usage %
      */
     public int getAverageNodeUsage() {
-        return 0;
-        // TODO Reimplement, elsewhere?
-
-//        HTNode node;
-//        long total = 0;
-//        long ret;
-//
-//        try {
-//            for (int seq = 0; seq < getSHT().getNodeCount(); seq++) {
-//                node = getSHT().readNode(seq);
-//                total += node.getNodeUsagePercent();
-//            }
-//        } catch (ClosedChannelException e) {
-//            Activator.getDefault().logError(e.getMessage(), e);
-//        }
-//
-//        ret = total / getSHT().getNodeCount();
-//        /* The return value should be a percentage */
-//        if (ret < 0 || ret > 100) {
-//            throw new IllegalStateException("Average node usage is not a percentage: " + ret); //$NON-NLS-1$
-//        }
-//        return (int) ret;
+        HTNode node;
+        long total = 0;
+        long ret;
+
+        try {
+            for (int seq = 0; seq < getSHT().getNodeCount(); seq++) {
+                node = getSHT().readNode(seq);
+                total += node.getNodeUsagePercent();
+            }
+        } catch (ClosedChannelException e) {
+            Activator.getDefault().logError(e.getMessage(), e);
+        }
+
+        ret = total / getSHT().getNodeCount();
+        /* The return value should be a percentage */
+        if (ret < 0 || ret > 100) {
+            throw new IllegalStateException("Average node usage is not a percentage: " + ret); //$NON-NLS-1$
+        }
+        return (int) ret;
     }
 
 }
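The two query paths restored above (doQuery and getRelevantInterval) share the same top-down walk: start from the root's sequence number and only enqueue the children whose range intersects the query timestamp. A minimal sketch of that pattern, using a hypothetical SimpleNode interface in place of the real HTNode/ParentNode types:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.List;

    final class TreeQuerySketch {
        interface SimpleNode {
            boolean isCore();
            List<Integer> childrenIntersecting(long t); // stands in for ParentNode.selectNextChildren(t)
            void collectStateAt(long t);                // stands in for HTNode.writeInfoFromNode(...)
        }

        /** Visit every node on the branch(es) that cover timestamp t. */
        static void queryFullState(SimpleNode[] nodesBySeqNumber, int rootSeqNumber, long t) {
            Deque<Integer> queue = new ArrayDeque<>();
            queue.add(rootSeqNumber);
            while (!queue.isEmpty()) {
                SimpleNode node = nodesBySeqNumber[queue.pop()];
                if (node.isCore()) {
                    /* Only descend into children whose time range intersects t */
                    queue.addAll(node.childrenIntersecting(t));
                }
                node.collectStateAt(t);
            }
        }
    }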
index c52797a4f779e127eba052eb9eb623176755fda8..31d4487d97a907d6353bf95c44437a7d5871bc30 100644 (file)
@@ -9,7 +9,6 @@
 
 package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
@@ -19,8 +18,7 @@ import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
 
 import org.eclipse.jdt.annotation.NonNullByDefault;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHistoryTree;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.classic.ClassicHistoryTree;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.classic.HistoryTreeClassic;
 
 /**
  * Class that contains factory methods to build different types of history trees
@@ -34,40 +32,18 @@ public final class HistoryTreeFactory {
     }
 
     /**
-     * Create a new History Tree from scratch, specifying all configuration
-     * parameters.
+     * Create a new State History from scratch, using a {@link HTConfig} object
+     * for configuration.
      *
-     * @param stateHistoryFile
-     *            The name of the history file
-     * @param blockSize
-     *            The size of each "block" on disk in bytes. One node will
-     *            always fit in one block. It should be at least 4096.
-     * @param maxChildren
-     *            The maximum number of children allowed per core (non-leaf)
-     *            node.
-     * @param providerVersion
-     *            The version of the state provider. If a file already exists,
-     *            and their versions match, the history file will not be rebuilt
-     *            uselessly.
-     * @param treeStart
-     *            The start time of the history
-     * @return The new history tree
+     * @param conf
+     *            The config to use for this History Tree.
+     * @return the new state history tree
      * @throws IOException
      *             If an error happens trying to open/write to the file
      *             specified in the config
      */
-    public static IHistoryTree<StateSystemInterval> createHistoryTree(File stateHistoryFile,
-            int blockSize,
-            int maxChildren,
-            int providerVersion,
-            long treeStart) throws IOException {
-
-        return new ClassicHistoryTree<>(stateHistoryFile,
-                blockSize,
-                maxChildren,
-                providerVersion,
-                treeStart,
-                StateSystemInterval.DESERIALISER);
+    public static IHistoryTree createHistoryTree(HTConfig conf) throws IOException {
+        return new HistoryTreeClassic(conf);
     }
 
     /**
@@ -82,8 +58,7 @@ public final class HistoryTreeFactory {
      * @throws IOException
      *             If an error happens reading the file
      */
-    public static IHistoryTree<StateSystemInterval> createFromFile(
-            Path existingStateFile, int expectedProviderVersion) throws IOException {
+    public static IHistoryTree createFromFile(Path existingStateFile, int expectedProviderVersion) throws IOException {
         /*
          * Check the file exists and has a positive length. These verifications
          * will also be done in the HT's constructor.
@@ -109,9 +84,8 @@ public final class HistoryTreeFactory {
          */
         int magicNumber = buffer.getInt();
         switch (magicNumber) {
-        case ClassicHistoryTree.HISTORY_FILE_MAGIC_NUMBER:
-            return new ClassicHistoryTree<>(existingStateFile.toFile(),
-                    expectedProviderVersion, StateSystemInterval.DESERIALISER);
+        case HistoryTreeClassic.HISTORY_FILE_MAGIC_NUMBER:
+            return new HistoryTreeClassic(existingStateFile.toFile(), expectedProviderVersion);
         default:
             throw new IOException("Not a known history tree file"); //$NON-NLS-1$
         }
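createFromFile() dispatches on the first int of the file header. A hedged sketch of that peek, written against the standard NIO API only; the file name and the byte order here are assumptions, not something this patch specifies:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    final class MagicNumberSketch {
        static int readMagicNumber(Path historyFile) throws IOException {
            try (FileChannel fc = FileChannel.open(historyFile, StandardOpenOption.READ)) {
                ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); // assumes the writer's default byte order
                fc.read(buffer);
                buffer.flip();
                return buffer.getInt();
            }
        }

        public static void main(String[] args) throws IOException {
            int magic = readMagicNumber(Paths.get("example.ht")); // hypothetical file name
            if (magic != 0x05FFA900) { // HistoryTreeClassic.HISTORY_FILE_MAGIC_NUMBER
                throw new IOException("Not a known history tree file");
            }
        }
    }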
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/IHistoryTree.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/IHistoryTree.java
new file mode 100644 (file)
index 0000000..299db35
--- /dev/null
@@ -0,0 +1,199 @@
+/*******************************************************************************
+ * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
+import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
+
+/**
+ * Meta-container for the History Tree. This structure contains all the
+ * high-level data relevant to the tree.
+ *
+ * @author Alexandre Montplaisir
+ * @author Geneviève Bastien
+ */
+public interface IHistoryTree {
+
+    /**
+     * Interface used by the history tree to create the various HTNodes
+     */
+    interface IHTNodeFactory {
+
+        /**
+         * Creates a new core node for the specific history tree
+         *
+         * @param config
+         *            Configuration of the History Tree
+         * @param seqNumber
+         *            The (unique) sequence number assigned to this particular
+         *            node
+         * @param parentSeqNumber
+         *            The sequence number of this node's parent node
+         * @param start
+         *            The earliest timestamp stored in this node
+         * @return The new core node
+         * @throws IOException
+         *             any exception occurring while trying to read/create the
+         *             node
+         */
+        HTNode createCoreNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) throws IOException;
+
+        /**
+         * Creates a new leaf node for the specific history tree
+         *
+         * @param config
+         *            Configuration of the History Tree
+         * @param seqNumber
+         *            The (unique) sequence number assigned to this particular
+         *            node
+         * @param parentSeqNumber
+         *            The sequence number of this node's parent node
+         * @param start
+         *            The earliest timestamp stored in this node
+         * @return The new leaf node
+         * @throws IOException
+         *             any exception occurring while trying to read/create the
+         *             node
+         */
+        HTNode createLeafNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) throws IOException;
+    }
+
+    /**
+     * Size of the "tree header" in the tree-file The nodes will use this offset
+     * to know where they should be in the file. This should always be a
+     * multiple of 4K.
+     */
+    int TREE_HEADER_SIZE = 4096;
+
+    /**
+     * "Save" the tree to disk. This method will cause the treeIO object to
+     * commit all nodes to disk and then save the tree's configuration into
+     * the header of the file.
+     *
+     * @param requestedEndTime
+     *            The greatest timestamp present in the history tree
+     */
+    void closeTree(long requestedEndTime);
+
+    // ------------------------------------------------------------------------
+    // Accessors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Get the start time of this tree.
+     *
+     * @return The start time
+     */
+    long getTreeStart();
+
+    /**
+     * Get the current end time of this tree.
+     *
+     * @return The end time
+     */
+    long getTreeEnd();
+
+    /**
+     * Get the number of nodes in this tree.
+     *
+     * @return The number of nodes
+     */
+    int getNodeCount();
+
+    /**
+     * Get the current root node of this tree
+     *
+     * @return The root node
+     */
+    HTNode getRootNode();
+
+    // ------------------------------------------------------------------------
+    // HT_IO interface
+    // ------------------------------------------------------------------------
+
+    /**
+     * Return the FileInputStream reader with which we will read an attribute
+     * tree (it will be sought to the correct position).
+     *
+     * @return The FileInputStream indicating the file and position from which
+     *         the attribute tree can be read.
+     */
+    FileInputStream supplyATReader();
+
+    /**
+     * Return the file to which we will write the attribute tree.
+     *
+     * @return The file to which we will write the attribute tree
+     */
+    File supplyATWriterFile();
+
+    /**
+     * Return the position in the file (given by {@link #supplyATWriterFile})
+     * where to start writing the attribute tree.
+     *
+     * @return The position in the file where to start writing
+     */
+    long supplyATWriterFilePos();
+
+    /**
+     * Read a node from the tree.
+     *
+     * @param seqNumber
+     *            The sequence number of the node to read
+     * @return The node
+     * @throws ClosedChannelException
+     *             If the tree IO is unavailable
+     */
+    HTNode readNode(int seqNumber) throws ClosedChannelException;
+
+    /**
+     * Write a node object to the history file.
+     *
+     * @param node
+     *            The node to write to disk
+     */
+    void writeNode(HTNode node);
+
+    /**
+     * Close the history file.
+     */
+    void closeFile();
+
+    /**
+     * Delete the history file.
+     */
+    void deleteFile();
+
+    // ------------------------------------------------------------------------
+    // Operations
+    // ------------------------------------------------------------------------
+
+    /**
+     * Insert an interval in the tree.
+     *
+     * @param interval
+     *            The interval to be inserted
+     * @throws TimeRangeException
+     *             If the start or end time of the interval is invalid
+     */
+    void insertInterval(HTInterval interval) throws TimeRangeException;
+
+    /**
+     * Get the current size of the history file.
+     *
+     * @return The history file size
+     */
+    long getFileSize();
+
+}
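Putting the restored interface together, writing a small tree boils down to creating it from an HTConfig, inserting intervals, and closing it. This is only a usage sketch, assuming the caller has access to these internal packages; the numeric values are illustrative:

    import java.io.File;
    import java.io.IOException;

    import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTConfig;
    import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTInterval;
    import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HistoryTreeFactory;
    import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree;
    import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

    final class HistoryTreeUsageSketch {
        public static void main(String[] args) throws IOException {
            HTConfig conf = new HTConfig(new File("example.ht"), // hypothetical file
                    64 * 1024, /* blockSize */
                    50,        /* maxChildren */
                    1,         /* providerVersion */
                    0L);       /* treeStart */
            IHistoryTree tree = HistoryTreeFactory.createHistoryTree(conf);
            tree.insertInterval(new HTInterval(0L, 100L, /* quark */ 0, TmfStateValue.newValueInt(42)));
            tree.closeTree(100L); // commit all nodes and write the tree header
        }
    }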
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/LeafNode.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/LeafNode.java
new file mode 100644 (file)
index 0000000..d4cecf3
--- /dev/null
@@ -0,0 +1,71 @@
+/*******************************************************************************
+ * Copyright (c) 2014, 2016 École Polytechnique de Montréal and others
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *   Florian Wininger - Initial API and implementation
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A Leaf node is a last-level node of a History Tree.
+ *
+ * A leaf node cannot have children, so it extends HTNode without adding
+ * anything in particular.
+ *
+ * @author Florian Wininger
+ */
+public final class LeafNode extends HTNode {
+
+    /**
+     * Initial constructor. Use this to initialize a new EMPTY node.
+     *
+     * @param config
+     *            Configuration of the History Tree
+     * @param seqNumber
+     *            The (unique) sequence number assigned to this particular node
+     * @param parentSeqNumber
+     *            The sequence number of this node's parent node
+     * @param start
+     *            The earliest timestamp stored in this node
+     */
+    public LeafNode(HTConfig config, int seqNumber, int parentSeqNumber,
+            long start) {
+        super(config, seqNumber, parentSeqNumber, start);
+    }
+
+    @Override
+    protected void readSpecificHeader(ByteBuffer buffer) {
+        /* No specific header part */
+    }
+
+    @Override
+    protected void writeSpecificHeader(ByteBuffer buffer) {
+        /* No specific header part */
+    }
+
+    @Override
+    public NodeType getNodeType() {
+        return NodeType.LEAF;
+    }
+
+    @Override
+    protected int getSpecificHeaderSize() {
+        /* Empty */
+        return 0;
+    }
+
+    @Override
+    public String toStringSpecific() {
+        /* Only used for debugging, shouldn't be externalized */
+        return "Leaf Node"; //$NON-NLS-1$;
+    }
+
+}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/ParentNode.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/ParentNode.java
new file mode 100644 (file)
index 0000000..15003bd
--- /dev/null
@@ -0,0 +1,96 @@
+/*******************************************************************************
+ * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
+
+import java.util.Collection;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
+
+/**
+ * A parent node is a node of a History Tree which is not a leaf node, and can
+ * therefore have child nodes.
+ *
+ * It extends HTNode by adding support for child nodes, and also extensions.
+ *
+ * @author Alexandre Montplaisir
+ * @author Florian Wininger
+ */
+public abstract class ParentNode extends HTNode {
+
+    /**
+     * Initial constructor. Use this to initialize a new EMPTY node.
+     *
+     * @param config
+     *            Configuration of the History Tree
+     * @param seqNumber
+     *            The (unique) sequence number assigned to this particular node
+     * @param parentSeqNumber
+     *            The sequence number of this node's parent node
+     * @param start
+     *            The earliest timestamp stored in this node
+     */
+    public ParentNode(HTConfig config, int seqNumber, int parentSeqNumber,
+            long start) {
+        super(config, seqNumber, parentSeqNumber, start);
+    }
+
+    /**
+     * Return the number of child nodes this node has.
+     *
+     * @return The number of child nodes
+     */
+    public abstract int getNbChildren();
+
+    /**
+     * Get the child node corresponding to the specified index
+     *
+     * @param index The index of the child to lookup
+     * @return The child node
+     */
+    public abstract int getChild(int index);
+
+    /**
+     * Get the latest (right-most) child node of this node.
+     *
+     * @return The latest child node
+     */
+    public abstract int getLatestChild();
+
+    /**
+     * Get the start time of the specified child node.
+     *
+     * @param index
+     *            The index of the child node
+     * @return The start time of that child node.
+     */
+    public abstract long getChildStart(int index);
+
+    /**
+     * Tell this node that it has a new child (Congrats!)
+     *
+     * @param childNode
+     *            The HTNode object of the new child
+     */
+    public abstract void linkNewChild(HTNode childNode);
+
+    /**
+     * Inner method to select the sequence numbers for the children of the
+     * current node that intersect the given timestamp. Useful for moving down
+     * the tree.
+     *
+     * @param t
+     *            The timestamp to choose which child is the next one
+     * @return Collection of sequence numbers of the child nodes that intersect
+     *         t, non-null empty collection if this is a Leaf Node
+     * @throws TimeRangeException
+     *             If t is out of the node's range
+     */
+    public abstract @NonNull Collection<@NonNull Integer> selectNextChildren(long t);
+
+}
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/StateSystemInterval.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/StateSystemInterval.java
deleted file mode 100644 (file)
index cc99e8b..0000000
+++ /dev/null
@@ -1,342 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2015 Ericsson, École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * Contributors:
- *    Alexandre Montplaisir - Initial API and implementation
- *    Florian Wininger - Allow to change the size of a interval
- *    Patrick Tasse - Add message to exceptions
- *******************************************************************************/
-
-package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
-
-import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull;
-
-import java.nio.charset.Charset;
-import java.util.Objects;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.HTInterval;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;
-import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferWriter;
-import org.eclipse.tracecompass.internal.provisional.statesystem.core.statevalue.CustomStateValue;
-import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
-import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
-import org.eclipse.tracecompass.statesystem.core.statevalue.ITmfStateValue;
-import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
-
-/**
- * The interval component, which will be contained in a node of the History
- * Tree.
- *
- * @author Alexandre Montplaisir
- */
-public final class StateSystemInterval extends HTInterval implements ITmfStateInterval {
-
-    private static final Charset CHARSET = Charset.forName("UTF-8"); //$NON-NLS-1$
-
-    private static final String errMsg = "Invalid interval data. Maybe your file is corrupt?"; //$NON-NLS-1$
-
-    /* 'Byte' equivalent for state values types */
-    private static final byte TYPE_NULL = -1;
-    private static final byte TYPE_INTEGER = 0;
-    private static final byte TYPE_STRING = 1;
-    private static final byte TYPE_LONG = 2;
-    private static final byte TYPE_DOUBLE = 3;
-    private static final byte TYPE_CUSTOM = 20;
-
-    private final int fAttribute;
-    private final @NonNull TmfStateValue fSv;
-
-    /** Number of bytes used by this interval when it is written to disk */
-    private final int fSizeOnDisk;
-
-    /**
-     * Standard constructor
-     *
-     * @param intervalStart
-     *            Start time of the interval
-     * @param intervalEnd
-     *            End time of the interval
-     * @param attribute
-     *            Attribute (quark) to which the state represented by this
-     *            interval belongs
-     * @param value
-     *            State value represented by this interval
-     * @throws TimeRangeException
-     *             If the start time or end time are invalid
-     */
-    public StateSystemInterval(long intervalStart, long intervalEnd, int attribute,
-            @NonNull TmfStateValue value) throws TimeRangeException {
-        super(intervalStart, intervalEnd);
-
-        fAttribute = attribute;
-        fSv = value;
-        fSizeOnDisk = computeSizeOnDisk(fSv);
-    }
-
-    /**
-     * Compute how much space (in bytes) an interval will take in its serialized
-     * form on disk. This is dependent on its state value.
-     */
-    private static int computeSizeOnDisk(ITmfStateValue sv) {
-        /*
-         * Minimum size is 2x long (start and end), 1x int (attribute) and 1x
-         * byte (value type).
-         */
-        int minSize = Long.BYTES + Long.BYTES + Integer.BYTES + Byte.BYTES;
-
-        switch (sv.getType()) {
-        case NULL:
-            return minSize;
-        case INTEGER:
-            return (minSize + Integer.BYTES);
-        case LONG:
-            return (minSize + Long.BYTES);
-        case DOUBLE:
-            return (minSize + Double.BYTES);
-        case STRING:
-            String str = sv.unboxStr();
-            int strLength = str.getBytes(CHARSET).length;
-
-            if (strLength > Short.MAX_VALUE) {
-                throw new IllegalArgumentException("String is too long to be stored in state system: " + str); //$NON-NLS-1$
-            }
-
-            /*
-             * String's length + 3 (2 bytes for size, 1 byte for \0 at the end)
-             */
-            return (minSize + strLength + 3);
-        case CUSTOM:
-            /* Length of serialized value (short) + state value */
-            return (minSize + Short.BYTES + ((CustomStateValue) sv).getSerializedSize());
-        default:
-            /*
-             * It's very important that we know how to write the state value in
-             * the file!!
-             */
-            throw new IllegalStateException();
-        }
-    }
-
-    /**
-     * Reader object for this interval type.
-     */
-    public static final @NonNull IHTIntervalReader<@NonNull StateSystemInterval> DESERIALISER = buffer -> {
-        TmfStateValue value;
-
-        /* Read the Data Section entry */
-        long intervalStart = buffer.getLong();
-        long intervalEnd = buffer.getLong();
-        int attribute = buffer.getInt();
-
-        /* Read the 'type' of the value, then react accordingly */
-        byte valueType = buffer.get();
-        switch (valueType) {
-
-        case TYPE_NULL:
-            value = TmfStateValue.nullValue();
-            break;
-
-        case TYPE_INTEGER:
-            value = TmfStateValue.newValueInt(buffer.getInt());
-            break;
-
-        case TYPE_STRING: {
-            /* the first short = the size to read */
-            int valueSize = buffer.getShort();
-
-            byte[] array = new byte[valueSize];
-            buffer.get(array);
-            value = TmfStateValue.newValueString(new String(array, CHARSET));
-
-            /* Confirm the 0'ed byte at the end */
-            byte res = buffer.get();
-            if (res != 0) {
-                throw new RuntimeException(errMsg);
-            }
-            break;
-        }
-
-        case TYPE_LONG:
-            /* Go read the matching entry in the Strings section of the block */
-            value = TmfStateValue.newValueLong(buffer.getLong());
-            break;
-
-        case TYPE_DOUBLE:
-            /* Go read the matching entry in the Strings section of the block */
-            value = TmfStateValue.newValueDouble(buffer.getDouble());
-            break;
-
-        case TYPE_CUSTOM: {
-            buffer.getShort();
-            // short valueSize = buffer.getShort();
-            // ISafeByteBufferReader safeBuffer =
-            // SafeByteBufferFactory.wrapReader(buffer, valueSize);
-            value = CustomStateValue.readSerializedValue(buffer);
-            break;
-        }
-        default:
-            /* Unknown data, better to not make anything up... */
-            throw new RuntimeException(errMsg);
-        }
-
-        try {
-            return new StateSystemInterval(intervalStart, intervalEnd, attribute, value);
-        } catch (TimeRangeException e) {
-            throw new RuntimeException(errMsg);
-        }
-    };
-
-    @Override
-    public void writeSegment(ISafeByteBufferWriter buffer) {
-        final byte byteFromType = getByteFromType(fSv.getType());
-
-        buffer.putLong(getStart());
-        buffer.putLong(getEnd());
-        buffer.putInt(fAttribute);
-        buffer.put(byteFromType);
-
-        switch (byteFromType) {
-        case TYPE_NULL:
-            break;
-        case TYPE_INTEGER:
-            buffer.putInt(fSv.unboxInt());
-            break;
-
-        case TYPE_STRING: {
-            String string = fSv.unboxStr();
-            byte[] strArray = string.getBytes(CHARSET);
-
-            /*
-             * Write the Strings entry (1st byte = size, then the bytes, then
-             * the 0). We have checked the string length at the constructor.
-             */
-            buffer.putShort((short) strArray.length);
-            buffer.put(strArray);
-            buffer.put((byte) 0);
-            break;
-        }
-
-        case TYPE_LONG:
-            buffer.putLong(fSv.unboxLong());
-            break;
-
-        case TYPE_DOUBLE:
-            buffer.putDouble(fSv.unboxDouble());
-            break;
-
-        case TYPE_CUSTOM: {
-            int size = ((CustomStateValue) fSv).getSerializedSize();
-            buffer.putShort((short) size);
-            // TODO: We send the full safe buffer to the custom value, its size
-            // should set to the necessary size only
-            ((CustomStateValue) fSv).serialize(buffer);
-            break;
-        }
-
-        default:
-            break;
-        }
-    }
-
-    // FIXME Remove these two duplicate methods
-
-    @Override
-    public long getStartTime() {
-        return getStart();
-    }
-
-    @Override
-    public long getEndTime() {
-        return getEnd();
-    }
-
-    @Override
-    public int getAttribute() {
-        return fAttribute;
-    }
-
-    @Override
-    public ITmfStateValue getStateValue() {
-        return fSv;
-    }
-
-    @Override
-    public boolean intersects(long timestamp) {
-        /*
-         * Need to explicitly re-define due to conflicting methods in
-         * super-interfaces.
-         */
-        return super.intersects(timestamp);
-    }
-
-    @Override
-    public int getSizeOnDisk() {
-        return fSizeOnDisk;
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(super.hashCode(), fAttribute, fSv);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (!super.equals(obj)) {
-            return false;
-        }
-
-        StateSystemInterval other = (StateSystemInterval) checkNotNull(obj);
-        return (fAttribute == other.fAttribute
-                && fSv.equals(other.fSv));
-    }
-
-    @Override
-    public String toString() {
-        /* Only for debug, should not be externalized */
-        StringBuilder sb = new StringBuilder();
-        sb.append('[');
-        sb.append(getStart());
-        sb.append(", "); //$NON-NLS-1$
-        sb.append(getEnd());
-        sb.append(']');
-
-        sb.append(", attribute = "); //$NON-NLS-1$
-        sb.append(fAttribute);
-
-        sb.append(", value = "); //$NON-NLS-1$
-        sb.append(fSv.toString());
-
-        return sb.toString();
-    }
-
-    /**
-     * Here we determine how state values "types" are written in the 8-bit field
-     * that indicates the value type in the file.
-     */
-    private static byte getByteFromType(ITmfStateValue.Type type) {
-        switch (type) {
-        case NULL:
-            return TYPE_NULL;
-        case INTEGER:
-            return TYPE_INTEGER;
-        case STRING:
-            return TYPE_STRING;
-        case LONG:
-            return TYPE_LONG;
-        case DOUBLE:
-            return TYPE_DOUBLE;
-        case CUSTOM:
-            return TYPE_CUSTOM;
-        default:
-            /* Should not happen if the switch is fully covered */
-            throw new IllegalStateException();
-        }
-    }
-}
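For the record, the removed computeSizeOnDisk() above makes the on-disk cost of an interval easy to work out by hand; the restored HTInterval presumably stores values in a similar layout. A short worked example for a string value:

    import java.nio.charset.StandardCharsets;

    final class IntervalSizeSketch {
        public static void main(String[] args) {
            /* 2 longs (start, end) + 1 int (attribute) + 1 byte (value type) */
            int minSize = Long.BYTES + Long.BYTES + Integer.BYTES + Byte.BYTES; // 21 bytes

            String value = "CPU0";
            int stringSize = minSize
                    + value.getBytes(StandardCharsets.UTF_8).length // 4-byte payload
                    + 3; // 2-byte length prefix + trailing 0 byte

            System.out.println(stringSize); // 28 bytes on disk for this interval
        }
    }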
index 9cbf916e8bf66a3b71fd8944dc029d3debaf5075..f55141f01cf2412db93487f5caa155750c825062 100644 (file)
@@ -37,7 +37,7 @@ public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
         implements Runnable {
 
     private static final int CHUNK_SIZE = 127;
-    private final @NonNull BufferedBlockingQueue<StateSystemInterval> intervalQueue;
+    private final @NonNull BufferedBlockingQueue<HTInterval> intervalQueue;
     private final @NonNull Thread shtThread;
     /**
      * The backend tracks its end time separately from the tree, to take into
@@ -73,7 +73,7 @@ public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
      *             If there was a problem opening the history file for writing
      */
     public ThreadedHistoryTreeBackend(@NonNull String ssid,
-            @NonNull File newStateFile,
+            File newStateFile,
             int providerVersion,
             long startTime,
             int queueSize,
@@ -110,7 +110,7 @@ public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
      *             If there was a problem opening the history file for writing
      */
     public ThreadedHistoryTreeBackend(@NonNull String ssid,
-            @NonNull File newStateFile,
+            File newStateFile,
             int providerVersion,
             long startTime,
             int queueSize)
@@ -139,7 +139,7 @@ public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
          * underneath, we'll put them in the Queue. They will then be taken and
          * processed by the other thread executing the run() method.
          */
-        StateSystemInterval interval = new StateSystemInterval(stateStartTime, stateEndTime,
+        HTInterval interval = new HTInterval(stateStartTime, stateEndTime,
                 quark, (TmfStateValue) value);
         intervalQueue.put(interval);
         fEndTime = Math.max(fEndTime, stateEndTime);
@@ -186,7 +186,7 @@ public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
          * closeTree()
          */
         try {
-            StateSystemInterval pill = new StateSystemInterval(-1, endTime, -1, TmfStateValue.nullValue());
+            HTInterval pill = new HTInterval(-1, endTime, -1, TmfStateValue.nullValue());
             intervalQueue.put(pill);
             intervalQueue.flushInputBuffer();
             shtThread.join();
@@ -200,10 +200,10 @@ public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
     @Override
     public void run() {
         try {
-            StateSystemInterval currentInterval = intervalQueue.blockingPeek();
+            HTInterval currentInterval = intervalQueue.blockingPeek();
             while (currentInterval.getStartTime() != -1) {
                 /* Send the interval to the History Tree */
-                getSHT().insert(currentInterval);
+                getSHT().insertInterval(currentInterval);
                 /* Actually remove the interval from the queue */
                 // FIXME Replace with remove() once it is implemented.
                 intervalQueue.take();
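The threaded backend above stops its writer thread with a poison pill: an HTInterval whose start time is -1. A minimal sketch of the same pattern, with a plain java.util.concurrent queue standing in for Trace Compass's BufferedBlockingQueue; the names here are illustrative only:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    final class PoisonPillSketch {
        static final class Interval {
            final long start;
            final long end;
            Interval(long start, long end) { this.start = start; this.end = end; }
        }

        private final BlockingQueue<Interval> queue = new ArrayBlockingQueue<>(10_000);

        void insert(Interval interval) throws InterruptedException {
            queue.put(interval); // producer side, called for every new interval
        }

        void finish() throws InterruptedException {
            queue.put(new Interval(-1, -1)); // poison pill: no more intervals are coming
        }

        void consume() throws InterruptedException {
            Interval current;
            while ((current = queue.take()).start != -1) {
                /* insert 'current' into the history tree here */
            }
        }
    }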
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/classic/CoreNode.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/classic/CoreNode.java
new file mode 100644 (file)
index 0000000..946c8bf
--- /dev/null
@@ -0,0 +1,255 @@
+/*******************************************************************************
+ * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *   Alexandre Montplaisir - Initial API and implementation
+ *   Florian Wininger - Add Extension and Leaf Node
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.classic;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTConfig;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.ParentNode;
+import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
+
+/**
+ * A Core node is a first-level node of a History Tree which is not a leaf node.
+ *
+ * It extends HTNode by adding support for child nodes, and also extensions.
+ *
+ * @author Alexandre Montplaisir
+ */
+public final class CoreNode extends ParentNode {
+
+    /** Nb. of children this node has */
+    private int nbChildren;
+
+    /** Seq. numbers of the children nodes (size = MAX_NB_CHILDREN) */
+    private int[] children;
+
+    /** Start times of each of the children (size = MAX_NB_CHILDREN) */
+    private long[] childStart;
+
+    /** Seq number of this node's extension. -1 if none */
+    private volatile int extension = -1;
+
+    /**
+     * Lock used to gate the accesses to the children arrays. Meant to be a
+     * different lock from the one in {@link HTNode}.
+     */
+    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(false);
+
+    /**
+     * Initial constructor. Use this to initialize a new EMPTY node.
+     *
+     * @param config
+     *            Configuration of the History Tree
+     * @param seqNumber
+     *            The (unique) sequence number assigned to this particular node
+     * @param parentSeqNumber
+     *            The sequence number of this node's parent node
+     * @param start
+     *            The earliest timestamp stored in this node
+     */
+    public CoreNode(HTConfig config, int seqNumber, int parentSeqNumber,
+            long start) {
+        super(config, seqNumber, parentSeqNumber, start);
+        this.nbChildren = 0;
+        int size = config.getMaxChildren();
+
+        /*
+         * We instantiate the two following arrays at full size right away,
+         * since we want to reserve that space in the node's header.
+         * "this.nbChildren" will tell us how many relevant entries there are in
+         * those tables.
+         */
+        this.children = new int[size];
+        this.childStart = new long[size];
+    }
+
+    @Override
+    protected void readSpecificHeader(ByteBuffer buffer) {
+        int size = getConfig().getMaxChildren();
+
+        extension = buffer.getInt();
+        nbChildren = buffer.getInt();
+
+        children = new int[size];
+        for (int i = 0; i < nbChildren; i++) {
+            children[i] = buffer.getInt();
+        }
+        for (int i = nbChildren; i < size; i++) {
+            buffer.getInt();
+        }
+
+        this.childStart = new long[size];
+        for (int i = 0; i < nbChildren; i++) {
+            childStart[i] = buffer.getLong();
+        }
+        for (int i = nbChildren; i < size; i++) {
+            buffer.getLong();
+        }
+    }
+
+    @Override
+    protected void writeSpecificHeader(ByteBuffer buffer) {
+        int size = getConfig().getMaxChildren();
+
+        buffer.putInt(extension);
+        buffer.putInt(nbChildren);
+
+        /* Write the "children's seq number" array */
+        for (int i = 0; i < nbChildren; i++) {
+            buffer.putInt(children[i]);
+        }
+        for (int i = nbChildren; i < size; i++) {
+            buffer.putInt(0);
+        }
+
+        /* Write the "children's start times" array */
+        for (int i = 0; i < nbChildren; i++) {
+            buffer.putLong(childStart[i]);
+        }
+        for (int i = nbChildren; i < size; i++) {
+            buffer.putLong(0);
+        }
+    }
+
+    @Override
+    public int getNbChildren() {
+        rwl.readLock().lock();
+        try {
+            return nbChildren;
+        } finally {
+            rwl.readLock().unlock();
+        }
+    }
+
+    @Override
+    public int getChild(int index) {
+        rwl.readLock().lock();
+        try {
+            return children[index];
+        } finally {
+            rwl.readLock().unlock();
+        }
+    }
+
+    @Override
+    public int getLatestChild() {
+        rwl.readLock().lock();
+        try {
+            return children[nbChildren - 1];
+        } finally {
+            rwl.readLock().unlock();
+        }
+    }
+
+    @Override
+    public long getChildStart(int index) {
+        rwl.readLock().lock();
+        try {
+            return childStart[index];
+        } finally {
+            rwl.readLock().unlock();
+        }
+    }
+
+    /**
+     * Get the sequence number of the extension to this node (if there is one).
+     *
+     * @return The sequence number of the extended node. '-1' is returned if
+     *         there is no extension node.
+     */
+    public int getExtensionSequenceNumber() {
+        rwl.readLock().lock();
+        try {
+            return extension;
+        } finally {
+            rwl.readLock().unlock();
+        }
+    }
+
+    @Override
+    public void linkNewChild(HTNode childNode) {
+        rwl.writeLock().lock();
+        try {
+            if (nbChildren >= getConfig().getMaxChildren()) {
+                throw new IllegalStateException("Asked to link another child but parent already has maximum number of children"); //$NON-NLS-1$
+            }
+
+            children[nbChildren] = childNode.getSequenceNumber();
+            childStart[nbChildren] = childNode.getNodeStart();
+            nbChildren++;
+
+        } finally {
+            rwl.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public Collection<Integer> selectNextChildren(long t) throws TimeRangeException {
+        if (t < getNodeStart() || (isOnDisk() && t > getNodeEnd())) {
+            throw new TimeRangeException("Requesting children outside the node's range: " + t); //$NON-NLS-1$
+        }
+        rwl.readLock().lock();
+        try {
+            int potentialNextSeqNb = -1;
+            for (int i = 0; i < nbChildren; i++) {
+                if (t >= childStart[i]) {
+                    potentialNextSeqNb = children[i];
+                } else {
+                    break;
+                }
+            }
+
+            if (potentialNextSeqNb == -1) {
+                throw new IllegalStateException("No next child node found"); //$NON-NLS-1$
+            }
+            return Collections.singleton(potentialNextSeqNb);
+        } finally {
+            rwl.readLock().unlock();
+        }
+    }
+
+    @Override
+    public NodeType getNodeType() {
+        return NodeType.CORE;
+    }
+
+    @Override
+    protected int getSpecificHeaderSize() {
+        int maxChildren = getConfig().getMaxChildren();
+        int specificSize =
+                  Integer.BYTES /* 1x int (extension node) */
+                + Integer.BYTES /* 1x int (nbChildren) */
+
+                /* MAX_NB * int ('children' table) */
+                + Integer.BYTES * maxChildren
+
+                /* MAX_NB * Timevalue ('childStart' table) */
+                + Long.BYTES * maxChildren;
+
+        return specificSize;
+    }
+
+    @Override
+    public String toStringSpecific() {
+        /* Only used for debugging, shouldn't be externalized */
+        return String.format("Core Node, %d children %s", //$NON-NLS-1$
+                nbChildren, Arrays.toString(Arrays.copyOf(children, nbChildren)));
+    }
+
+}
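CoreNode.selectNextChildren() above is a linear scan: the children are sorted by start time, so the child to follow for a timestamp t is the last one whose start is less than or equal to t. A small worked example of that scan on illustrative data:

    final class ChildSelectionSketch {
        public static void main(String[] args) {
            long[] childStart = { 0, 100, 200, 300 };  // start times, sorted
            int[] children = { 4, 7, 9, 12 };          // matching sequence numbers (illustrative)
            long t = 250;

            int potentialNextSeqNb = -1;
            for (int i = 0; i < children.length; i++) {
                if (t >= childStart[i]) {
                    potentialNextSeqNb = children[i];
                } else {
                    break;
                }
            }
            System.out.println(potentialNextSeqNb); // 9: the child starting at 200 covers t = 250
        }
    }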
diff --git a/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/classic/HistoryTreeClassic.java b/statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/classic/HistoryTreeClassic.java
new file mode 100644 (file)
index 0000000..a59bfe3
--- /dev/null
@@ -0,0 +1,639 @@
+/*******************************************************************************
+ * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *   Alexandre Montplaisir - Initial API and implementation
+ *   Florian Wininger - Add Extension and Leaf Node
+ *   Patrick Tasse - Add message to exceptions
+ *******************************************************************************/
+
+package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.classic;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTConfig;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTInterval;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HTNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.HT_IO;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.LeafNode;
+import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.ParentNode;
+import org.eclipse.tracecompass.statesystem.core.ITmfStateSystemBuilder;
+import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Meta-container for the History Tree. This structure contains all the
+ * high-level data relevant to the tree.
+ *
+ * @author Alexandre Montplaisir
+ */
+public class HistoryTreeClassic implements IHistoryTree {
+
+    /**
+     * The magic number for this file format.
+     */
+    public static final int HISTORY_FILE_MAGIC_NUMBER = 0x05FFA900;
+
+    /** File format version. Increment when breaking compatibility. */
+    private static final int FILE_VERSION = 7;
+
+    private static final IHTNodeFactory CLASSIC_NODE_FACTORY = new IHTNodeFactory() {
+
+        @Override
+        public HTNode createCoreNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
+            return new CoreNode(config, seqNumber, parentSeqNumber, start);
+        }
+
+        @Override
+        public HTNode createLeafNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
+            return new LeafNode(config, seqNumber, parentSeqNumber, start);
+        }
+
+    };
+
+    // ------------------------------------------------------------------------
+    // Tree-specific configuration
+    // ------------------------------------------------------------------------
+
+    /** Container for all the configuration constants */
+    private final HTConfig fConfig;
+
+    /** Reader/writer object */
+    private final @NonNull HT_IO fTreeIO;
+
+    // ------------------------------------------------------------------------
+    // Variable Fields (will change throughout the existence of the SHT)
+    // ------------------------------------------------------------------------
+
+    /** Latest timestamp found in the tree (at any given moment) */
+    private long fTreeEnd;
+
+    /** The total number of nodes that exist in this tree */
+    private int fNodeCount;
+
+    /** "Cache" to keep the active nodes in memory */
+    private final @NonNull List<@NonNull HTNode> fLatestBranch;
+
+    // ------------------------------------------------------------------------
+    // Constructors/"Destructors"
+    // ------------------------------------------------------------------------
+
+    /**
+     * Create a new State History from scratch, using a {@link HTConfig} object
+     * for configuration.
+     *
+     * @param conf
+     *            The config to use for this History Tree.
+     * @throws IOException
+     *             If an error happens trying to open/write to the file
+     *             specified in the config
+     */
+    public HistoryTreeClassic(HTConfig conf) throws IOException {
+        /*
+         * Simple check to make sure we have enough room in the 0th block for
+         * the tree configuration
+         */
+        if (conf.getBlockSize() < TREE_HEADER_SIZE) {
+            throw new IllegalArgumentException();
+        }
+
+        fConfig = conf;
+        fTreeEnd = conf.getTreeStart();
+        fNodeCount = 0;
+        fLatestBranch = Collections.synchronizedList(new ArrayList<>());
+
+        /* Prepare the IO object */
+        fTreeIO = new HT_IO(fConfig, true, CLASSIC_NODE_FACTORY);
+
+        /* Add the first node to the tree */
+        LeafNode firstNode = initNewLeafNode(-1, conf.getTreeStart());
+        fLatestBranch.add(firstNode);
+    }
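+
+    /*
+     * A minimal usage sketch (the file name and numeric values below are
+     * examples only, not project defaults):
+     *
+     *   HTConfig conf = new HTConfig(new File("trace.ht"),
+     *           64 * 1024, // block size, in bytes
+     *           50,        // max number of children per core node
+     *           1,         // state provider version
+     *           0L);       // start time of the history
+     *   IHistoryTree tree = new HistoryTreeClassic(conf);
+     */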
+
+    /**
+     * "Reader" constructor : instantiate a SHTree from an existing tree file on
+     * disk
+     *
+     * @param existingStateFile
+     *            Path/filename of the history-file we are to open
+     * @param expProviderVersion
+     *            The expected version of the state provider
+     * @throws IOException
+     *             If an error happens reading the file
+     */
+    public HistoryTreeClassic(File existingStateFile, int expProviderVersion) throws IOException {
+        /*
+         * Open the file ourselves, get the tree header information we need,
+         * then pass on the descriptor to the TreeIO object.
+         */
+        int rootNodeSeqNb, res;
+        int bs, maxc;
+        long startTime;
+
+        /* Java I/O mumbo jumbo... */
+        if (!existingStateFile.exists()) {
+            throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
+        }
+        if (existingStateFile.length() <= 0) {
+            throw new IOException("Empty target file"); //$NON-NLS-1$
+        }
+
+        try (FileInputStream fis = new FileInputStream(existingStateFile);
+                FileChannel fc = fis.getChannel();) {
+
+            ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
+            buffer.order(ByteOrder.LITTLE_ENDIAN);
+            buffer.clear();
+
+            res = fc.read(buffer);
+            if (res != TREE_HEADER_SIZE) {
+                throw new IOException("Invalid header size"); //$NON-NLS-1$
+            }
+
+            buffer.flip();
+
+            /*
+             * Check the magic number to make sure we're opening the right type
+             * of file
+             */
+            res = buffer.getInt();
+            if (res != HISTORY_FILE_MAGIC_NUMBER) {
+                throw new IOException("Wrong magic number"); //$NON-NLS-1$
+            }
+
+            res = buffer.getInt(); /* File format version number */
+            if (res != FILE_VERSION) {
+                throw new IOException("Mismatching History Tree file format versions"); //$NON-NLS-1$
+            }
+
+            res = buffer.getInt(); /* Event handler's version number */
+            if (res != expProviderVersion &&
+                    expProviderVersion != ITmfStateSystemBuilder.IGNORE_PROVIDER_VERSION) {
+                /*
+                 * The existing history was built using an event handler that
+                 * doesn't match the current one in the framework.
+                 *
+                 * Information could be all wrong. Instead of keeping an
+                 * incorrect history file, a rebuild is done.
+                 */
+                throw new IOException("Mismatching event handler versions"); //$NON-NLS-1$
+            }
+
+            bs = buffer.getInt(); /* Block Size */
+            maxc = buffer.getInt(); /* Max nb of children per node */
+
+            fNodeCount = buffer.getInt();
+            rootNodeSeqNb = buffer.getInt();
+            startTime = buffer.getLong();
+
+            fConfig = new HTConfig(existingStateFile, bs, maxc, expProviderVersion, startTime);
+        }
+
+        /*
+         * FIXME We close fis here and the TreeIO will then reopen the same
+         * file, not extremely elegant. But how to pass the information here to
+         * the SHT otherwise?
+         */
+        fTreeIO = new HT_IO(fConfig, false, CLASSIC_NODE_FACTORY);
+
+        fLatestBranch = buildLatestBranch(rootNodeSeqNb);
+        fTreeEnd = getRootNode().getNodeEnd();
+
+        /*
+         * Make sure the history start time we read previously is consistent
+         * with what is actually in the root node.
+         */
+        if (startTime != getRootNode().getNodeStart()) {
+            throw new IOException("Inconsistent start times in the" + //$NON-NLS-1$
+                    "history file, it might be corrupted."); //$NON-NLS-1$
+        }
+    }
+
+    /**
+     * Rebuild the latestBranch "cache" object by reading the nodes from disk
+     * (When we are opening an existing file on disk and want to append to it,
+     * for example).
+     *
+     * @param rootNodeSeqNb
+     *            The sequence number of the root node, so we know where to
+     *            start
+     * @throws ClosedChannelException
+     */
+    private @NonNull List<@NonNull HTNode> buildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
+        List<@NonNull HTNode> list = new ArrayList<>();
+
+        HTNode nextChildNode = fTreeIO.readNode(rootNodeSeqNb);
+        list.add(nextChildNode);
+
+        /* Follow the last branch up to the leaf */
+        while (nextChildNode.getNodeType() == HTNode.NodeType.CORE) {
+            nextChildNode = fTreeIO.readNode(((CoreNode) nextChildNode).getLatestChild());
+            list.add(nextChildNode);
+        }
+        return Collections.synchronizedList(list);
+    }
+
+    @Override
+    public void closeTree(long requestedEndTime) {
+        /* This is an important operation, queries can wait */
+        synchronized (fLatestBranch) {
+            /*
+             * Work-around the "empty branches" that get created when the root
+             * node becomes full. Overwrite the tree's end time with the
+             * original wanted end-time, to ensure no queries are sent into
+             * those empty nodes.
+             *
+             * This won't be needed once extended nodes are implemented.
+             */
+            fTreeEnd = requestedEndTime;
+
+            /* Close off the latest branch of the tree */
+            for (int i = 0; i < fLatestBranch.size(); i++) {
+                fLatestBranch.get(i).closeThisNode(fTreeEnd);
+                fTreeIO.writeNode(fLatestBranch.get(i));
+            }
+
+            try (FileChannel fc = fTreeIO.getFcOut();) {
+                ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
+                buffer.order(ByteOrder.LITTLE_ENDIAN);
+                buffer.clear();
+
+                /* Save the config of the tree to the header of the file */
+                fc.position(0);
+
+                buffer.putInt(HISTORY_FILE_MAGIC_NUMBER);
+
+                buffer.putInt(FILE_VERSION);
+                buffer.putInt(fConfig.getProviderVersion());
+
+                buffer.putInt(fConfig.getBlockSize());
+                buffer.putInt(fConfig.getMaxChildren());
+
+                buffer.putInt(fNodeCount);
+
+                /* root node seq. nb */
+                buffer.putInt(fLatestBranch.get(0).getSequenceNumber());
+
+                /* start time of this history */
+                buffer.putLong(fLatestBranch.get(0).getNodeStart());
+
+                buffer.flip();
+                int res = fc.write(buffer);
+                assert (res <= TREE_HEADER_SIZE);
+                /* done writing the file header */
+
+            } catch (IOException e) {
+                /*
+                 * If we were able to write so far, there should not be any
+                 * problem at this point...
+                 */
+                throw new RuntimeException("State system write error"); //$NON-NLS-1$
+            }
+        }
+    }
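+
+    /*
+     * For reference, the header written above lays out as follows (all values
+     * little-endian, offsets in bytes, mirroring the putInt()/putLong() calls
+     * in closeTree() and the reads in the "reader" constructor):
+     *
+     *    0 -  3  magic number (HISTORY_FILE_MAGIC_NUMBER)
+     *    4 -  7  file format version (FILE_VERSION)
+     *    8 - 11  state provider version
+     *   12 - 15  block size
+     *   16 - 19  max number of children per node
+     *   20 - 23  node count
+     *   24 - 27  sequence number of the root node
+     *   28 - 35  start time of the history (long)
+     */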
+
+    // ------------------------------------------------------------------------
+    // Accessors
+    // ------------------------------------------------------------------------
+
+    @Override
+    public long getTreeStart() {
+        return fConfig.getTreeStart();
+    }
+
+    @Override
+    public long getTreeEnd() {
+        return fTreeEnd;
+    }
+
+    @Override
+    public int getNodeCount() {
+        return fNodeCount;
+    }
+
+    @Override
+    public HTNode getRootNode() {
+        return fLatestBranch.get(0);
+    }
+
+    /**
+     * Return the latest branch of the tree. The returned list is an immutable
+     * copy. Used for unit testing and debugging.
+     *
+     * @return The immutable latest branch
+     */
+    @VisibleForTesting
+    protected List<@NonNull HTNode> getLatestBranch() {
+        return ImmutableList.copyOf(fLatestBranch);
+    }
+
+    /**
+     * Read a node at sequence number
+     *
+     * @param seqNum
+     *            The sequence number of the node to read
+     * @return The HTNode object
+     * @throws ClosedChannelException
+     *             Exception thrown when reading the node
+     */
+    @VisibleForTesting
+    protected @NonNull HTNode getNode(int seqNum) throws ClosedChannelException {
+        // First, check in the latest branch if the node is there
+        for (HTNode node : fLatestBranch) {
+            if (node.getSequenceNumber() == seqNum) {
+                return node;
+            }
+        }
+        return fTreeIO.readNode(seqNum);
+    }
+
+    /**
+     * Retrieve the TreeIO object. Should only be used for testing.
+     *
+     * @return The TreeIO
+     */
+    @VisibleForTesting
+    protected @NonNull HT_IO getTreeIO() {
+        return fTreeIO;
+    }
+
+    // ------------------------------------------------------------------------
+    // HT_IO interface
+    // ------------------------------------------------------------------------
+
+    @Override
+    public FileInputStream supplyATReader() {
+        return fTreeIO.supplyATReader(getNodeCount());
+    }
+
+    @Override
+    public File supplyATWriterFile() {
+        return fConfig.getStateFile();
+    }
+
+    @Override
+    public long supplyATWriterFilePos() {
+        return IHistoryTree.TREE_HEADER_SIZE
+                + ((long) getNodeCount() * fConfig.getBlockSize());
+    }
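+
+    /*
+     * The node area appears to occupy one block of getBlockSize() bytes per
+     * node right after the header, so the position returned above would be
+     * the first byte past the last node, presumably where the attribute tree
+     * ("AT") gets appended.
+     */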
+
+    @Override
+    public HTNode readNode(int seqNumber) throws ClosedChannelException {
+        /* Try to read the node from memory */
+        synchronized (fLatestBranch) {
+            for (HTNode node : fLatestBranch) {
+                if (node.getSequenceNumber() == seqNumber) {
+                    return node;
+                }
+            }
+        }
+
+        /* Read the node from disk */
+        return fTreeIO.readNode(seqNumber);
+    }
+
+    @Override
+    public void writeNode(HTNode node) {
+        fTreeIO.writeNode(node);
+    }
+
+    @Override
+    public void closeFile() {
+        fTreeIO.closeFile();
+    }
+
+    @Override
+    public void deleteFile() {
+        fTreeIO.deleteFile();
+    }
+
+    // ------------------------------------------------------------------------
+    // Operations
+    // ------------------------------------------------------------------------
+
+    @Override
+    public void insertInterval(HTInterval interval) throws TimeRangeException {
+        if (interval.getStartTime() < fConfig.getTreeStart()) {
+            throw new TimeRangeException("Interval Start:" + interval.getStartTime() + ", Config Start:" + fConfig.getTreeStart()); //$NON-NLS-1$ //$NON-NLS-2$
+        }
+        tryInsertAtNode(interval, fLatestBranch.size() - 1);
+    }
+
+    /**
+     * Inner method to find in which node we should add the interval.
+     *
+     * @param interval
+     *            The interval to add to the tree
+     * @param indexOfNode
+     *            The index *in the latestBranch* where we are trying the
+     *            insertion
+     */
+    private void tryInsertAtNode(HTInterval interval, int indexOfNode) {
+        HTNode targetNode = fLatestBranch.get(indexOfNode);
+
+        /* Verify if there is enough room in this node to store this interval */
+        if (interval.getSizeOnDisk() > targetNode.getNodeFreeSpace()) {
+            /* Nope, not enough room. Insert in a new sibling instead. */
+            addSiblingNode(indexOfNode);
+            tryInsertAtNode(interval, fLatestBranch.size() - 1);
+            return;
+        }
+
+        /* Make sure the interval time range fits this node */
+        if (interval.getStartTime() < targetNode.getNodeStart()) {
+            /*
+             * No, this interval starts before the startTime of this node. We
+             * need to check recursively in parents if it can fit.
+             */
+            tryInsertAtNode(interval, indexOfNode - 1);
+            return;
+        }
+
+        /*
+         * Ok, there is room, and the interval fits in this time slot. Let's add
+         * it.
+         */
+        targetNode.addInterval(interval);
+
+        /* Update treeEnd if needed */
+        if (interval.getEndTime() > fTreeEnd) {
+            fTreeEnd = interval.getEndTime();
+        }
+    }
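+
+    /*
+     * Illustration of the fall-back logic above: if the leaf at the end of
+     * the latest branch lacks room, a new sibling branch starting at
+     * (fTreeEnd + 1) is created and the insertion is retried on its new leaf;
+     * if the interval's start time predates that new node's start, the retry
+     * walks up one level at a time toward the root, whose start time is the
+     * tree start and therefore always covers a valid interval.
+     */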
+
+    /**
+     * Method to add a sibling to any node in the latest branch. This will add
+     * children back down to the leaf level, if needed.
+     *
+     * @param indexOfNode
+     *            The index in latestBranch where we start adding
+     */
+    private void addSiblingNode(int indexOfNode) {
+        synchronized (fLatestBranch) {
+            final long splitTime = fTreeEnd;
+
+            if (indexOfNode >= fLatestBranch.size()) {
+                /*
+                 * We need to make sure (indexOfNode - 1) doesn't get the last
+                 * node in the branch, because that one is a Leaf Node.
+                 */
+                throw new IllegalStateException();
+            }
+
+            /* Check if we need to add a new root node */
+            if (indexOfNode == 0) {
+                addNewRootNode();
+                return;
+            }
+
+            /* Check if we can indeed add a child to the target parent */
+            if (((ParentNode) fLatestBranch.get(indexOfNode - 1)).getNbChildren() == fConfig.getMaxChildren()) {
+                /* If not, add a branch starting one level higher instead */
+                addSiblingNode(indexOfNode - 1);
+                return;
+            }
+
+            /* Split off the new branch from the old one */
+            for (int i = indexOfNode; i < fLatestBranch.size(); i++) {
+                fLatestBranch.get(i).closeThisNode(splitTime);
+                fTreeIO.writeNode(fLatestBranch.get(i));
+
+                ParentNode prevNode = (ParentNode) fLatestBranch.get(i - 1);
+                HTNode newNode;
+
+                switch (fLatestBranch.get(i).getNodeType()) {
+                case CORE:
+                    newNode = initNewCoreNode(prevNode.getSequenceNumber(), splitTime + 1);
+                    break;
+                case LEAF:
+                    newNode = initNewLeafNode(prevNode.getSequenceNumber(), splitTime + 1);
+                    break;
+                default:
+                    throw new IllegalStateException();
+                }
+
+                prevNode.linkNewChild(newNode);
+                fLatestBranch.set(i, newNode);
+            }
+        }
+    }
+
+    /**
+     * Similar to the previous method, except here we rebuild a completely new
+     * latestBranch
+     */
+    private void addNewRootNode() {
+        final long splitTime = fTreeEnd;
+
+        HTNode oldRootNode = fLatestBranch.get(0);
+        ParentNode newRootNode = initNewCoreNode(-1, fConfig.getTreeStart());
+
+        /* Tell the old root node that it isn't root anymore */
+        oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());
+
+        /* Close off the whole current latestBranch */
+
+        for (int i = 0; i < fLatestBranch.size(); i++) {
+            fLatestBranch.get(i).closeThisNode(splitTime);
+            fTreeIO.writeNode(fLatestBranch.get(i));
+        }
+
+        /* Link the new root to its first child (the previous root node) */
+        newRootNode.linkNewChild(oldRootNode);
+
+        /* Rebuild a new latestBranch */
+        int depth = fLatestBranch.size();
+        fLatestBranch.clear();
+        fLatestBranch.add(newRootNode);
+
+        // Create new coreNode
+        for (int i = 1; i < depth; i++) {
+            ParentNode prevNode = (ParentNode) fLatestBranch.get(i - 1);
+            ParentNode newNode = initNewCoreNode(prevNode.getSequenceNumber(),
+                    splitTime + 1);
+            prevNode.linkNewChild(newNode);
+            fLatestBranch.add(newNode);
+        }
+
+        // Create the new leafNode
+        ParentNode prevNode = (ParentNode) fLatestBranch.get(depth - 1);
+        LeafNode newNode = initNewLeafNode(prevNode.getSequenceNumber(), splitTime + 1);
+        prevNode.linkNewChild(newNode);
+        fLatestBranch.add(newNode);
+    }
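+
+    /*
+     * Net effect, sketched for a branch of depth 2 (root R + leaf L): the old
+     * branch is closed at fTreeEnd, a new root R' starting at the tree start
+     * takes R as its first child, and the rebuilt latest branch becomes
+     * [R', C, L'] where the new core node C and leaf L' start at
+     * (fTreeEnd + 1). The depth of the latest branch thus grows by one.
+     */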
+
+    /**
+     * Add a new empty core node to the tree.
+     *
+     * @param parentSeqNumber
+     *            Sequence number of this node's parent
+     * @param startTime
+     *            Start time of the new node
+     * @return The newly created node
+     */
+    private @NonNull ParentNode initNewCoreNode(int parentSeqNumber, long startTime) {
+        ParentNode newNode = new CoreNode(fConfig, fNodeCount, parentSeqNumber,
+                startTime);
+        fNodeCount++;
+        return newNode;
+    }
+
+    /**
+     * Add a new empty leaf node to the tree.
+     *
+     * @param parentSeqNumber
+     *            Sequence number of this node's parent
+     * @param startTime
+     *            Start time of the new node
+     * @return The newly created node
+     */
+    private @NonNull LeafNode initNewLeafNode(int parentSeqNumber, long startTime) {
+        LeafNode newNode = new LeafNode(fConfig, fNodeCount, parentSeqNumber,
+                startTime);
+        fNodeCount++;
+        return newNode;
+    }
+
+    @Override
+    public long getFileSize() {
+        return fConfig.getStateFile().length();
+    }
+
+    // ------------------------------------------------------------------------
+    // Test/debugging methods
+    // ------------------------------------------------------------------------
+
+    /* Only used for debugging, shouldn't be externalized */
+    @SuppressWarnings("nls")
+    @Override
+    public String toString() {
+        return "Information on the current tree:\n\n" + "Blocksize: "
+                + fConfig.getBlockSize() + "\n" + "Max nb. of children per node: "
+                + fConfig.getMaxChildren() + "\n" + "Number of nodes: " + fNodeCount
+                + "\n" + "Depth of the tree: " + fLatestBranch.size() + "\n"
+                + "Size of the treefile: " + getFileSize() + "\n"
+                + "Root node has sequence number: "
+                + fLatestBranch.get(0).getSequenceNumber() + "\n"
+                + "'Latest leaf' has sequence number: "
+                + fLatestBranch.get(fLatestBranch.size() - 1).getSequenceNumber();
+    }
+
+}