From 5e7913a44935f27755a23171764dc83133eb78a0 Mon Sep 17 00:00:00 2001 From: =?utf8?q?Genevi=C3=A8ve=20Bastien?= Date: Fri, 25 Nov 2016 15:28:30 -0500 Subject: [PATCH] datastore: Add generic history tree MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Adds an history interface and abstract implementation as well as some classes needed to serialize data to the tree and exceptions. Change-Id: I8bd100cc9c6a0f586b3ab6ae32999a4086ea3640 Signed-off-by: Geneviève Bastien Signed-off-by: Alexandre Montplaisir Reviewed-on: https://git.eclipse.org/r/84478 Reviewed-by: Hudson CI --- .../META-INF/MANIFEST.MF | 8 + .../internal/datastore/core/Activator.java | 4 +- .../datastore/core/historytree/HtIo.java | 378 +++++++ .../core/historytree/package-info.java | 11 + .../internal/datastore/core/package-info.java | 2 +- .../core/exceptions/RangeException.java | 53 + .../core/historytree/AbstractHistoryTree.java | 906 +++++++++++++++++ .../datastore/core/historytree/HTNode.java | 941 ++++++++++++++++++ .../datastore/core/historytree/IHTNode.java | 271 +++++ .../core/historytree/IHistoryTree.java | 184 ++++ .../core/historytree/package-info.java | 11 + .../datastore/core/interval/HTInterval.java | 100 ++ .../datastore/core/interval/IHTInterval.java | 49 + .../core/interval/IHTIntervalReader.java | 32 + .../core/interval/ISerializableObject.java | 36 + .../datastore/core/interval/package-info.java | 11 + 16 files changed, 2995 insertions(+), 2 deletions(-) create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/HtIo.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/package-info.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/exceptions/RangeException.java create mode 100644 
statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/HTNode.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHTNode.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHistoryTree.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/package-info.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/HTInterval.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTInterval.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTIntervalReader.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/ISerializableObject.java create mode 100644 statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/package-info.java diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/META-INF/MANIFEST.MF b/statesystem/org.eclipse.tracecompass.datastore.core/META-INF/MANIFEST.MF index 90e9a09424..76108753a2 100644 --- a/statesystem/org.eclipse.tracecompass.datastore.core/META-INF/MANIFEST.MF +++ b/statesystem/org.eclipse.tracecompass.datastore.core/META-INF/MANIFEST.MF @@ -12,6 
+12,14 @@ Require-Bundle: org.eclipse.core.runtime, org.eclipse.tracecompass.common.core Export-Package: org.eclipse.tracecompass.internal.datastore.core;x-internal:=true, org.eclipse.tracecompass.internal.datastore.core.condition;x-internal:=true, + org.eclipse.tracecompass.internal.datastore.core.historytree;x-internal:=true, org.eclipse.tracecompass.internal.datastore.core.serialization;x-internal:=true, org.eclipse.tracecompass.internal.provisional.datastore.core.condition;x-internal:=true, + org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions, + org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;x-internal:=true, + org.eclipse.tracecompass.internal.provisional.datastore.core.interval;x-internal:=true, org.eclipse.tracecompass.internal.provisional.datastore.core.serialization;x-friends:="org.eclipse.tracecompass.statesystem.core,org.eclipse.tracecompass.statesystem.core.tests" +Import-Package: com.google.common.annotations, + com.google.common.base, + com.google.common.cache, + com.google.common.collect diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/Activator.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/Activator.java index 471603d96f..24cf602b3e 100644 --- a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/Activator.java +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/Activator.java @@ -13,11 +13,13 @@ import org.eclipse.tracecompass.common.core.TraceCompassActivator; /** * The activator class controls the plug-in life cycle + * + * @author Geneviève Bastien */ public class Activator extends TraceCompassActivator { /** The plug-in ID */ - public static final String PLUGIN_ID = "org.eclipse.tracecompass.backends.core"; //$NON-NLS-1$ + public static final String 
PLUGIN_ID = "org.eclipse.tracecompass.datastore.core"; //$NON-NLS-1$ /** * The constructor diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/HtIo.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/HtIo.java new file mode 100644 index 0000000000..46294252f6 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/HtIo.java @@ -0,0 +1,378 @@ +/******************************************************************************* + * Copyright (c) 2010, 2017 École Polytechnique de Montréal and others + * + * All rights reserved. This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.datastore.core.historytree; + +import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.logging.Logger; + +import org.eclipse.jdt.annotation.Nullable; +import org.eclipse.tracecompass.common.core.log.TraceCompassLog; +import org.eclipse.tracecompass.internal.datastore.core.Activator; +import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.AbstractHistoryTree.IHTNodeFactory; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval; +import 
org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader; +import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.HTNode; +import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHistoryTree; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; + +/** + * This class abstracts inputs/outputs of the HistoryTree nodes. + * + * It contains all the methods and descriptors to handle reading/writing nodes + * to the tree-file on disk and all the caching mechanisms. + * + * This abstraction is mainly for code isolation/clarification purposes. Every + * HistoryTree must contain 1 and only 1 HT_IO element. + * + * @author Alexandre Montplaisir + * @author Geneviève Bastien + * @param + * The type of objects that will be saved in the tree + * @param + * The base type of the nodes of this tree + */ +public class HtIo> { + + private static final Logger LOGGER = TraceCompassLog.getLogger(HtIo.class); + + // ------------------------------------------------------------------------ + // Global cache of nodes + // ------------------------------------------------------------------------ + + private static final class CacheKey { + + public final HtIo> fHistoryTreeIo; + public final int fSeqNumber; + + public CacheKey(HtIo> htio, int seqNumber) { + fHistoryTreeIo = htio; + fSeqNumber = seqNumber; + } + + @Override + public int hashCode() { + return Objects.hash(fHistoryTreeIo, fSeqNumber); + } + + @Override + public boolean equals(@Nullable Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CacheKey other = (CacheKey) obj; + return (fHistoryTreeIo.equals(other.fHistoryTreeIo) && + fSeqNumber == other.fSeqNumber); + } + } + + private static final int CACHE_SIZE = 
256; + + private static final LoadingCache> NODE_CACHE = checkNotNull(CacheBuilder.newBuilder() + .maximumSize(CACHE_SIZE) + .build(new CacheLoader>() { + @Override + public HTNode load(CacheKey key) throws IOException { + HtIo> io = key.fHistoryTreeIo; + int seqNb = key.fSeqNumber; + + LOGGER.finest(() -> "[HtIo:CacheMiss] seqNum=" + seqNb); //$NON-NLS-1$ + + synchronized (io) { + io.seekFCToNodePos(io.fFileChannelIn, seqNb); + return HTNode.readNode(io.fBlockSize, + io.fNodeMaxChildren, + io.fFileChannelIn, + io.fObjectReader, + io.fNodeFactory); + } + } + })); + + /** + * This method invalidates all data in the cache so nodes will have to be + * read again + */ + @VisibleForTesting + static void clearCache() { + NODE_CACHE.invalidateAll(); + } + + /** + * Get whether a node is present in the cache + * + * @param htio + * The htio object that contains the node + * @param seqNum + * The sequence number of the node to check + * @return true if the node is present in the cache, + * false otherwise + */ + @VisibleForTesting + static > boolean isInCache(HtIo htio, int seqNum) { + @SuppressWarnings("unchecked") + @Nullable HTNode present = NODE_CACHE.getIfPresent(new CacheKey((HtIo>) htio, seqNum)); + return (present != null); + } + + // ------------------------------------------------------------------------ + // Instance fields + // ------------------------------------------------------------------------ + + /* Relevant configuration elements from the History Tree */ + private final File fStateHistoryFile; + private final int fBlockSize; + private final int fNodeMaxChildren; + private final IHTIntervalReader fObjectReader; + private final IHTNodeFactory fNodeFactory; + + /* Fields related to the file I/O */ + private final FileInputStream fFileInputStream; + private final FileOutputStream fFileOutputStream; + private final FileChannel fFileChannelIn; + private final FileChannel fFileChannelOut; + + // 
------------------------------------------------------------------------ + // Methods + // ------------------------------------------------------------------------ + + /** + * Standard constructor + * + * @param stateHistoryFile + * The name of the history file + * @param blockSize + * The size of each "block" on disk in bytes. One node will + * always fit in one block. It should be at least 4096. + * @param nodeMaxChildren + * The maximum number of children allowed per core (non-leaf) + * node. + * @param newFile + * Flag indicating that the file must be created from scratch + * @param intervalReader + * The factory to create new tree data elements when reading from + * the disk + * @param nodeFactory + * The factory to create new nodes for this tree + * @throws IOException + * An exception can be thrown when file cannot be accessed + */ + public HtIo(File stateHistoryFile, + int blockSize, + int nodeMaxChildren, + boolean newFile, + IHTIntervalReader intervalReader, + IHTNodeFactory nodeFactory) throws IOException { + + fBlockSize = blockSize; + fNodeMaxChildren = nodeMaxChildren; + fObjectReader = intervalReader; + fNodeFactory = nodeFactory; + + fStateHistoryFile = stateHistoryFile; + if (newFile) { + boolean success1 = true; + /* Create a new empty History Tree file */ + if (fStateHistoryFile.exists()) { + success1 = fStateHistoryFile.delete(); + } + boolean success2 = fStateHistoryFile.createNewFile(); + if (!(success1 && success2)) { + /* It seems we do not have permission to create the new file */ + throw new IOException("Cannot create new file at " + //$NON-NLS-1$ + fStateHistoryFile.getName()); + } + fFileInputStream = new FileInputStream(fStateHistoryFile); + fFileOutputStream = new FileOutputStream(fStateHistoryFile, false); + } else { + /* + * We want to open an existing file, make sure we don't squash the + * existing content when opening the fos! 
+ */ + fFileInputStream = new FileInputStream(fStateHistoryFile); + fFileOutputStream = new FileOutputStream(fStateHistoryFile, true); + } + fFileChannelIn = fFileInputStream.getChannel(); + fFileChannelOut = fFileOutputStream.getChannel(); + } + + /** + * Read a node from the file on disk. + * + * @param seqNumber + * The sequence number of the node to read. + * @return The object representing the node + * @throws ClosedChannelException + * Usually happens because the file was closed while we were + * reading. Instead of using a big reader-writer lock, we'll + * just catch this exception. + */ + @SuppressWarnings("unchecked") + public N readNode(int seqNumber) throws ClosedChannelException { + /* Do a cache lookup. If it's not present it will be loaded from disk */ + LOGGER.finest(() -> "[HtIo:CacheLookup] seqNum=" + seqNumber); //$NON-NLS-1$ + CacheKey key = new CacheKey((HtIo>) this, seqNumber); + try { + return (N) checkNotNull(NODE_CACHE.get(key)); + + } catch (ExecutionException e) { + /* Get the inner exception that was generated */ + Throwable cause = e.getCause(); + if (cause instanceof ClosedChannelException) { + throw (ClosedChannelException) cause; + } + /* + * Other types of IOExceptions shouldn't happen at this point + * though. + */ + Activator.getInstance().logError(e.getMessage(), e); + throw new IllegalStateException(); + } + } + + /** + * Write the given node to disk. + * + * @param node + * The node to write. + */ + @SuppressWarnings("unchecked") + public void writeNode(N node) { + try { + int seqNumber = node.getSequenceNumber(); + + /* "Write-back" the node into the cache */ + CacheKey key = new CacheKey((HtIo>) this, seqNumber); + NODE_CACHE.put(key, (HTNode) node); + + /* Position ourselves at the start of the node and write it */ + synchronized (this) { + seekFCToNodePos(fFileChannelOut, seqNumber); + node.writeSelf(fFileChannelOut); + } + } catch (IOException e) { + /* If we were able to open the file, we should be fine now... 
*/ + Activator.getInstance().logError(e.getMessage(), e); + } + } + + /** + * Get the output file channel, used for writing, positioned after a certain + * number of nodes, or at the beginning. + * + * FIXME: Do not expose the file output. Use rather a method to + * writeAtEnd(int nodeOffset, ByteBuffer) + * + * @param nodeOffset + * The offset in the file, in number of nodes. If the value is + * lower than 0, the file will be positioned at the beginning. + * @return The correctly-seeked input stream + */ + public FileOutputStream getFileWriter(int nodeOffset) { + try { + if (nodeOffset < 0) { + fFileChannelOut.position(0); + } else { + seekFCToNodePos(fFileChannelOut, nodeOffset); + } + } catch (IOException e) { + Activator.getInstance().logError(e.getMessage(), e); + } + return fFileOutputStream; + } + + /** + * Retrieve the input stream with which to write the attribute tree. + * + * FIXME: Do not expose the stream, have a method to write at the end + * instead + * + * @param nodeOffset + * The offset in the file, in number of nodes. This should be + * after all the nodes. + * @return The correctly-seeked input stream + */ + public FileInputStream supplyATReader(int nodeOffset) { + try { + /* + * Position ourselves at the start of the Mapping section in the + * file (which is right after the Blocks) + */ + seekFCToNodePos(fFileChannelIn, nodeOffset); + } catch (IOException e) { + Activator.getInstance().logError(e.getMessage(), e); + } + return fFileInputStream; + } + + /** + * Close all file channels and streams. 
+ */ + public synchronized void closeFile() { + try { + fFileInputStream.close(); + fFileOutputStream.close(); + } catch (IOException e) { + Activator.getInstance().logError(e.getMessage(), e); + } + } + + /** + * Delete the history tree file + */ + public synchronized void deleteFile() { + closeFile(); + + if (!fStateHistoryFile.delete()) { + /* We didn't succeed in deleting the file */ + Activator.getInstance().logError("Failed to delete" + fStateHistoryFile.getName()); //$NON-NLS-1$ + } + } + + /** + * Seek the given FileChannel to the position corresponding to the node that + * has seqNumber + * + * @param fc + * the channel to seek + * @param seqNumber + * the node sequence number to seek the channel to + * @throws IOException + * If some other I/O error occurs + */ + private void seekFCToNodePos(FileChannel fc, int seqNumber) + throws IOException { + /* + * Cast to (long) is needed to make sure the result is a long too and + * doesn't get truncated + */ + fc.position(IHistoryTree.TREE_HEADER_SIZE + + ((long) seqNumber) * fBlockSize); + } + +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/package-info.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/package-info.java new file mode 100644 index 0000000000..e8a7ab70cb --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/historytree/package-info.java @@ -0,0 +1,11 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. 
This program and the accompanying materials are made + * available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +@org.eclipse.jdt.annotation.NonNullByDefault +package org.eclipse.tracecompass.internal.datastore.core.historytree; diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/package-info.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/package-info.java index 0fbc2fb91f..8159803b32 100644 --- a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/package-info.java +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/datastore/core/package-info.java @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2015 Ericsson + * Copyright (c) 2016 École Polytechnique de Montréal * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/exceptions/RangeException.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/exceptions/RangeException.java new file mode 100644 index 0000000000..caa3f9e994 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/exceptions/RangeException.java @@ -0,0 +1,53 @@ +/******************************************************************************* + * Copyright (c) 2012, 2017 Ericsson and others + * + * All rights reserved. This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions; + +/** + * Generic exception for when the user specifies an history range. Usually + * history range must be within the range of the trace or state history being + * queried. + * + * For insertions, it's forbidden to insert new states "in the past" (before + * where the cursor is), so this exception is also thrown in that case. 
+ * + * @author Alexandre Montplaisir + */ +public class RangeException extends RuntimeException { + + private static final long serialVersionUID = -4067685227260254532L; + + /** + * Default constructor + */ + public RangeException() { + } + + /** + * Constructor with a message + * + * @param message + * Message to attach to this exception + */ + public RangeException(String message) { + super(message); + } + + /** + * Constructor with both a message and a cause. + * + * @param message + * Message to attach to this exception + * @param e + * Cause of this exception + */ + public RangeException(String message, Throwable e) { + super(message, e); + } +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java new file mode 100644 index 0000000000..750fdd18c6 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java @@ -0,0 +1,906 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. 
This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Predicate; + +import org.eclipse.jdt.annotation.NonNull; +import org.eclipse.tracecompass.common.core.NonNullUtils; +import org.eclipse.tracecompass.internal.datastore.core.historytree.HtIo; +import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition; +import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException; +import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHTNode.NodeType; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; + +/** + * Base class for history trees that encapsulates the logic to read from/write + * to a file. 
+ * + * @author Alexandre Montplaisir + * @author Geneviève Bastien + * @param + * The type of intervals that will be saved in the tree + * @param + * The base type of the nodes of this tree + */ +public abstract class AbstractHistoryTree> + implements IHistoryTree { + + /** + * Interface for history to create the various HTNodes + * + * @param + * The type of intervals that will be saved in the node + * @param + * The base type of the nodes of this tree + */ + @FunctionalInterface + public interface IHTNodeFactory> { + + /** + * Creates a new node for the specific history tree + * + * @param type + * The type of node to create. See {@link IHTNode.NodeType}. + * @param blockSize + * The size (in bytes) of each node once serialized to disk + * @param maxChildren + * The maximum number of amount a single core node can have + * @param seqNumber + * The (unique) sequence number assigned to this particular + * node + * @param parentSeqNumber + * The sequence number of this node's parent node + * @param start + * The earliest timestamp stored in this node + * @return The new core node + */ + N createNode(NodeType type, int blockSize, int maxChildren, + int seqNumber, int parentSeqNumber, long start); + } + + // ------------------------------------------------------------------------ + // Tree-specific configuration + // ------------------------------------------------------------------------ + + /* Tree configuration constants */ + private final File fHistoryFile; + private final int fBlockSize; + private final int fMaxChildren; + private final int fProviderVersion; + private final long fTreeStart; + private final IHTIntervalReader fIntervalReader; + + /** Reader/writer object */ + private HtIo fTreeIO; + + // ------------------------------------------------------------------------ + // Variable Fields (will change throughout the existence of the SHT) + // ------------------------------------------------------------------------ + + /** Latest timestamp found in the tree 
(at any given moment) */ + private long fTreeEnd; + + /** The total number of nodes that exists in this tree */ + private int fNodeCount; + + /** "Cache" to keep the active nodes in memory */ + private final List fLatestBranch; + + /* Lock used to protect the accesses to the HT_IO object */ + private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false); + + /** + * Create a new State History from scratch, specifying all configuration + * parameters. + * + * @param stateHistoryFile + * The name of the history file + * @param blockSize + * The size of each "block" on disk in bytes. One node will + * always fit in one block. It should be at least 4096. + * @param maxChildren + * The maximum number of children allowed per core (non-leaf) + * node. + * @param providerVersion + * The version of the state provider. If a file already exists, + * and their versions match, the history file will not be rebuilt + * uselessly. + * @param treeStart + * The start time of the history + * @param intervalReader + * The factory to create new tree intervals when reading from + * the disk + * @throws IOException + * If an error happens trying to open/write to the file + * specified in the config + */ + public AbstractHistoryTree(File stateHistoryFile, + int blockSize, + int maxChildren, + int providerVersion, + long treeStart, + IHTIntervalReader intervalReader) throws IOException { + /* + * Simple check to make sure we have enough place in the 0th block for + * the tree configuration + */ + if (blockSize < TREE_HEADER_SIZE) { + throw new IllegalArgumentException(); + } + + fHistoryFile = stateHistoryFile; + fBlockSize = blockSize; + fMaxChildren = maxChildren; + fProviderVersion = providerVersion; + fTreeStart = treeStart; + fIntervalReader = intervalReader; + + fTreeEnd = treeStart; + fNodeCount = 0; + fLatestBranch = NonNullUtils.checkNotNull(Collections.synchronizedList(new ArrayList<>())); + + /* Prepare the IO object */ + fTreeIO = new HtIo<>(stateHistoryFile, + 
blockSize, + maxChildren, + true, + intervalReader, + getNodeFactory()); + + /* Add the first node to the tree */ + N firstNode = initNewLeafNode(-1, treeStart); + fLatestBranch.add(firstNode); + } + + /** + * "Reader" constructor : instantiate a SHTree from an existing tree file on + * disk + * + * @param existingStateFile + * Path/filename of the history-file we are to open + * @param expectedProviderVersion + * The expected version of the state provider + * @param intervalReader + * The factory used to read segments from the history tree + * @throws IOException + * If an error happens reading the file + */ + public AbstractHistoryTree(File existingStateFile, + int expectedProviderVersion, + IHTIntervalReader intervalReader) throws IOException { + /* + * Open the file ourselves, get the tree header information we need, + * then pass on the descriptor to the TreeIO object. + */ + int rootNodeSeqNb, res; + int bs, maxc; + long startTime; + + /* Java I/O mumbo jumbo... */ + if (!existingStateFile.exists()) { + throw new IOException("Selected state file does not exist"); //$NON-NLS-1$ + } + if (existingStateFile.length() <= 0) { + throw new IOException("Empty target file"); //$NON-NLS-1$ + } + + try (FileInputStream fis = new FileInputStream(existingStateFile); + FileChannel fc = fis.getChannel();) { + + ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE); + buffer.order(ByteOrder.LITTLE_ENDIAN); + buffer.clear(); + + res = fc.read(buffer); + if (res != TREE_HEADER_SIZE) { + throw new IOException("Invalid header size"); //$NON-NLS-1$ + } + + buffer.flip(); + + /* + * Check the magic number to make sure we're opening the right type + * of file + */ + res = buffer.getInt(); + if (res != getMagicNumber()) { + throw new IOException("Wrong magic number"); //$NON-NLS-1$ + } + + res = buffer.getInt(); /* File format version number */ + if (res != getFileVersion()) { + throw new IOException("Mismatching History Tree file format versions"); //$NON-NLS-1$ + } + + res = 
buffer.getInt(); /* Event handler's version number */ + if (res != expectedProviderVersion) { + /* + * The existing history was built using an event handler that + * doesn't match the current one in the framework. + * + * Information could be all wrong. Instead of keeping an + * incorrect history file, a rebuild is done. + */ + throw new IOException("Mismatching event handler versions"); //$NON-NLS-1$ + } + + bs = buffer.getInt(); /* Block Size */ + maxc = buffer.getInt(); /* Max nb of children per node */ + + fNodeCount = buffer.getInt(); + rootNodeSeqNb = buffer.getInt(); + startTime = buffer.getLong(); + + /* Set all other permanent configuration options */ + fHistoryFile = existingStateFile; + fBlockSize = bs; + fMaxChildren = maxc; + fProviderVersion = expectedProviderVersion; + fIntervalReader = intervalReader; + fTreeStart = startTime; + } + + /* + * FIXME We close fis here and the TreeIO will then reopen the same + * file, not extremely elegant. But how to pass the information here to + * the SHT otherwise? + */ + fTreeIO = new HtIo<>(fHistoryFile, + fBlockSize, + fMaxChildren, + false, + fIntervalReader, + getNodeFactory()); + + fLatestBranch = buildLatestBranch(rootNodeSeqNb); + fTreeEnd = getRootNode().getNodeEnd(); + + /* + * Make sure the history start time we read previously is consistent + * with was is actually in the root node. + */ + if (startTime != getRootNode().getNodeStart()) { + throw new IOException("Inconsistent start times in the " + //$NON-NLS-1$ + "history file, it might be corrupted."); //$NON-NLS-1$ + } + } + + /** + * Rebuild the latestBranch "cache" object by reading the nodes from disk + * (When we are opening an existing file on disk and want to append to it, + * for example). 
+ * + * @param rootNodeSeqNb + * The sequence number of the root node, so we know where to + * start + * @throws ClosedChannelException + */ + private List buildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException { + List list = new ArrayList<>(); + + N nextChildNode = fTreeIO.readNode(rootNodeSeqNb); + list.add(nextChildNode); + + // TODO: Do we need the full latest branch? The latest leaf may not be + // the one we'll query first... Won't it build itself later? + + /* Follow the last branch up to the leaf */ + while (nextChildNode.getNodeType() == HTNode.NodeType.CORE) { + nextChildNode = fTreeIO.readNode(nextChildNode.getLatestChild()); + list.add(nextChildNode); + } + return Collections.synchronizedList(list); + } + + // ------------------------------------------------------------------------ + // Accessors + // ------------------------------------------------------------------------ + + @Override + public long getTreeStart() { + return fTreeStart; + } + + @Override + public long getTreeEnd() { + return fTreeEnd; + } + + /** + * Get the number of nodes in this tree. + * + * @return The number of nodes + */ + public int getNodeCount() { + return fNodeCount; + } + + /** + * Get the current root node of this tree + * + * @return The root node + */ + public N getRootNode() { + return fLatestBranch.get(0); + } + + @Override + public long getFileSize() { + return fHistoryFile.length(); + } + + /** + * Return the latest branch of the tree. That branch is immutable. + * + * @return The immutable latest branch + */ + @VisibleForTesting + protected List getLatestBranch() { + return ImmutableList.copyOf(fLatestBranch); + } + + /** + * Get the node in the latest branch at a depth. 
If the depth is too large, + * it will throw an IndexOutOfBoundsException + * + * @param depth + * The depth at which to get the node + * @return The node at depth + */ + protected final N getLatestNode(int depth) { + if (depth > fLatestBranch.size()) { + throw new IndexOutOfBoundsException("Trying to get latest node too deep"); //$NON-NLS-1$ + } + return fLatestBranch.get(depth); + } + + /** + * Get the magic number for the history file. This number should be specific + * for each implementation of the history tree. + * + * @return The magic number for the history file + */ + protected abstract int getMagicNumber(); + + /** + * Get the file version for the history file. This file version should be + * modified for a history tree class whenever changing the format of the + * file. Different versions of the file may not be compatible. + * + * @return The file version for the history file. + */ + protected abstract int getFileVersion(); + + /** + * Get the factory to use to create new nodes for this history tree. + * + * This method is called in the constructor of the abstract class, so + * assigning the factory to a final field may cause NullPointerException + * since that final field may not be initialized the first time this is + * called. + * + * @return The NodeFactory for the History Tree + */ + protected abstract IHTNodeFactory getNodeFactory(); + + /** + * Read a node with a given sequence number + * + * @param seqNum + * The sequence number of the node to read + * @return The HTNode object + * @throws ClosedChannelException + * Exception thrown when reading the node, if the file was + * closed + */ + @VisibleForTesting + protected @NonNull N getNode(int seqNum) throws ClosedChannelException { + // First, check in the latest branch if the node is there + for (N node : fLatestBranch) { + if (node.getSequenceNumber() == seqNum) { + return node; + } + } + return fTreeIO.readNode(seqNum); + } + + /** + * Retrieve the TreeIO object. 
Should only be used for testing. + * + * @return The TreeIO + */ + @VisibleForTesting + protected HtIo getTreeIO() { + return fTreeIO; + } + + // ------------------------------------------------------------------------ + // HT_IO interface + // ------------------------------------------------------------------------ + + // TODO Remove from here + @Override + public FileInputStream supplyATReader() { + fRwl.readLock().lock(); + try { + return fTreeIO.supplyATReader(getNodeCount()); + } finally { + fRwl.readLock().unlock(); + } + } + + // TODO Remove from here + @Override + public File supplyATWriterFile() { + return fHistoryFile; + } + + // TODO Remove from here + @Override + public long supplyATWriterFilePos() { + return IHistoryTree.TREE_HEADER_SIZE + + ((long) getNodeCount() * fBlockSize); + } + + /** + * Read a node from the tree. + * + * @param seqNumber + * The sequence number of the node to read + * @return The node + * @throws ClosedChannelException + * If the tree IO is unavailable + */ + public N readNode(int seqNumber) throws ClosedChannelException { + /* Try to read the node from memory */ + synchronized (fLatestBranch) { + for (N node : fLatestBranch) { + if (node.getSequenceNumber() == seqNumber) { + return node; + } + } + } + + fRwl.readLock().lock(); + try { + /* Read the node from disk */ + return fTreeIO.readNode(seqNumber); + } finally { + fRwl.readLock().unlock(); + } + } + + /** + * Write a node object to the history file. + * + * @param node + * The node to write to disk + */ + public void writeNode(N node) { + fRwl.readLock().lock(); + try { + fTreeIO.writeNode(node); + } finally { + fRwl.readLock().unlock(); + } + } + + /** + * Close the history file. + */ + @Override + public void closeFile() { + fRwl.writeLock().lock(); + try { + fTreeIO.closeFile(); + clearContent(); + } finally { + fRwl.writeLock().unlock(); + } + } + + /** + * Delete the history file. 
+ */ + @Override + public void deleteFile() { + fRwl.writeLock().lock(); + try { + fTreeIO.deleteFile(); + clearContent(); + } finally { + fRwl.writeLock().unlock(); + } + } + + @Override + public void cleanFile() throws IOException { + fRwl.writeLock().lock(); + try { + closeTree(fTreeEnd); + fTreeIO.deleteFile(); + + fTreeIO = new HtIo<>(fHistoryFile, + fBlockSize, + fMaxChildren, + true, + fIntervalReader, + getNodeFactory()); + + clearContent(); + /* Add the first node to the tree */ + N firstNode = initNewLeafNode(-1, fTreeStart); + fLatestBranch.add(firstNode); + } finally { + fRwl.writeLock().unlock(); + } + } + + private void clearContent() { + // Re-initialize the content of the tree after the file is deleted or + // closed + fNodeCount = 0; + fLatestBranch.clear(); + } + + // ------------------------------------------------------------------------ + // Operations + // ------------------------------------------------------------------------ + + /** + * Insert an interval in the tree. + * + * @param interval + * The interval to be inserted + * @throws RangeException + * If the start of end time of the interval are invalid + */ + @Override + public synchronized void insert(E interval) throws RangeException { + if (interval.getStart() < fTreeStart) { + throw new RangeException("Interval Start:" + interval.getStart() + ", Config Start:" + fTreeStart); //$NON-NLS-1$ //$NON-NLS-2$ + } + tryInsertAtNode(interval, fLatestBranch.size() - 1); + } + + /** + * Add a new empty core node to the tree. + * + * @param parentSeqNumber + * Sequence number of this node's parent + * @param startTime + * Start time of the new node + * @return The newly created node + */ + protected final N initNewCoreNode(int parentSeqNumber, long startTime) { + N newNode = getNodeFactory().createNode(NodeType.CORE, fBlockSize, fMaxChildren, + fNodeCount, parentSeqNumber, startTime); + fNodeCount++; + return newNode; + } + + /** + * Add a new empty leaf node to the tree. 
+ * + * @param parentSeqNumber + * Sequence number of this node's parent + * @param startTime + * Start time of the new node + * @return The newly created node + */ + protected final N initNewLeafNode(int parentSeqNumber, long startTime) { + N newNode = getNodeFactory().createNode(NodeType.LEAF, fBlockSize, fMaxChildren, + fNodeCount, parentSeqNumber, startTime); + fNodeCount++; + return newNode; + } + + /** + * Inner method to find in which node we should add the interval. + * + * @param interval + * The interval to add to the tree + * @param depth + * The index *in the latestBranch* where we are trying the + * insertion + */ + protected final void tryInsertAtNode(E interval, int depth) { + N targetNode = getLatestBranch().get(depth); + + /* Verify if there is enough room in this node to store this interval */ + if (interval.getSizeOnDisk() > targetNode.getNodeFreeSpace()) { + /* Nope, not enough room. Insert in a new sibling instead. */ + addSiblingNode(depth, getNewBranchStart(depth, interval)); + tryInsertAtNode(interval, getLatestBranch().size() - 1); + return; + } + + /* Make sure the interval time range fits this node */ + if (interval.getStart() < targetNode.getNodeStart()) { + /* + * No, this interval starts before the startTime of this node. We + * need to check recursively in parents if it can fit. + */ + tryInsertAtNode(interval, depth - 1); + return; + } + + /* + * Ok, there is room, and the interval fits in this time slot. Let's add + * it. + */ + targetNode.add(interval); + + updateEndTime(interval); + } + + /** + * Get the start time of the new node of the branch that will be added + * starting at depth. + * + * Note that the depth is the depth of the last node that was filled and to + * which a sibling should be added. But depending on the returned start + * time, the actual new branch may start at a lower depth if the start time + * happens to be lesser than the parent's start time. 
+ * + * @param depth + * The depth of the last node that was filled and at which the + * new branch should start. + * @param interval + * The interval that is about to be inserted + * @return The value that should be the start time of the sibling node + */ + protected abstract long getNewBranchStart(int depth, E interval); + + /** + * Method to add a sibling to any node in the latest branch. This will add + * children back down to the leaf level, if needed. + * + * @param depth + * The depth in latestBranch where we start adding + * @param newNodeStartTime + * The start time of the new node + */ + private final void addSiblingNode(int depth, long newNodeStartTime) { + synchronized (fLatestBranch) { + final long splitTime = fTreeEnd; + + if (depth >= fLatestBranch.size()) { + /* + * We need to make sure (indexOfNode - 1) doesn't get the last + * node in the branch, because that one is a Leaf Node. + */ + throw new IllegalStateException(); + } + + /* Check if we need to add a new root node */ + if (depth == 0) { + addNewRootNode(newNodeStartTime); + return; + } + + /* + * Check if we can indeed add a child to the target parent and if + * the new start time is not before the target parent. 
+ */ + if (fLatestBranch.get(depth - 1).getNbChildren() == fMaxChildren || + newNodeStartTime < fLatestBranch.get(depth - 1).getNodeStart()) { + /* If not, add a branch starting one level higher instead */ + addSiblingNode(depth - 1, newNodeStartTime); + return; + } + + /* + * Close nodes from the leaf up because some parent nodes may need + * to get updated when their children are closed + */ + for (int i = fLatestBranch.size() - 1; i >= depth; i--) { + fLatestBranch.get(i).closeThisNode(splitTime); + fTreeIO.writeNode(fLatestBranch.get(i)); + } + + /* Split off the new branch from the old one */ + for (int i = depth; i < fLatestBranch.size(); i++) { + N prevNode = fLatestBranch.get(i - 1); + N newNode; + + switch (fLatestBranch.get(i).getNodeType()) { + case CORE: + newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime); + break; + case LEAF: + newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime); + break; + default: + throw new IllegalStateException(); + } + + prevNode.linkNewChild(newNode); + fLatestBranch.set(i, newNode); + } + } + } + + /** + * Similar to the previous method, except here we rebuild a completely new + * latestBranch + */ + private void addNewRootNode(long newNodeStartTime) { + final long nodeEnd = fTreeEnd; + + N oldRootNode = fLatestBranch.get(0); + N newRootNode = initNewCoreNode(-1, fTreeStart); + + /* Tell the old root node that it isn't root anymore */ + oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber()); + + /* Close off the whole current latestBranch */ + for (int i = fLatestBranch.size() - 1; i >= 0; i--) { + fLatestBranch.get(i).closeThisNode(nodeEnd); + fTreeIO.writeNode(fLatestBranch.get(i)); + } + + /* Link the new root to its first child (the previous root node) */ + newRootNode.linkNewChild(oldRootNode); + + /* Rebuild a new latestBranch */ + int depth = fLatestBranch.size(); + fLatestBranch.clear(); + fLatestBranch.add(newRootNode); + + // Create new coreNode + for (int i 
= 1; i < depth; i++) { + N prevNode = fLatestBranch.get(i - 1); + N newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime); + prevNode.linkNewChild(newNode); + fLatestBranch.add(newNode); + } + + // Create the new leafNode + N prevNode = fLatestBranch.get(depth - 1); + N newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime); + prevNode.linkNewChild(newNode); + fLatestBranch.add(newNode); + } + + /** + * Update the tree's end time with this interval data + * + * @param interval + * The interval that was just added to the tree + */ + protected void updateEndTime(E interval) { + fTreeEnd = Math.max(fTreeEnd, interval.getEnd()); + } + + @Override + public void closeTree(long requestedEndTime) { + /* This is an important operation, queries can wait */ + synchronized (fLatestBranch) { + /* + * Work-around the "empty branches" that get created when the root + * node becomes full. Overwrite the tree's end time with the + * original wanted end-time, to ensure no queries are sent into + * those empty nodes. + */ + fTreeEnd = requestedEndTime; + + /* Close off the latest branch of the tree */ + for (int i = fLatestBranch.size() - 1; i >= 0; i--) { + fLatestBranch.get(i).closeThisNode(fTreeEnd); + fTreeIO.writeNode(fLatestBranch.get(i)); + } + + try (FileOutputStream fc = fTreeIO.getFileWriter(-1);) { + ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE); + buffer.order(ByteOrder.LITTLE_ENDIAN); + buffer.clear(); + + buffer.putInt(getMagicNumber()); + + buffer.putInt(getFileVersion()); + buffer.putInt(fProviderVersion); + + buffer.putInt(fBlockSize); + buffer.putInt(fMaxChildren); + + buffer.putInt(fNodeCount); + + /* root node seq. 
nb */ + buffer.putInt(fLatestBranch.get(0).getSequenceNumber()); + + /* start time of this history */ + buffer.putLong(fLatestBranch.get(0).getNodeStart()); + + buffer.flip(); + fc.write(buffer.array()); + /* done writing the file header */ + + } catch (IOException e) { + /* + * If we were able to write so far, there should not be any + * problem at this point... + */ + throw new RuntimeException("State system write error"); //$NON-NLS-1$ + } + } + } + + @Override + public Iterable getMatchingIntervals(RangeCondition timeCondition, + Predicate extraPredicate) { + + // TODO Change this to evaluate the nodes lazily + + List> intervalsOfNodes = new LinkedList<>(); + + /* Queue is a stack of nodes containing nodes intersecting t */ + Deque queue = new LinkedList<>(); + /* We start by reading the information in the root node */ + queue.add(getRootNode().getSequenceNumber()); + + /* Then we follow the down in the relevant children */ + try { + while (!queue.isEmpty()) { + int sequenceNumber = queue.pop(); + HTNode currentNode = readNode(sequenceNumber); + RangeCondition nodeCondition = timeCondition.subCondition( + currentNode.getNodeStart(), currentNode.getNodeEnd()); + + if (nodeCondition == null) { + continue; + } + + if (currentNode.getNodeType() == HTNode.NodeType.CORE) { + /* Here we add the relevant children nodes for BFS */ + queue.addAll(currentNode.selectNextChildren(nodeCondition)); + } + Iterable nodeIntervals = currentNode.getMatchingIntervals(nodeCondition, extraPredicate); + intervalsOfNodes.add(nodeIntervals); + } + } catch (ClosedChannelException e) { + } + return Iterables.concat(intervalsOfNodes); + } + + @Override + public String toString() { + return "Information on the current tree:\n\n" + "Blocksize: " //$NON-NLS-1$ //$NON-NLS-2$ + + fBlockSize + "\n" + "Max nb. 
of children per node: " //$NON-NLS-1$//$NON-NLS-2$ + + fMaxChildren + "\n" + "Number of nodes: " + fNodeCount //$NON-NLS-1$//$NON-NLS-2$ + + "\n" + "Depth of the tree: " + fLatestBranch.size() + "\n" //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ + + "Size of the treefile: " + getFileSize() + "\n" //$NON-NLS-1$//$NON-NLS-2$ + + "Root node has sequence number: " //$NON-NLS-1$ + + fLatestBranch.get(0).getSequenceNumber() + "\n" //$NON-NLS-1$ + + "'Latest leaf' has sequence number: " //$NON-NLS-1$ + + fLatestBranch.get(fLatestBranch.size() - 1).getSequenceNumber(); + } + +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/HTNode.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/HTNode.java new file mode 100644 index 0000000000..04c7d26930 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/HTNode.java @@ -0,0 +1,941 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. 
This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.eclipse.jdt.annotation.NonNull; +import org.eclipse.jdt.annotation.Nullable; +import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition; +import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException; +import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.AbstractHistoryTree.IHTNodeFactory; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader; +import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader; +import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.SafeByteBufferFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; + +/** + * The base class for all the types of nodes that go in the History Tree. 
+ * + * @author Alexandre Montplaisir + * @author Geneviève Bastien + * @param + * The type of objects that will be saved in the tree + */ +public class HTNode implements IHTNode { + + // ------------------------------------------------------------------------ + // Class fields + // ------------------------------------------------------------------------ + + /** + * Size of the basic node header. This is backward-compatible with previous + * state sytem history trees + * + *
+     *  1 - byte (the type of node)
+     * 16 - 2x long (start time, end time)
+     * 12 - 3x int (seq number, parent seq number, intervalcount)
+     *  1 - byte (done or not)
+     * 
+ */ + @VisibleForTesting + protected static final int COMMON_HEADER_SIZE = Byte.BYTES + + 2 * Long.BYTES + + 3 * Integer.BYTES + + Byte.BYTES; + + // ------------------------------------------------------------------------ + // Attributes + // ------------------------------------------------------------------------ + + /** + * Default implementation of the interval comparator, which sorts first by + * end times, then by start times + */ + private final Comparator fDefaultIntervalComparator = Comparator + .comparingLong(E::getEnd) + .thenComparingLong(E::getStart); + + /* Node configuration, defined by the history tree */ + private final int fBlockSize; + private final int fMaxChildren; + + /* Time range of this node */ + private final long fNodeStart; + private long fNodeEnd; + + /* Sequence number = position in the node section of the file */ + private final int fSequenceNumber; + private int fParentSequenceNumber; /* = -1 if this node is the root node */ + + /* Sum of bytes of all objects in the node */ + private int fSizeOfContentSection; + + /* + * True if this node was saved on disk (meaning its end time is now fixed) + */ + private volatile boolean fIsOnDisk; + + /* List containing all the intervals contained in this node */ + private final List fIntervals; + + /* Lock used to protect the accesses to objects, nodeEnd and such */ + private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false); + + /* Object containing extra data for core nodes */ + private final @Nullable CoreNodeData fExtraData; + + /** + * A class that encapsulates data about children of this node. This class + * will be constructed by the core node and contains the extra header data, + * methods to read/write the header data, etc. + * + * This base class for CORE nodes just saves the children, ie their sequence + * number. 
+ * + * @author Geneviève Bastien + */ + protected static class CoreNodeData { + + /** Back-reference to the node class */ + private final HTNode fNode; + + /** + * Seq. numbers of the children nodes (size = max number of nodes, + * determined by configuration) + */ + private final int[] fChildren; + + /** Nb. of children this node has */ + private int fNbChildren; + + /** + * Constructor + * + * @param node + * The node containing this extra data. + */ + public CoreNodeData(HTNode node) { + fNode = node; + fNbChildren = 0; + /* + * We instantiate the following array at full size right away, since + * we want to reserve that space in the node's header. "fNbChildren" + * will tell us how many relevant entries there are in those tables. + */ + fChildren = new int[fNode.fMaxChildren]; + } + + /** + * Read the specific header for this extra node data + * + * @param buffer + * The byte buffer in which to read + */ + protected void readSpecificHeader(ByteBuffer buffer) { + fNode.takeWriteLock(); + try { + int size = fNode.getMaxChildren(); + + fNbChildren = buffer.getInt(); + + for (int i = 0; i < fNbChildren; i++) { + fChildren[i] = buffer.getInt(); + } + for (int i = fNbChildren; i < size; i++) { + buffer.getInt(); + } + } finally { + fNode.releaseWriteLock(); + } + } + + /** + * Write the specific header for this extra node data + * + * @param buffer + * The byte buffer in which to write + */ + protected void writeSpecificHeader(ByteBuffer buffer) { + fNode.takeReadLock(); + try { + buffer.putInt(fNbChildren); + + /* Write the "children's seq number" array */ + for (int i = 0; i < fNbChildren; i++) { + buffer.putInt(fChildren[i]); + } + for (int i = fNbChildren; i < fNode.getMaxChildren(); i++) { + buffer.putInt(0); + } + } finally { + fNode.releaseReadLock(); + } + } + + /** + * Get the number of children + * + * @return The number of children + */ + public int getNbChildren() { + fNode.takeReadLock(); + try { + return fNbChildren; + } finally { + 
fNode.releaseReadLock(); + } + } + + /** + * Get the child sequence number at an index + * + * @param index + * The index of the child to get + * @return The sequence number of the child node + */ + public int getChild(int index) { + fNode.takeReadLock(); + try { + if (index >= fNbChildren) { + throw new IndexOutOfBoundsException("The child at index " + index + " does not exist"); //$NON-NLS-1$ //$NON-NLS-2$ + } + return fChildren[index]; + } finally { + fNode.releaseReadLock(); + } + } + + /** + * Get the sequence number of the last child node of this one + * + * @return The sequence number of the last child + */ + public int getLatestChild() { + fNode.takeReadLock(); + try { + return fChildren[fNbChildren - 1]; + } finally { + fNode.releaseReadLock(); + } + } + + /** + * Add a new child to this node's data + * + * @param childNode + * The child node to add + */ + public void linkNewChild(IHTNode childNode) { + fNode.takeWriteLock(); + try { + if (fNbChildren >= fNode.getMaxChildren()) { + throw new IllegalStateException("Asked to link another child but parent already has maximum number of children"); //$NON-NLS-1$ + } + + fChildren[fNbChildren] = childNode.getSequenceNumber(); + fNbChildren++; + + } finally { + fNode.releaseWriteLock(); + } + } + + /** + * Get the size of the extra header space necessary for this node's + * extra data + * + * @return The extra header size + */ + protected int getSpecificHeaderSize() { + int maxChildren = fNode.getMaxChildren(); + int specificSize = Integer.BYTES /* 1x int (nbChildren) */ + + /* MAX_NB * int ('children' table) */ + + Integer.BYTES * maxChildren; + + return specificSize; + } + + @Override + public String toString() { + /* Only used for debugging, shouldn't be externalized */ + return String.format("Core Node, %d children %s", //$NON-NLS-1$ + fNbChildren, Arrays.toString(Arrays.copyOf(fChildren, fNbChildren))); + } + + /** + * Inner method to select the sequence numbers for the children of the + * current node that 
intersect the given timestamp. Useful for moving + * down the tree. + * + * @param timeCondition + * The time-based RangeCondition to choose which children + * match. + * @return Collection of sequence numbers of the child nodes that + * intersect t, non-null empty collection if this is a Leaf Node + */ + public final Collection selectNextChildren(RangeCondition timeCondition) { + fNode.takeReadLock(); + try { + return selectNextIndices(timeCondition).stream() + .map(i -> fChildren[i]) + .collect(Collectors.toList()); + } finally { + fNode.releaseReadLock(); + } + } + + /** + * Inner method to select the indices of the children of the current + * node that intersect the given timestamp. Useful for moving down the + * tree. + * + * This is the method that children implementations of this node should + * override. They may call + * super.selectNextIndices(timeCondition) to get access to + * the subset of indices that match the parent's condition and add their + * own filters. When this method is called a read-lock is already taken + * on the node + * + * @param timeCondition + * The time-based RangeCondition to choose which children + * match. 
+ * @return Collection of the indices of the child nodes that intersect + * the time condition + */ + protected Collection selectNextIndices(RangeCondition timeCondition) { + /* By default, all children are returned */ + List childList = new ArrayList<>(); + for (int i = 0; i < fNbChildren; i++) { + childList.add(i); + } + + return childList; + } + + } + + /** + * Constructor + * + * @param type + * The type of this node + * @param blockSize + * The size (in bytes) of a serialized node on disk + * @param maxChildren + * The maximum allowed number of children per node + * @param seqNumber + * The (unique) sequence number assigned to this particular node + * @param parentSeqNumber + * The sequence number of this node's parent node + * @param start + * The earliest timestamp stored in this node + */ + public HTNode(NodeType type, int blockSize, int maxChildren, int seqNumber, + int parentSeqNumber, long start) { + fBlockSize = blockSize; + fMaxChildren = maxChildren; + + fNodeStart = start; + fSequenceNumber = seqNumber; + fParentSequenceNumber = parentSeqNumber; + + fSizeOfContentSection = 0; + fIsOnDisk = false; + fIntervals = new ArrayList<>(); + + fExtraData = createNodeExtraData(type); + } + + /** + * Reader factory method. Build a Node object (of the right type) by reading + * a block in the file. + * + * @param blockSize + * The size (in bytes) of a serialized node on disk + * @param maxChildren + * The maximum allowed number of children per node + * @param fc + * FileChannel to the history file, ALREADY SEEKED at the start + * of the node. 
+ * @param objectReader + * The reader to read serialized node objects + * @param nodeFactory + * The factory to create the nodes for this tree + * @return The node object + * @throws IOException + * If there was an error reading from the file channel + */ + public static final > N readNode( + int blockSize, + int maxChildren, + FileChannel fc, + IHTIntervalReader objectReader, + IHTNodeFactory nodeFactory) throws IOException { + + N newNode; + + ByteBuffer buffer = ByteBuffer.allocate(blockSize); + buffer.order(ByteOrder.LITTLE_ENDIAN); + buffer.clear(); + int res = fc.read(buffer); + if (res != blockSize) { + throw new IOException("The block for the HTNode is not the right size: " + res); //$NON-NLS-1$ + } + buffer.flip(); + + /* Read the common header part */ + byte typeByte = buffer.get(); + NodeType type = NodeType.fromByte(typeByte); + long start = buffer.getLong(); + long end = buffer.getLong(); + int seqNb = buffer.getInt(); + int parentSeqNb = buffer.getInt(); + int intervalCount = buffer.getInt(); + buffer.get(); // TODO Used to be "isDone", to be removed from the header + + /* Now the rest of the header depends on the node type */ + switch (type) { + case CORE: + case LEAF: + newNode = nodeFactory.createNode(type, blockSize, maxChildren, seqNb, parentSeqNb, start); + newNode.readSpecificHeader(buffer); + break; + + default: + /* Unrecognized node type */ + throw new IOException(); + } + + /* + * At this point, we should be done reading the header and 'buffer' + * should only have the intervals left + */ + ISafeByteBufferReader readBuffer = SafeByteBufferFactory.wrapReader(buffer, res - buffer.position()); + for (int i = 0; i < intervalCount; i++) { + E interval = objectReader.readInterval(readBuffer); + newNode.add(interval); + } + + /* Assign the node's other information we have read previously */ + newNode.setNodeEnd(end); + newNode.setOnDisk(); + + return newNode; + } + + /** + * Create a node's extra data object for the given node type + * + * @param 
type + * The type of node + * @return The node's extra data object, or null if there is + * none + */ + protected @Nullable CoreNodeData createNodeExtraData(NodeType type) { + if (type == NodeType.CORE) { + return new CoreNodeData(this); + } + return null; + } + + @Override + public final void writeSelf(FileChannel fc) throws IOException { + /* + * Yes, we are taking the *read* lock here, because we are reading the + * information in the node to write it to disk. + */ + fRwl.readLock().lock(); + try { + final int blockSize = getBlockSize(); + + ByteBuffer buffer = ByteBuffer.allocate(blockSize); + buffer.order(ByteOrder.LITTLE_ENDIAN); + buffer.clear(); + + /* Write the common header part */ + buffer.put(getNodeType().toByte()); + buffer.putLong(fNodeStart); + buffer.putLong(fNodeEnd); + buffer.putInt(fSequenceNumber); + buffer.putInt(fParentSequenceNumber); + buffer.putInt(fIntervals.size()); + buffer.put((byte) 1); // TODO Used to be "isDone", to be removed + // from header + + /* Now call the inner method to write the specific header part */ + writeSpecificHeader(buffer); + + /* Back to us, we write the intervals */ + fIntervals.forEach(i -> i.writeSegment(SafeByteBufferFactory.wrapWriter(buffer, i.getSizeOnDisk()))); + if (blockSize - buffer.position() != getNodeFreeSpace()) { + throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$ + } + /* + * Fill the rest with zeros + */ + while (buffer.position() < blockSize) { + buffer.put((byte) 0); + } + + /* Finally, write everything in the Buffer to disk */ + buffer.flip(); + int res = fc.write(buffer); + if (res != blockSize) { + throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$ + } + + } finally { + fRwl.readLock().unlock(); + } + fIsOnDisk = true; + } + + // 
------------------------------------------------------------------------ + // Accessors + // ------------------------------------------------------------------------ + + /** + * Get this node's block size. + * + * @return The block size + */ + protected final int getBlockSize() { + return fBlockSize; + } + + /** + * Get this node's maximum amount of children. + * + * @return The maximum amount of children + */ + protected final int getMaxChildren() { + return fMaxChildren; + } + + /** + * Get the interval comparator. Intervals will always be stored sorted + * according to this comparator. This can be used by insertion or retrieval + * algorithms. + * + * Sub-classes may override this to change or specify the interval + * comparator. + * + * @return The way intervals are to be sorted in this node + */ + protected Comparator getIntervalComparator() { + return fDefaultIntervalComparator; + } + + @Override + public long getNodeStart() { + return fNodeStart; + } + + @Override + public long getNodeEnd() { + if (fIsOnDisk) { + return fNodeEnd; + } + return Long.MAX_VALUE; + } + + @Override + public int getSequenceNumber() { + return fSequenceNumber; + } + + @Override + public int getParentSequenceNumber() { + return fParentSequenceNumber; + } + + @Override + public void setParentSequenceNumber(int newParent) { + fParentSequenceNumber = newParent; + } + + @Override + public boolean isOnDisk() { + return fIsOnDisk; + } + + /** + * Get the node's extra data. + * + * @return The node extra data + */ + protected @Nullable CoreNodeData getCoreNodeData() { + return fExtraData; + } + + /** + * Get the list of objects in this node. This list is immutable. All objects + * must be inserted through the {@link #add(IHTInterval)} method + * + * @return The list of intervals in this node + */ + protected List getIntervals() { + return ImmutableList.copyOf(fIntervals); + } + + /** + * Set this node's end time. Called by the reader factory. 
+ * + * @param end + * The end time of the node + */ + protected void setNodeEnd(long end) { + fNodeEnd = end; + } + + /** + * Set this node to be on disk. Called by the reader factory. + */ + protected void setOnDisk() { + fIsOnDisk = true; + } + + @Override + public void add(E newInterval) { + fRwl.writeLock().lock(); + try { + /* + * Just in case, should be checked before even calling this function + */ + int objSize = newInterval.getSizeOnDisk(); + if (objSize > getNodeFreeSpace()) { + throw new IllegalArgumentException("The interval to insert (" + objSize + ") is larger than available space (" + getNodeFreeSpace() + ")"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ + } + + int insertPoint = Collections.binarySearch(fIntervals, newInterval, getIntervalComparator()); + insertPoint = (insertPoint >= 0 ? insertPoint : -insertPoint - 1); + fIntervals.add(insertPoint, newInterval); + + fSizeOfContentSection += objSize; + + } finally { + fRwl.writeLock().unlock(); + } + } + + @Override + public Iterable getMatchingIntervals(RangeCondition timeCondition, + Predicate extraPredicate) { + + // TODO Use getIntervalComparator() to restrict the dataset further + // TODO Benchmark using/returning streams instead of iterables + + if (isOnDisk()) { + @NonNull Iterable ret = fIntervals; + ret = Iterables.filter(ret, interval -> timeCondition.intersects(interval.getStart(), interval.getEnd())); + ret = Iterables.filter(ret, interval -> extraPredicate.test(interval)); + return ret; + } + + takeReadLock(); + try { + return fIntervals.stream() + .filter(interval -> timeCondition.intersects(interval.getStart(), interval.getEnd())) + .filter(extraPredicate) + /* + * Because this class works with read locks, we can't + * return a lazy stream unfortunately. Room for improvement? 
+ */ + .collect(Collectors.toList()); + + } finally { + releaseReadLock(); + } + } + + @Override + public void closeThisNode(long endtime) { + fRwl.writeLock().lock(); + try { + /** + * FIXME: was assert (endtime >= fNodeStart); but that exception is + * reached with an empty node that has start time endtime + 1 + */ + // if (endtime < fNodeStart) { + // throw new IllegalArgumentException("Endtime " + endtime + " + // cannot be lower than start time " + fNodeStart); + // } + + if (!fIntervals.isEmpty()) { + /* + * Make sure there are no intervals in this node with their + * EndTime > the one requested. Only need to check the last one + * since they are sorted + */ + if (endtime < fIntervals.get(fIntervals.size() - 1).getEnd()) { + throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the objects of this node"); //$NON-NLS-1$ + } + } + + fNodeEnd = endtime; + } finally { + fRwl.writeLock().unlock(); + } + } + + @Override + public final int getTotalHeaderSize() { + return COMMON_HEADER_SIZE + getSpecificHeaderSize(); + } + + /** + * @return The offset, within the node, where the Data section ends + */ + private int getDataSectionEndOffset() { + return getTotalHeaderSize() + fSizeOfContentSection; + } + + @Override + public int getNodeFreeSpace() { + fRwl.readLock().lock(); + try { + int ret = getBlockSize() - getDataSectionEndOffset(); + return ret; + } finally { + fRwl.readLock().unlock(); + } + } + + @Override + public long getNodeUsagePercent() { + fRwl.readLock().lock(); + try { + final int blockSize = getBlockSize(); + float freePercent = (float) getNodeFreeSpace() + / (float) (blockSize - getTotalHeaderSize()) + * 100F; + return (long) (100L - freePercent); + + } finally { + fRwl.readLock().unlock(); + } + } + + @Override + public NodeType getNodeType() { + @Nullable + CoreNodeData extraData = getCoreNodeData(); + if (extraData == null) { + return NodeType.LEAF; + } + return NodeType.CORE; + } + + /** + * Return 
the specific header size of this node. This means the size + * occupied by the type-specific section of the header (not counting the + * common part). + * + * @return The specific header size + */ + protected int getSpecificHeaderSize() { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + return extraData.getSpecificHeaderSize(); + } + return 0; + } + + /** + * Read the specific header for this node + * + * @param buffer + * The buffer to read from + */ + protected void readSpecificHeader(ByteBuffer buffer) { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + extraData.readSpecificHeader(buffer); + } + } + + /** + * Write the type-specific part of the header in a byte buffer. + * + * @param buffer + * The buffer to write to. It should already be at the correct + * position. + */ + protected void writeSpecificHeader(ByteBuffer buffer) { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + extraData.writeSpecificHeader(buffer); + } + } + + /** + * Node-type-specific toString method. Used for debugging. 
+ * + * @return A string representing the node + */ + protected String toStringSpecific() { + return ""; //$NON-NLS-1$ + } + + @Override + public boolean isEmpty() { + return fIntervals.isEmpty(); + } + + // ------------------------------------------- + // Core node methods + // ------------------------------------------- + + @Override + public int getNbChildren() { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + return extraData.getNbChildren(); + } + return IHTNode.super.getNbChildren(); + } + + @Override + public int getChild(int index) { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + return extraData.getChild(index); + } + return IHTNode.super.getChild(index); + } + + @Override + public int getLatestChild() { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + return extraData.getLatestChild(); + } + return IHTNode.super.getLatestChild(); + } + + @Override + public void linkNewChild(@NonNull IHTNode childNode) { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + extraData.linkNewChild(childNode); + return; + } + IHTNode.super.linkNewChild(childNode); + } + + @Override + public Collection selectNextChildren(RangeCondition timeCondition) + throws RangeException { + CoreNodeData extraData = fExtraData; + if (extraData != null) { + return extraData.selectNextChildren(timeCondition); + } + return IHTNode.super.selectNextChildren(timeCondition); + } + + // ----------------------------------------- + // Locking + // ----------------------------------------- + + /** + * Takes a read lock on the fields of this class. Each call to this method + * should be followed by a {@link HTNode#releaseReadLock()}, in a + * try-finally clause + */ + protected void takeReadLock() { + fRwl.readLock().lock(); + } + + /** + * Releases a read lock on the fields of this class. 
A call to this method + * should have been preceded by a call to {@link HTNode#takeReadLock()} + */ + protected void releaseReadLock() { + fRwl.readLock().unlock(); + } + + /** + * Takes a write lock on the fields of this class. Each call to this method + * should be followed by a {@link HTNode#releaseWriteLock()}, in a + * try-finally clause + */ + protected void takeWriteLock() { + fRwl.writeLock().lock(); + } + + /** + * Releases a write lock on the fields of this class. A call to this method + * should have been preceded by a call to {@link HTNode#takeWriteLock()} + */ + protected void releaseWriteLock() { + fRwl.writeLock().unlock(); + } + + // ----------------------------------------- + // Object methods + // ----------------------------------------- + + @SuppressWarnings("nls") + @Override + public String toString() { + /* Only used for debugging, shouldn't be externalized */ + return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]", + fSequenceNumber, + (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber, + toStringSpecific(), + fIntervals.size(), + getNodeUsagePercent(), + fNodeStart, + (fIsOnDisk || fNodeEnd != 0) ? 
fNodeEnd : "..."); + } + + @Override + public int hashCode() { + return Objects.hash(fBlockSize, fMaxChildren, fNodeStart, fNodeEnd, fSequenceNumber, fParentSequenceNumber); + } + + @Override + public boolean equals(@Nullable Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + HTNode other = (HTNode) obj; + return (fBlockSize == other.fBlockSize && + fMaxChildren == other.fMaxChildren && + fNodeStart == other.fNodeStart && + fNodeEnd == other.fNodeEnd && + fSequenceNumber == other.fSequenceNumber && + fParentSequenceNumber == other.fParentSequenceNumber); + } + +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHTNode.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHTNode.java new file mode 100644 index 0000000000..2106382eba --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHTNode.java @@ -0,0 +1,271 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. 
This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.util.Collection; +import java.util.Collections; +import java.util.function.Predicate; + +import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition; +import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval; + +/** + * Interface for history tree nodes + * + * @author Geneviève Bastien + * @param + * The type of objects that will be saved in the tree + */ +public interface IHTNode { + + /** + * The type of node + */ + public enum NodeType { + /** + * Core node, which is a "front" node, at any level of the tree except + * the bottom-most one. It has children, and may have extensions. + */ + CORE((byte) 1), + /** + * Leaf node, which is a node at the last bottom level of the tree. It + * cannot have any children or extensions. + */ + LEAF((byte) 2); + + private final byte fByte; + + NodeType(byte rep) { + fByte = rep; + } + + /** + * Determine a node type by reading a serialized byte. + * + * @param rep + * The byte representation of the node type + * @return The corresponding NodeType + */ + public static NodeType fromByte(byte rep) { + switch (rep) { + case 1: + return CORE; + case 2: + return LEAF; + default: + throw new IllegalArgumentException("The NodeType byte " + rep + " is not a valid type"); //$NON-NLS-1$ //$NON-NLS-2$ + } + } + + /** + * Get the byte representation of this node type. It can then be read + * with {@link #fromByte}. 
+ * + * @return The byte matching this node type + */ + public byte toByte() { + return fByte; + } + } + + /** + * Write this node to the given file channel. + * + * @param fc + * The file channel to write to (should be sought to be correct + * position) + * @throws IOException + * If there was an error writing + */ + void writeSelf(FileChannel fc) throws IOException; + + /** + * Get the start time of this node. + * + * @return The start time of this node + */ + long getNodeStart(); + + /** + * Get the end time of this node. Will return {@link Long#MAX_VALUE} if the + * node is not yet written to disk, as the real end time is not yet known. + * + * @return The end time of this node. + */ + long getNodeEnd(); + + /** + * Get the sequence number of this node. + * + * @return The sequence number of this node + */ + int getSequenceNumber(); + + /** + * Get the sequence number of this node's parent. + * + * @return The parent sequence number + */ + int getParentSequenceNumber(); + + /** + * Change this node's parent. Used when we create a new root node for + * example. + * + * @param newParent + * The sequence number of the node that is the new parent + */ + void setParentSequenceNumber(int newParent); + + /** + * Return if this node is "done" (full and written to disk). + * + * @return If this node is done or not + */ + boolean isOnDisk(); + + /** + * Add an interval to this node. The caller of this method must make sure that + * there is enough space on this node to add this object. Also, it is the + * responsibility of the caller to make sure that the element to add is + * within the boundary of this node. No check on start and end is expected + * to be done in this method. + * + * @param newInterval + * Interval to add to this node + */ + void add(E newInterval); + + /** + * We've received word from the containerTree that newest nodes now exist to + * our right. 
(Puts isDone = true and sets the endtime) + * + * @param endtime + * The nodeEnd time that the node will have + */ + void closeThisNode(long endtime); + + /** + * Retrieve the intervals inside this node that match the given conditions. + * + * @param timeCondition + * The time-based RangeCondition + * @param extraPredicate + * Extra predicate to run on the elements. Only those also + * matching this predicate will be returned. + * @return Iterable of the elements in this node matching the condtions + */ + Iterable getMatchingIntervals(RangeCondition timeCondition, + Predicate extraPredicate); + + /** + * Return the total header size of this node (will depend on the node type). + * + * @return The total header size + */ + int getTotalHeaderSize(); + + /** + * Returns the free space left in the node to write objects + * + * @return The amount of free space in the node (in bytes) + */ + int getNodeFreeSpace(); + + /** + * Returns the current space utilization of this node, as a percentage. + * (used space / total usable space, which excludes the header) + * + * @return The percentage (value between 0 and 100) of space utilization in + * this node. + */ + long getNodeUsagePercent(); + + /** + * Get the type of this node + * + * @return The node type + */ + NodeType getNodeType(); + + /** + * Return whether this node has elements in it or is empty + * + * @return true if the node is empty + */ + boolean isEmpty(); + + // --------------------------------------- + // Methods for nodes with children. Leaf nodes can use these default methods + // --------------------------------------- + + /** + * Return the number of child nodes this node has. + * + * @return The number of child nodes + */ + default int getNbChildren() { + return 0; + } + + /** + * Get the child node corresponding to the specified index. It will throw an + * {@link IndexOutOfBoundsException} if there is no children at this index. 
+ * + * @param index + * The index of the child to lookup + * @return The child node + */ + default int getChild(int index) { + throw new IndexOutOfBoundsException("This node does not have any children"); //$NON-NLS-1$ + } + + /** + * Get the latest (right-most) child node of this node. This applies only if + * the node is allowed to have children, ie is a {@link NodeType#CORE} node, + * otherwise this method is not supported. + * + * @return The latest child node + */ + default int getLatestChild() { + throw new UnsupportedOperationException("This node does not support children"); //$NON-NLS-1$ + } + + /** + * Tell this node that it has a new child. This applies only if the node is + * allowed to have children, ie is a {@link NodeType#CORE} node, otherwise + * this method is not supported. + * + * @param childNode + * The new child node to add to this one + */ + default void linkNewChild(IHTNode childNode) { + throw new UnsupportedOperationException("This node does not support children"); //$NON-NLS-1$ + } + + /** + * Method to select the sequence numbers for the children of the current + * node that intersect the given timestamp. Useful when navigating the tree. 
+ * + * @param timeCondition + * The time-based range condition to choose which child is the next one + * @return Collection of sequence numbers of the child nodes that intersect + * the time condition, non-null empty collection if this is a Leaf Node + * @throws RangeException + * If t is out of the node's range + */ + default Collection selectNextChildren(RangeCondition timeCondition) { + return Collections.emptyList(); + } + +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHistoryTree.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHistoryTree.java new file mode 100644 index 0000000000..76614371d3 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/IHistoryTree.java @@ -0,0 +1,184 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. 
This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.util.function.Predicate; + +import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition; +import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException; +import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval; + +/** + * An interface for trees on disk that save temporal data. + * + * These trees will typically be built from the leaf up, so they will have 2 + * node types: + *
+ * <ul>
+ * <li>Leaf nodes which have no children and just contain on disk objects</li>
+ * <li>Core nodes which have children nodes and will save in their headers all
+ * the information necessary to retrieve their children optimally</li>
+ * </ul>
+ * + * The {@link AbstractHistoryTree} supply a base implementation of the tree with + * all the logic to write the data on disk and can be extended to just add the + * specific behavior of an implementation. + * + * A Base class for the nodes are also available: {@link HTNode}, the base class + * for all nodes, it contains a subclass {@link HTNode.CoreNodeData} that can + * be extended to save header or any other data concerning the children of a + * node for example, or any other type of data to save in the header of a node. + * + * @author Geneviève Bastien + * @param + * The type of objects that will be saved in the tree + */ +public interface IHistoryTree { + + /** + * Size of the "tree header" in the tree-file The nodes will use this offset + * to know where they should be in the file. This should always be a + * multiple of 4K. + */ + public static final int TREE_HEADER_SIZE = 4096; + + // ------------------------------------------------------------------------ + // Accessors + // ------------------------------------------------------------------------ + + /** + * Get the start time of this tree. + * + * @return The start time + */ + long getTreeStart(); + + /** + * Get the current end time of this tree. + * + * @return The end time + */ + long getTreeEnd(); + + /** + * Get the current size of the history file. + * + * @return The history file size + */ + long getFileSize(); + + // ------------------------------------------------------------------------ + // HtIo interface + // ------------------------------------------------------------------------ + + /** + * Close the history file. + * + * Once the file is closed, the history tree cannot be queried anymore. + * Querying it might throw {@link ClosedChannelException} or have + * unpredictable results. + */ + void closeFile(); + + /** + * Delete the history file. + * + * Once the file is closed, the history tree cannot be queried anymore. 
+ * Querying it might throw {@link ClosedChannelException} or have + * unpredictable results. + */ + void deleteFile(); + + /** + * Creates a new empty tree file and removes the previous file. The history + * tree can still be queried but will be empty after the call to this + * method. + * + * @throws IOException + * Exceptions thrown when deleting or creating the file + */ + void cleanFile() throws IOException; + + // ------------------------------------------------------------------------ + // Operations + // ------------------------------------------------------------------------ + + /** + * Insert an interval into the tree. + * + * @param interval + * The interval to be inserted + * @throws RangeException + * If the start or end time of the object are invalid + */ + void insert(E interval) throws RangeException; + + /** + * "Save" the tree to disk. This method will cause the treeIO object to + * commit all nodes to disk and the header of the tree should also be saved + * on disk + * + * @param requestedEndTime + * The greatest timestamp present in the history tree + */ + void closeTree(long requestedEndTime); + + /** + * Query the tree to retrieve the intervals matching the given conditions. + * + * @param timeCondition + * Time-based RangeCondition, can represent a single timestamp, a + * series of punctual timestamps, or a time range. + * @param extraPredicate + * Extra check to run on the elements to determine if they should + * be returned or not. This will be checked at the node level, so + * if it's known in advance it might be advantageous to pass it + * here rather than checking it yourself on the returned + * Iterable. 
+ * @return An Iterable of the matching elements + */ + Iterable getMatchingIntervals(RangeCondition timeCondition, + Predicate extraPredicate); + + // ------------------------------------------------------------------------ + // Attribute-tree reading/writing operations + // + // FIXME These are statesystem-specific, should be removed from this + // interface and its implementations. The SS should save that info + // in a separate file + // ------------------------------------------------------------------------ + + /** + * Return the FileInputStream reader with which we will read an attribute + * tree (it will be sought to the correct position). + * + * @return The FileInputStream indicating the file and position from which + * the attribute tree can be read. + */ + FileInputStream supplyATReader(); + + /** + * Return the file to which we will write the attribute tree. + * + * @return The file to which we will write the attribute tree + */ + File supplyATWriterFile(); + + /** + * Return the position in the file (given by {@link #supplyATWriterFile}) + * where to start writing the attribute tree. + * + * @return The position in the file where to start writing + */ + long supplyATWriterFilePos(); +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/package-info.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/package-info.java new file mode 100644 index 0000000000..ce894f3f60 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/package-info.java @@ -0,0 +1,11 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. 
This program and the accompanying materials are made + * available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +@org.eclipse.jdt.annotation.NonNullByDefault +package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree; diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/HTInterval.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/HTInterval.java new file mode 100644 index 0000000000..6fe493eddc --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/HTInterval.java @@ -0,0 +1,100 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.interval; + +import java.util.Objects; +import java.util.StringJoiner; + +import org.eclipse.jdt.annotation.NonNull; +import org.eclipse.jdt.annotation.Nullable; +import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferWriter; + +/** + * Basic implementation of {@link IHTInterval}. 
+ * + * @author Geneviève Bastien + */ +public class HTInterval implements IHTInterval { + + private final long fStart; + private final long fEnd; + + /** + * The object to use to read a BaseHtObject from the disk + */ + public static final IHTIntervalReader INTERVAL_READER = + (buffer) -> new HTInterval(buffer.getLong(), buffer.getLong()); + + /** + * Create a new segment. + * + * The end position should be equal to or greater than the start position. + * + * @param start + * Start position of the segment + * @param end + * End position of the segment + */ + public HTInterval(long start, long end) { + if (end < start) { + throw new IllegalArgumentException(); + } + fStart = start; + fEnd = end; + } + + @Override + public long getStart() { + return fStart; + } + + @Override + public long getEnd() { + return fEnd; + } + + @Override + public String toString() { + return (new StringJoiner(", ", "[", "]")) //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ + .add(String.valueOf(fStart)) + .add(String.valueOf(fEnd)) + .toString(); + } + + @Override + public int getSizeOnDisk() { + return 2 * Long.BYTES; + } + + @Override + public void writeSegment(@NonNull ISafeByteBufferWriter buffer) { + buffer.putLong(fStart); + buffer.putLong(fEnd); + } + + @Override + public int hashCode() { + return Objects.hash(fStart, fEnd); + } + + @Override + public boolean equals(@Nullable Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + HTInterval other = (HTInterval) obj; + return (fStart == other.fStart + && fEnd == other.fEnd); + } + +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTInterval.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTInterval.java new file mode 100644 index 0000000000..75769a048c --- /dev/null +++ 
b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTInterval.java @@ -0,0 +1,49 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.interval; + +/** + * Generic interface for any serializable object (like a time range) that can be used in the + * generic history tree. + * + * @author Alexandre Montplaisir + * @author Geneviève Bastien + */ +public interface IHTInterval extends ISerializableObject { + + /** + * The start position/time of the object. + * + * @return The start position + */ + long getStart(); + + /** + * The end position/time of the object + * + * @return The end position + */ + long getEnd(); + + /** + * Utility method to check if the current interval intersects a timestamp. 
+ * + * @param timestamp + * The timestamp to check + * @return If it intersects or not + */ + default boolean intersects(long timestamp) { + if (getStart() <= timestamp && timestamp <= getEnd()) { + return true; + } + return false; + } + +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTIntervalReader.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTIntervalReader.java new file mode 100644 index 0000000000..df2f9dfcef --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/IHTIntervalReader.java @@ -0,0 +1,32 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.interval; + +import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader; + +/** + * A factory that reads object from a byte buffer and create a new object + * + * @author Geneviève Bastien + * @param + * The type of objects that will be read + */ +@FunctionalInterface +public interface IHTIntervalReader { + + /** + * Method to deserialize segments to disk for Segment History Tree + * + * @param buffer + * HTNode buffer to read from + * @return the Segment read from the buffer + */ + E readInterval(ISafeByteBufferReader buffer); +} diff --git 
a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/ISerializableObject.java b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/ISerializableObject.java new file mode 100644 index 0000000000..9baae3c414 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/ISerializableObject.java @@ -0,0 +1,36 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. This program and the accompanying materials are + * made available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +package org.eclipse.tracecompass.internal.provisional.datastore.core.interval; + +import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferWriter; + +/** + * An object that can be serialized + * + * @author Geneviève Bastien + */ +public interface ISerializableObject { + + /** + * Get the size on disk in bytes of an object + * + * @return the size occupied by this segment when stored in a Segment + * History Tree (in bytes) + */ + int getSizeOnDisk(); + + /** + * Method to serialize an object to a safe byte buffer + * + * @param buffer + * The safe byte buffer to write to + */ + void writeSegment(ISafeByteBufferWriter buffer); +} diff --git a/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/package-info.java 
b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/package-info.java new file mode 100644 index 0000000000..b7db7bb060 --- /dev/null +++ b/statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/interval/package-info.java @@ -0,0 +1,11 @@ +/******************************************************************************* + * Copyright (c) 2017 École Polytechnique de Montréal + * + * All rights reserved. This program and the accompanying materials are made + * available under the terms of the Eclipse Public License v1.0 which + * accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + *******************************************************************************/ + +@org.eclipse.jdt.annotation.NonNullByDefault +package org.eclipse.tracecompass.internal.provisional.datastore.core.interval; -- 2.34.1