/*******************************************************************************
 * Copyright (c) 2017 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/

package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition;
import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.AbstractHistoryTree.IHTNodeFactory;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;
import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader;
import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.SafeByteBufferFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 * @author Geneviève Bastien
 * @param <E>
 *            The type of objects that will be saved in the tree
 */
public class HTNode<E extends IHTInterval> implements IHTNode<E> {

    // ------------------------------------------------------------------------
    // Constants
    // ------------------------------------------------------------------------

    /**
     * Size of the basic node header. This is backward-compatible with previous
     * state system history trees:
     *
     * <pre>
     *  1 - byte (the type of node)
     * 16 - 2x long (start time, end time)
     * 12 - 3x int (seq number, parent seq number, interval count)
     *  1 - byte (done or not)
     * </pre>
     */
    @VisibleForTesting
    protected static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 3 * Integer.BYTES
            + Byte.BYTES;
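    /* Total common header size: 1 + 16 + 12 + 1 = 30 bytes */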

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /**
     * Default implementation of the interval comparator, which sorts first by
     * end times, then by start times
     */
    private final Comparator<E> fDefaultIntervalComparator = Comparator
            .comparingLong(E::getEnd)
            .thenComparingLong(E::getStart);
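
    /*
     * Sorting by end time first means closeThisNode() can validate the node's
     * end time by checking only the last interval in the (sorted) list.
     */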

    /* Node configuration, defined by the history tree */
    private final int fBlockSize;
    private final int fMaxChildren;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all objects in the node */
    private int fSizeOfContentSection;

    /**
     * True if this node was saved on disk (meaning its end time is now fixed)
     */
    private volatile boolean fIsOnDisk;

    /* List containing all the intervals contained in this node */
    private final List<E> fIntervals;

    /* Lock used to protect the accesses to objects, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /* Object containing extra data for core nodes */
    private final @Nullable CoreNodeData fExtraData;

    /**
     * A class that encapsulates data about children of this node. This class
     * will be constructed by the core node and contains the extra header data,
     * methods to read/write the header data, etc.
     *
     * This base class for CORE nodes just saves the children, i.e. their
     * sequence numbers.
     *
     * @author Geneviève Bastien
     */
    protected static class CoreNodeData {

        /** Back-reference to the node class */
        private final HTNode<?> fNode;

        /**
         * Seq. numbers of the children nodes (size = max number of nodes,
         * determined by configuration)
         */
        private final int[] fChildren;

        /** Nb. of children this node has */
        private int fNbChildren;

        /**
         * Constructor
         *
         * @param node
         *            The node containing this extra data.
         */
        public CoreNodeData(HTNode<?> node) {
            fNode = node;
            fNbChildren = 0;
            /*
             * We instantiate the following array at full size right away, since
             * we want to reserve that space in the node's header. "fNbChildren"
             * will tell us how many relevant entries there are in those tables.
             */
            fChildren = new int[fNode.fMaxChildren];
        }

        /**
         * Read the specific header for this extra node data
         *
         * @param buffer
         *            The byte buffer in which to read
         */
        protected void readSpecificHeader(ByteBuffer buffer) {
            fNode.takeWriteLock();
            try {
                int size = fNode.getMaxChildren();

                fNbChildren = buffer.getInt();

                for (int i = 0; i < fNbChildren; i++) {
                    fChildren[i] = buffer.getInt();
                }
                /* Skip the header entries that are not in use */
                for (int i = fNbChildren; i < size; i++) {
                    buffer.getInt();
                }
            } finally {
                fNode.releaseWriteLock();
            }
        }

        /**
         * Write the specific header for this extra node data
         *
         * @param buffer
         *            The byte buffer in which to write
         */
        protected void writeSpecificHeader(ByteBuffer buffer) {
            fNode.takeReadLock();
            try {
                buffer.putInt(fNbChildren);

                /* Write the "children's seq number" array */
                for (int i = 0; i < fNbChildren; i++) {
                    buffer.putInt(fChildren[i]);
                }
                /* Pad the rest of the children array with zeros */
                for (int i = fNbChildren; i < fNode.getMaxChildren(); i++) {
                    buffer.putInt(0);
                }
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the number of children
         *
         * @return The number of children
         */
        public int getNbChildren() {
            fNode.takeReadLock();
            try {
                return fNbChildren;
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the child sequence number at an index
         *
         * @param index
         *            The index of the child to get
         * @return The sequence number of the child node
         */
        public int getChild(int index) {
            fNode.takeReadLock();
            try {
                if (index >= fNbChildren) {
                    throw new IndexOutOfBoundsException("The child at index " + index + " does not exist"); //$NON-NLS-1$ //$NON-NLS-2$
                }
                return fChildren[index];
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the sequence number of the last child node of this one
         *
         * @return The sequence number of the last child
         */
        public int getLatestChild() {
            fNode.takeReadLock();
            try {
                return fChildren[fNbChildren - 1];
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Add a new child to this node's data
         *
         * @param childNode
         *            The child node to add
         */
        public void linkNewChild(IHTNode<?> childNode) {
            fNode.takeWriteLock();
            try {
                if (fNbChildren >= fNode.getMaxChildren()) {
                    throw new IllegalStateException("Asked to link another child but parent already has maximum number of children"); //$NON-NLS-1$
                }

                fChildren[fNbChildren] = childNode.getSequenceNumber();
                fNbChildren++;
            } finally {
                fNode.releaseWriteLock();
            }
        }

        /**
         * Get the size of the extra header space necessary for this node's
         * extra data
         *
         * @return The extra header size
         */
        protected int getSpecificHeaderSize() {
            int maxChildren = fNode.getMaxChildren();
            int specificSize = Integer.BYTES /* 1x int (nbChildren) */
                    /* MAX_NB * int ('children' table) */
                    + Integer.BYTES * maxChildren;
            return specificSize;
        }

        @Override
        public String toString() {
            /* Only used for debugging, shouldn't be externalized */
            return String.format("Core Node, %d children %s", //$NON-NLS-1$
                    fNbChildren, Arrays.toString(Arrays.copyOf(fChildren, fNbChildren)));
        }

        /**
         * Inner method to select the sequence numbers for the children of the
         * current node that intersect the given timestamp. Useful for moving
         * down the tree.
         *
         * @param timeCondition
         *            The time-based RangeCondition to choose which children
         *            match
         * @return Collection of sequence numbers of the child nodes that
         *         intersect the time condition, non-null empty collection if
         *         this is a Leaf Node
         */
        public final Collection<Integer> selectNextChildren(RangeCondition<Long> timeCondition) {
            fNode.takeReadLock();
            try {
                return selectNextIndices(timeCondition).stream()
                        .map(i -> fChildren[i])
                        .collect(Collectors.toList());
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Inner method to select the indices of the children of the current
         * node that intersect the given timestamp. Useful for moving down the
         * tree.
         *
         * This is the method that child implementations of this node should
         * override. They may call
         * <code>super.selectNextIndices(timeCondition)</code> to get access to
         * the subset of indices that match the parent's condition and add their
         * own filters. When this method is called, a read lock is already taken
         * on the node.
         *
         * @param timeCondition
         *            The time-based RangeCondition to choose which children
         *            match
         * @return Collection of the indices of the child nodes that intersect
         *         the time condition
         */
        protected Collection<Integer> selectNextIndices(RangeCondition<Long> timeCondition) {
            /* By default, all children are returned */
            List<Integer> childList = new ArrayList<>();
            for (int i = 0; i < fNbChildren; i++) {
                childList.add(i);
            }
            return childList;
        }
    }

    /**
     * Constructor
     *
     * @param type
     *            The type of this node
     * @param blockSize
     *            The size (in bytes) of a serialized node on disk
     * @param maxChildren
     *            The maximum allowed number of children per node
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    public HTNode(NodeType type, int blockSize, int maxChildren, int seqNumber,
            int parentSeqNumber, long start) {
        fBlockSize = blockSize;
        fMaxChildren = maxChildren;

        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fSizeOfContentSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();

        fExtraData = createNodeExtraData(type);
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by reading
     * a block in the file.
     *
     * @param blockSize
     *            The size (in bytes) of a serialized node on disk
     * @param maxChildren
     *            The maximum allowed number of children per node
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @param objectReader
     *            The reader to read serialized node objects
     * @param nodeFactory
     *            The factory to create the nodes for this tree
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final <E extends IHTInterval, N extends HTNode<E>> N readNode(
            int blockSize,
            int maxChildren,
            FileChannel fc,
            IHTIntervalReader<E> objectReader,
            IHTNodeFactory<E, N> nodeFactory) throws IOException {

        N newNode;

        ByteBuffer buffer = ByteBuffer.allocate(blockSize);
        buffer.order(ByteOrder.LITTLE_ENDIAN);

        int res = fc.read(buffer);
        if (res != blockSize) {
            throw new IOException("The block for the HTNode is not the right size: " + res); //$NON-NLS-1$
        }
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
        case LEAF:
            newNode = nodeFactory.createNode(type, blockSize, maxChildren, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        ISafeByteBufferReader readBuffer = SafeByteBufferFactory.wrapReader(buffer, res - buffer.position());
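        /* Each interval deserializes itself from the size-capped safe buffer */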
        for (int i = 0; i < intervalCount; i++) {
            E interval = objectReader.readInterval(readBuffer);
            newNode.add(interval);
        }

        /* Assign the node's other information we have read previously */
        newNode.setNodeEnd(end);
        newNode.setOnDisk();

        return newNode;
    }

    /**
     * Create a node's extra data object for the given node type
     *
     * @param type
     *            The type of node
     * @return The node's extra data object, or <code>null</code> if there is
     *         none
     */
    protected @Nullable CoreNodeData createNodeExtraData(NodeType type) {
        if (type == NodeType.CORE) {
            return new CoreNodeData(this);
        }
        return null;
    }

    @Override
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = getBlockSize();

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            fIntervals.forEach(i -> i.writeSegment(SafeByteBufferFactory.wrapWriter(buffer, i.getSizeOnDisk())));
            if (blockSize - buffer.position() != getNodeFreeSpace()) {
                throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$
            }
            /*
             * Fill the rest with zeros
             */
            while (buffer.position() < blockSize) {
                buffer.put((byte) 0);
            }

            /* Finally, write everything in the Buffer to disk */
            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }
        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Get this node's block size.
     *
     * @return The block size
     */
    protected final int getBlockSize() {
        return fBlockSize;
    }

    /**
     * Get this node's maximum amount of children.
     *
     * @return The maximum amount of children
     */
    protected final int getMaxChildren() {
        return fMaxChildren;
    }

    /**
     * Get the interval comparator. Intervals will always be stored sorted
     * according to this comparator. This can be used by insertion or retrieval
     * algorithms.
     *
     * Sub-classes may override this to change or specify the interval
     * comparator.
     *
     * @return The way intervals are to be sorted in this node
     */
    protected Comparator<E> getIntervalComparator() {
        return fDefaultIntervalComparator;
    }

    @Override
    public long getNodeStart() {
        return fNodeStart;
    }

    @Override
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return Long.MAX_VALUE;
    }

    @Override
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    @Override
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    @Override
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    @Override
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Get the node's extra data.
     *
     * @return The node extra data
     */
    protected @Nullable CoreNodeData getCoreNodeData() {
        return fExtraData;
    }

    /**
     * Get the list of objects in this node. This list is immutable. All objects
     * must be inserted through the {@link #add(IHTInterval)} method
     *
     * @return The list of intervals in this node
     */
    protected List<E> getIntervals() {
        return ImmutableList.copyOf(fIntervals);
    }

    /**
     * Set this node's end time. Called by the reader factory.
     *
     * @param end
     *            The end time of the node
     */
    protected void setNodeEnd(long end) {
        fNodeEnd = end;
    }

    /**
     * Set this node to be on disk. Called by the reader factory.
     */
    protected void setOnDisk() {
        fIsOnDisk = true;
    }

    @Override
    public void add(E newInterval) {
        fRwl.writeLock().lock();
        try {
            /*
             * Just in case, should be checked before even calling this function
             */
            int objSize = newInterval.getSizeOnDisk();
            if (objSize > getNodeFreeSpace()) {
                throw new IllegalArgumentException("The interval to insert (" + objSize + ") is larger than available space (" + getNodeFreeSpace() + ")"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
            }

            int insertPoint = Collections.binarySearch(fIntervals, newInterval, getIntervalComparator());
            insertPoint = (insertPoint >= 0 ? insertPoint : -insertPoint - 1);
            fIntervals.add(insertPoint, newInterval);

            fSizeOfContentSection += objSize;

        } finally {
            fRwl.writeLock().unlock();
        }
    }

    @Override
    public Iterable<E> getMatchingIntervals(RangeCondition<Long> timeCondition,
            Predicate<E> extraPredicate) {

        // TODO Use getIntervalComparator() to restrict the dataset further
        // TODO Benchmark using/returning streams instead of iterables

        if (isOnDisk()) {
            @NonNull Iterable<E> ret = fIntervals;
            ret = Iterables.filter(ret, interval -> timeCondition.intersects(interval.getStart(), interval.getEnd()));
            ret = Iterables.filter(ret, interval -> extraPredicate.test(interval));
            return ret;
        }

        takeReadLock();
        try {
            return fIntervals.stream()
                    .filter(interval -> timeCondition.intersects(interval.getStart(), interval.getEnd()))
                    .filter(extraPredicate)
                    /*
                     * Because this class works with read locks, we can't
                     * return a lazy stream unfortunately. Room for improvement?
                     */
                    .collect(Collectors.toList());
        } finally {
            releaseReadLock();
        }
    }

    @Override
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /*
             * FIXME: was assert (endtime >= fNodeStart); but that exception is
             * reached with an empty node that has start time endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < fIntervals.get(fIntervals.size() - 1).getEnd()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the objects of this node"); //$NON-NLS-1$
                }
            }
            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    @Override
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfContentSection;
    }

    @Override
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        try {
            int ret = getBlockSize() - getDataSectionEndOffset();
            return ret;
        } finally {
            fRwl.readLock().unlock();
        }
    }

    @Override
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
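            /*
             * Usage is measured against the payload area only (block size minus
             * the header), so a node whose data section is full reports 100%.
             */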
            final int blockSize = getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }

    @Override
    public NodeType getNodeType() {
        CoreNodeData extraData = getCoreNodeData();
        if (extraData == null) {
            return NodeType.LEAF;
        }
        return NodeType.CORE;
    }

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected int getSpecificHeaderSize() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getSpecificHeaderSize();
        }
        return 0;
    }

    /**
     * Read the specific header for this node
     *
     * @param buffer
     *            The buffer to read from
     */
    protected void readSpecificHeader(ByteBuffer buffer) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.readSpecificHeader(buffer);
        }
    }

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected void writeSpecificHeader(ByteBuffer buffer) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.writeSpecificHeader(buffer);
        }
    }

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected String toStringSpecific() {
        return ""; //$NON-NLS-1$
    }

    @Override
    public boolean isEmpty() {
        return fIntervals.isEmpty();
    }

    // -------------------------------------------
    // Core node methods
    // -------------------------------------------

    @Override
    public int getNbChildren() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getNbChildren();
        }
        return IHTNode.super.getNbChildren();
    }

    @Override
    public int getChild(int index) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getChild(index);
        }
        return IHTNode.super.getChild(index);
    }

    @Override
    public int getLatestChild() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getLatestChild();
        }
        return IHTNode.super.getLatestChild();
    }

    @Override
    public void linkNewChild(@NonNull IHTNode<E> childNode) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.linkNewChild(childNode);
            return;
        }
        IHTNode.super.linkNewChild(childNode);
    }

    @Override
    public Collection<Integer> selectNextChildren(RangeCondition<Long> timeCondition)
            throws RangeException {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.selectNextChildren(timeCondition);
        }
        return IHTNode.super.selectNextChildren(timeCondition);
    }

    // -----------------------------------------
    // Locking
    // -----------------------------------------

    /**
     * Takes a read lock on the fields of this class. Each call to this method
     * should be followed by a {@link HTNode#releaseReadLock()}, in a
     * try/finally block.
     */
    protected void takeReadLock() {
        fRwl.readLock().lock();
    }

    /**
     * Releases a read lock on the fields of this class. A call to this method
     * should have been preceded by a call to {@link HTNode#takeReadLock()}.
     */
    protected void releaseReadLock() {
        fRwl.readLock().unlock();
    }

    /**
     * Takes a write lock on the fields of this class. Each call to this method
     * should be followed by a {@link HTNode#releaseWriteLock()}, in a
     * try/finally block.
     */
    protected void takeWriteLock() {
        fRwl.writeLock().lock();
    }

    /**
     * Releases a write lock on the fields of this class. A call to this method
     * should have been preceded by a call to {@link HTNode#takeWriteLock()}.
     */
    protected void releaseWriteLock() {
        fRwl.writeLock().unlock();
    }

    // -----------------------------------------
    // Object methods
    // -----------------------------------------

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }

    @Override
    public int hashCode() {
        return Objects.hash(fBlockSize, fMaxChildren, fNodeStart, fNodeEnd, fSequenceNumber, fParentSequenceNumber);
    }

    @Override
    public boolean equals(@Nullable Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        HTNode<? extends IHTInterval> other = (HTNode<?>) obj;
        return (fBlockSize == other.fBlockSize &&
                fMaxChildren == other.fMaxChildren &&
                fNodeStart == other.fNodeStart &&
                fNodeEnd == other.fNodeEnd &&
                fSequenceNumber == other.fSequenceNumber &&
                fParentSequenceNumber == other.fParentSequenceNumber);
    }
}