1 /*******************************************************************************
2 * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
11 * Florian Wininger - Add Extension and Leaf Node
12 * Patrick Tasse - Keep interval list sorted on insert
13 *******************************************************************************/
15 package org
.eclipse
.tracecompass
.internal
.statesystem
.core
.backend
.historytree
;
17 import java
.io
.IOException
;
18 import java
.io
.PrintWriter
;
19 import java
.nio
.ByteBuffer
;
20 import java
.nio
.ByteOrder
;
21 import java
.nio
.channels
.FileChannel
;
22 import java
.util
.ArrayList
;
23 import java
.util
.Collections
;
24 import java
.util
.Comparator
;
25 import java
.util
.List
;
26 import java
.util
.concurrent
.locks
.ReentrantReadWriteLock
;
28 import org
.eclipse
.jdt
.annotation
.NonNull
;
29 import org
.eclipse
.tracecompass
.statesystem
.core
.exceptions
.TimeRangeException
;
30 import org
.eclipse
.tracecompass
.statesystem
.core
.interval
.ITmfStateInterval
;
31 import org
.eclipse
.tracecompass
.statesystem
.core
.statevalue
.TmfStateValue
;
33 import com
.google
.common
.collect
.Iterables
;
/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
40 public abstract class HTNode
{
42 // ------------------------------------------------------------------------
44 // ------------------------------------------------------------------------
49 public static enum NodeType
{
51 * Core node, which is a "front" node, at any level of the tree except
52 * the bottom-most one. It has children, and may have extensions.
56 * Leaf node, which is a node at the last bottom level of the tree. It
57 * cannot have any children or extensions.
62 * Determine a node type by reading a serialized byte.
65 * The byte representation of the node type
66 * @return The corresponding NodeType
68 * If the NodeType is unrecognized
70 public static NodeType
fromByte(byte rep
) throws IOException
{
77 throw new IOException();
82 * Get the byte representation of this node type. It can then be read
83 * with {@link #fromByte}.
85 * @return The byte matching this node type
87 public byte toByte() {
94 throw new IllegalStateException();
102 * 16 - 2x long (start time, end time)
103 * 16 - 3x int (seq number, parent seq number, intervalcount)
104 * 1 - byte (done or not)
107 private static final int COMMON_HEADER_SIZE
= Byte
.BYTES
112 // ------------------------------------------------------------------------
114 // ------------------------------------------------------------------------
116 /* Configuration of the History Tree to which belongs this node */
117 private final HTConfig fConfig
;
119 /* Time range of this node */
120 private final long fNodeStart
;
121 private long fNodeEnd
;
123 /* Sequence number = position in the node section of the file */
124 private final int fSequenceNumber
;
125 private int fParentSequenceNumber
; /* = -1 if this node is the root node */
127 /* Sum of bytes of all intervals in the node */
128 private int fSizeOfIntervalSection
;
130 /* True if this node was read from disk (meaning its end time is now fixed) */
131 private volatile boolean fIsOnDisk
;
133 /* Vector containing all the intervals contained in this node */
134 private final List
<HTInterval
> fIntervals
;
136 /* Lock used to protect the accesses to intervals, nodeEnd and such */
137 private final ReentrantReadWriteLock fRwl
= new ReentrantReadWriteLock(false);
139 /** Order of intervals in a HTNode: sorted by end times, then by start times. */
140 private static final Comparator
<ITmfStateInterval
> NODE_ORDER
= Comparator
141 .comparingLong(ITmfStateInterval
::getEndTime
)
142 .thenComparingLong(ITmfStateInterval
::getStartTime
)
143 .thenComparingInt(ITmfStateInterval
::getAttribute
);
149 * Configuration of the History Tree
151 * The (unique) sequence number assigned to this particular node
152 * @param parentSeqNumber
153 * The sequence number of this node's parent node
155 * The earliest timestamp stored in this node
157 protected HTNode(HTConfig config
, int seqNumber
, int parentSeqNumber
, long start
) {
160 fSequenceNumber
= seqNumber
;
161 fParentSequenceNumber
= parentSeqNumber
;
163 fSizeOfIntervalSection
= 0;
165 fIntervals
= new ArrayList
<>();
169 * Reader factory method. Build a Node object (of the right type) by reading
170 * a block in the file.
173 * Configuration of the History Tree
175 * FileChannel to the history file, ALREADY SEEKED at the start
178 * The factory to create the nodes for this tree
179 * @return The node object
180 * @throws IOException
181 * If there was an error reading from the file channel
183 public static final @NonNull HTNode
readNode(HTConfig config
, FileChannel fc
, IHistoryTree
.IHTNodeFactory nodeFactory
)
185 HTNode newNode
= null;
188 ByteBuffer buffer
= ByteBuffer
.allocate(config
.getBlockSize());
189 buffer
.order(ByteOrder
.LITTLE_ENDIAN
);
191 res
= fc
.read(buffer
);
192 assert (res
== config
.getBlockSize());
195 /* Read the common header part */
196 byte typeByte
= buffer
.get();
197 NodeType type
= NodeType
.fromByte(typeByte
);
198 long start
= buffer
.getLong();
199 long end
= buffer
.getLong();
200 int seqNb
= buffer
.getInt();
201 int parentSeqNb
= buffer
.getInt();
202 int intervalCount
= buffer
.getInt();
203 buffer
.get(); // TODO Used to be "isDone", to be removed from the header
205 /* Now the rest of the header depends on the node type */
209 newNode
= nodeFactory
.createCoreNode(config
, seqNb
, parentSeqNb
, start
);
210 newNode
.readSpecificHeader(buffer
);
215 newNode
= nodeFactory
.createLeafNode(config
, seqNb
, parentSeqNb
, start
);
216 newNode
.readSpecificHeader(buffer
);
220 /* Unrecognized node type */
221 throw new IOException();
225 * At this point, we should be done reading the header and 'buffer'
226 * should only have the intervals left
228 for (i
= 0; i
< intervalCount
; i
++) {
229 HTInterval interval
= HTInterval
.readFrom(buffer
);
230 newNode
.fIntervals
.add(interval
);
231 newNode
.fSizeOfIntervalSection
+= interval
.getSizeOnDisk();
234 /* Assign the node's other information we have read previously */
235 newNode
.fNodeEnd
= end
;
236 newNode
.fIsOnDisk
= true;
242 * Write this node to the given file channel.
245 * The file channel to write to (should be sought to be correct
247 * @throws IOException
248 * If there was an error writing
250 public final void writeSelf(FileChannel fc
) throws IOException
{
252 * Yes, we are taking the *read* lock here, because we are reading the
253 * information in the node to write it to disk.
255 fRwl
.readLock().lock();
257 final int blockSize
= fConfig
.getBlockSize();
259 ByteBuffer buffer
= ByteBuffer
.allocate(blockSize
);
260 buffer
.order(ByteOrder
.LITTLE_ENDIAN
);
263 /* Write the common header part */
264 buffer
.put(getNodeType().toByte());
265 buffer
.putLong(fNodeStart
);
266 buffer
.putLong(fNodeEnd
);
267 buffer
.putInt(fSequenceNumber
);
268 buffer
.putInt(fParentSequenceNumber
);
269 buffer
.putInt(fIntervals
.size());
270 buffer
.put((byte) 1); // TODO Used to be "isDone", to be removed from header
272 /* Now call the inner method to write the specific header part */
273 writeSpecificHeader(buffer
);
275 /* Back to us, we write the intervals */
276 fIntervals
.forEach(i
-> i
.writeInterval(buffer
));
277 if (blockSize
- buffer
.position() != getNodeFreeSpace()) {
278 throw new IllegalStateException("Wrong free space: Actual: " + (blockSize
- buffer
.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$
281 * Fill the rest with zeros
283 while (buffer
.position() < blockSize
) {
284 buffer
.put((byte) 0);
287 /* Finally, write everything in the Buffer to disk */
289 int res
= fc
.write(buffer
);
290 if (res
!= blockSize
) {
291 throw new IllegalStateException("Wrong size of block written: Actual: " + res
+ ", Expected: " + blockSize
); //$NON-NLS-1$ //$NON-NLS-2$
295 fRwl
.readLock().unlock();
300 // ------------------------------------------------------------------------
302 // ------------------------------------------------------------------------
305 * Retrieve the history tree configuration used for this node.
307 * @return The history tree config
309 protected HTConfig
getConfig() {
314 * Get the start time of this node.
316 * @return The start time of this node
318 public long getNodeStart() {
323 * Get the end time of this node.
325 * @return The end time of this node
327 public long getNodeEnd() {
335 * Get the sequence number of this node.
337 * @return The sequence number of this node
339 public int getSequenceNumber() {
340 return fSequenceNumber
;
344 * Get the sequence number of this node's parent.
346 * @return The parent sequence number
348 public int getParentSequenceNumber() {
349 return fParentSequenceNumber
;
353 * Change this node's parent. Used when we create a new root node for
357 * The sequence number of the node that is the new parent
359 public void setParentSequenceNumber(int newParent
) {
360 fParentSequenceNumber
= newParent
;
364 * Return if this node is "done" (full and written to disk).
366 * @return If this node is done or not
368 public boolean isOnDisk() {
373 * Add an interval to this node
376 * Interval to add to this node
378 public void addInterval(HTInterval newInterval
) {
379 fRwl
.writeLock().lock();
381 /* Just in case, should be checked before even calling this function */
382 assert (newInterval
.getSizeOnDisk() <= getNodeFreeSpace());
384 /* Find the insert position to keep the list sorted */
386 if (!fIntervals
.isEmpty()) {
387 index
= Collections
.binarySearch(fIntervals
, newInterval
, NODE_ORDER
);
389 * Interval should not already be in the node, binarySearch will
390 * return (-insertionPoint - 1).
395 fIntervals
.add(index
, newInterval
);
396 fSizeOfIntervalSection
+= newInterval
.getSizeOnDisk();
399 fRwl
.writeLock().unlock();
404 * We've received word from the containerTree that newest nodes now exist to
405 * our right. (Puts isDone = true and sets the endtime)
408 * The nodeEnd time that the node will have
410 public void closeThisNode(long endtime
) {
411 fRwl
.writeLock().lock();
414 * FIXME: was assert (endtime >= fNodeStart); but that exception
415 * is reached with an empty node that has start time endtime + 1
417 // if (endtime < fNodeStart) {
418 // throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
421 if (!fIntervals
.isEmpty()) {
423 * Make sure there are no intervals in this node with their
424 * EndTime > the one requested. Only need to check the last one
425 * since they are sorted
427 if (endtime
< Iterables
.getLast(fIntervals
).getEndTime()) {
428 throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
434 fRwl
.writeLock().unlock();
439 * The method to fill up the stateInfo (passed on from the Current State
440 * Tree when it does a query on the SHT). We'll replace the data in that
441 * vector with whatever relevant we can find from this node
444 * The same stateInfo that comes from SHT's doQuery()
446 * The timestamp for which the query is for. Only return
447 * intervals that intersect t.
448 * @throws TimeRangeException
451 public void writeInfoFromNode(List
<ITmfStateInterval
> stateInfo
, long t
)
452 throws TimeRangeException
{
453 /* This is from a state system query, we are "reading" this node */
454 fRwl
.readLock().lock();
456 for (int i
= getStartIndexFor(t
); i
< fIntervals
.size(); i
++) {
458 * Now we only have to compare the Start times, since we now the
459 * End times necessarily fit.
461 * Second condition is to ignore new attributes that might have
462 * been created after stateInfo was instantiated (they would be
465 ITmfStateInterval interval
= fIntervals
.get(i
);
466 if (t
>= interval
.getStartTime() &&
467 interval
.getAttribute() < stateInfo
.size()) {
468 stateInfo
.set(interval
.getAttribute(), interval
);
472 fRwl
.readLock().unlock();
477 * Get a single Interval from the information in this node If the
478 * key/timestamp pair cannot be found, we return null.
481 * The attribute quark to look for
484 * @return The Interval containing the information we want, or null if it
486 * @throws TimeRangeException
489 public HTInterval
getRelevantInterval(int key
, long t
) throws TimeRangeException
{
490 fRwl
.readLock().lock();
492 for (int i
= getStartIndexFor(t
); i
< fIntervals
.size(); i
++) {
493 HTInterval curInterval
= fIntervals
.get(i
);
494 if (curInterval
.getAttribute() == key
495 && curInterval
.getStartTime() <= t
) {
500 /* We didn't find the relevant information in this node */
504 fRwl
.readLock().unlock();
508 private int getStartIndexFor(long t
) throws TimeRangeException
{
509 /* Should only be called by methods with the readLock taken */
511 if (fIntervals
.isEmpty()) {
516 * Since the intervals are sorted by end time then by start time, we can
517 * skip all the ones at the beginning whose end times are smaller than
518 * 't'. We search for a dummy interval from [Long.MIN_VALUE, t], which
519 * will return the first interval that ends with a time >= t.
521 HTInterval dummy
= new HTInterval(Long
.MIN_VALUE
, t
, 0, TmfStateValue
.nullValue());
522 int index
= Collections
.binarySearch(fIntervals
, dummy
, NODE_ORDER
);
524 /* Handle negative binarySearch return */
525 return (index
>= 0 ? index
: -index
- 1);
529 * Return the total header size of this node (will depend on the node type).
531 * @return The total header size
533 public final int getTotalHeaderSize() {
534 return COMMON_HEADER_SIZE
+ getSpecificHeaderSize();
538 * @return The offset, within the node, where the Data section ends
540 private int getDataSectionEndOffset() {
541 return getTotalHeaderSize() + fSizeOfIntervalSection
;
545 * Returns the free space in the node, which is simply put, the
546 * stringSectionOffset - dataSectionOffset
548 * @return The amount of free space in the node (in bytes)
550 public int getNodeFreeSpace() {
551 fRwl
.readLock().lock();
552 int ret
= fConfig
.getBlockSize() - getDataSectionEndOffset();
553 fRwl
.readLock().unlock();
559 * Returns the current space utilization of this node, as a percentage.
560 * (used space / total usable space, which excludes the header)
562 * @return The percentage (value between 0 and 100) of space utilization in
565 public long getNodeUsagePercent() {
566 fRwl
.readLock().lock();
568 final int blockSize
= fConfig
.getBlockSize();
569 float freePercent
= (float) getNodeFreeSpace()
570 / (float) (blockSize
- getTotalHeaderSize())
572 return (long) (100L - freePercent
);
575 fRwl
.readLock().unlock();
580 * @name Debugging functions
583 @SuppressWarnings("nls")
585 public String
toString() {
586 /* Only used for debugging, shouldn't be externalized */
587 return String
.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
589 (fParentSequenceNumber
== -1) ?
"Root" : "Parent #" + fParentSequenceNumber
,
592 getNodeUsagePercent(),
594 (fIsOnDisk
|| fNodeEnd
!= 0) ? fNodeEnd
: "...");
598 * Debugging function that prints out the contents of this node
601 * PrintWriter in which we will print the debug output
603 @SuppressWarnings("nls")
604 public void debugPrintIntervals(PrintWriter writer
) {
605 /* Only used for debugging, shouldn't be externalized */
606 writer
.println("Intervals for node #" + fSequenceNumber
+ ":");
608 /* Array of children */
609 if (getNodeType() != NodeType
.LEAF
) { /* Only Core Nodes can have children */
610 ParentNode thisNode
= (ParentNode
) this;
611 writer
.print(" " + thisNode
.getNbChildren() + " children");
612 if (thisNode
.getNbChildren() >= 1) {
613 writer
.print(": [ " + thisNode
.getChild(0));
614 for (int i
= 1; i
< thisNode
.getNbChildren(); i
++) {
615 writer
.print(", " + thisNode
.getChild(i
));
622 /* List of intervals in the node */
623 writer
.println(" Intervals contained:");
624 for (int i
= 0; i
< fIntervals
.size(); i
++) {
625 writer
.println(fIntervals
.get(i
).toString());
627 writer
.println('\n');
630 // ------------------------------------------------------------------------
632 // ------------------------------------------------------------------------
635 * Get the byte value representing the node type.
637 * @return The node type
639 public abstract NodeType
getNodeType();
642 * Return the specific header size of this node. This means the size
643 * occupied by the type-specific section of the header (not counting the
646 * @return The specific header size
648 protected abstract int getSpecificHeaderSize();
651 * Read the type-specific part of the node header from a byte buffer.
654 * The byte buffer to read from. It should be already positioned
657 protected abstract void readSpecificHeader(ByteBuffer buffer
);
660 * Write the type-specific part of the header in a byte buffer.
663 * The buffer to write to. It should already be at the correct
666 protected abstract void writeSpecificHeader(ByteBuffer buffer
);
669 * Node-type-specific toString method. Used for debugging.
671 * @return A string representing the node
673 protected abstract String
toStringSpecific();