/*******************************************************************************
 * Copyright (c) 2010, 2015 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
 *   Florian Wininger - Add Extension and Leaf Node
 *   Patrick Tasse - Keep interval list sorted on insert
 *******************************************************************************/
package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
36 public abstract class HTNode
{
38 // ------------------------------------------------------------------------
40 // ------------------------------------------------------------------------
45 public static enum NodeType
{
47 * Core node, which is a "front" node, at any level of the tree except
48 * the bottom-most one. It has children, and may have extensions.
52 * Leaf node, which is a node at the last bottom level of the tree. It
53 * cannot have any children or extensions.
58 * Determine a node type by reading a serialized byte.
61 * The byte representation of the node type
62 * @return The corresponding NodeType
64 * If the NodeType is unrecognized
66 public static NodeType
fromByte(byte rep
) throws IOException
{
73 throw new IOException();
78 * Get the byte representation of this node type. It can then be read
79 * with {@link #fromByte}.
81 * @return The byte matching this node type
83 public byte toByte() {
90 throw new IllegalStateException();
95 // ------------------------------------------------------------------------
97 // ------------------------------------------------------------------------
99 /* Configuration of the History Tree to which belongs this node */
100 private final HTConfig config
;
102 /* Time range of this node */
103 private final long nodeStart
;
104 private long nodeEnd
;
106 /* Sequence number = position in the node section of the file */
107 private final int sequenceNumber
;
108 private int parentSequenceNumber
; /* = -1 if this node is the root node */
110 /* Where the Strings section begins (from the start of the node */
111 private int stringSectionOffset
;
113 /* Sum of bytes of all intervals in the node */
114 private int sizeOfIntervalSection
;
116 /* True if this node was read from disk (meaning its end time is now fixed) */
117 private volatile boolean isOnDisk
;
119 /* Vector containing all the intervals contained in this node */
120 private final List
<HTInterval
> intervals
;
122 /* Lock used to protect the accesses to intervals, nodeEnd and such */
123 private final ReentrantReadWriteLock rwl
= new ReentrantReadWriteLock(false);
/**
 * Constructor.
 *
 * @param config
 *            Configuration of the History Tree
 * @param seqNumber
 *            The (unique) sequence number assigned to this particular node
 * @param parentSeqNumber
 *            The sequence number of this node's parent node
 * @param start
 *            The earliest timestamp stored in this node
 */
protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
    this.config = config;
    this.nodeStart = start;
    this.sequenceNumber = seqNumber;
    this.parentSequenceNumber = parentSeqNumber;

    /* A fresh node has an empty Strings section, so it "starts" at the
     * very end of the block and grows downward as intervals are added. */
    this.stringSectionOffset = config.getBlockSize();
    this.sizeOfIntervalSection = 0;
    this.isOnDisk = false;
    this.intervals = new ArrayList<>();
}
150 * Reader factory method. Build a Node object (of the right type) by reading
151 * a block in the file.
154 * Configuration of the History Tree
156 * FileChannel to the history file, ALREADY SEEKED at the start
158 * @return The node object
159 * @throws IOException
160 * If there was an error reading from the file channel
162 public static final HTNode
readNode(HTConfig config
, FileChannel fc
)
164 HTNode newNode
= null;
167 ByteBuffer buffer
= ByteBuffer
.allocate(config
.getBlockSize());
168 buffer
.order(ByteOrder
.LITTLE_ENDIAN
);
170 res
= fc
.read(buffer
);
171 assert (res
== config
.getBlockSize());
174 /* Read the common header part */
175 byte typeByte
= buffer
.get();
176 NodeType type
= NodeType
.fromByte(typeByte
);
177 long start
= buffer
.getLong();
178 long end
= buffer
.getLong();
179 int seqNb
= buffer
.getInt();
180 int parentSeqNb
= buffer
.getInt();
181 int intervalCount
= buffer
.getInt();
182 int stringSectionOffset
= buffer
.getInt();
183 buffer
.get(); // TODO Used to be "isDone", to be removed from the header
185 /* Now the rest of the header depends on the node type */
189 newNode
= new CoreNode(config
, seqNb
, parentSeqNb
, start
);
190 newNode
.readSpecificHeader(buffer
);
195 newNode
= new LeafNode(config
, seqNb
, parentSeqNb
, start
);
196 newNode
.readSpecificHeader(buffer
);
200 /* Unrecognized node type */
201 throw new IOException();
205 * At this point, we should be done reading the header and 'buffer'
206 * should only have the intervals left
208 for (i
= 0; i
< intervalCount
; i
++) {
209 newNode
.intervals
.add(HTInterval
.readFrom(buffer
));
212 /* Assign the node's other information we have read previously */
213 newNode
.nodeEnd
= end
;
214 newNode
.stringSectionOffset
= stringSectionOffset
;
215 newNode
.isOnDisk
= true;
221 * Write this node to the given file channel.
224 * The file channel to write to (should be sought to be correct
226 * @throws IOException
227 * If there was an error writing
229 public final void writeSelf(FileChannel fc
) throws IOException
{
231 * Yes, we are taking the *read* lock here, because we are reading the
232 * information in the node to write it to disk.
234 rwl
.readLock().lock();
236 final int blockSize
= config
.getBlockSize();
237 int curStringsEntryEndPos
= blockSize
;
239 ByteBuffer buffer
= ByteBuffer
.allocate(blockSize
);
240 buffer
.order(ByteOrder
.LITTLE_ENDIAN
);
243 /* Write the common header part */
244 buffer
.put(this.getNodeType().toByte());
245 buffer
.putLong(nodeStart
);
246 buffer
.putLong(nodeEnd
);
247 buffer
.putInt(sequenceNumber
);
248 buffer
.putInt(parentSequenceNumber
);
249 buffer
.putInt(intervals
.size());
250 buffer
.putInt(stringSectionOffset
);
251 buffer
.put((byte) 1); // TODO Used to be "isDone", to be removed from header
253 /* Now call the inner method to write the specific header part */
254 this.writeSpecificHeader(buffer
);
256 /* Back to us, we write the intervals */
257 for (HTInterval interval
: intervals
) {
258 int size
= interval
.writeInterval(buffer
, curStringsEntryEndPos
);
259 curStringsEntryEndPos
-= size
;
263 * Write padding between the end of the Data section and the start
264 * of the Strings section (needed to fill the node in case there is
265 * no Strings section)
267 while (buffer
.position() < stringSectionOffset
) {
268 buffer
.put((byte) 0);
272 * If the offsets were right, the size of the Strings section should
273 * be == to the expected size
275 assert (curStringsEntryEndPos
== stringSectionOffset
);
277 /* Finally, write everything in the Buffer to disk */
279 // if we don't do this, flip() will lose what's after.
280 buffer
.position(blockSize
);
283 int res
= fc
.write(buffer
);
284 assert (res
== blockSize
);
287 rwl
.readLock().unlock();
292 // ------------------------------------------------------------------------
294 // ------------------------------------------------------------------------
297 * Retrieve the history tree configuration used for this node.
299 * @return The history tree config
301 protected HTConfig
getConfig() {
306 * Get the start time of this node.
308 * @return The start time of this node
310 public long getNodeStart() {
315 * Get the end time of this node.
317 * @return The end time of this node
319 public long getNodeEnd() {
327 * Get the sequence number of this node.
329 * @return The sequence number of this node
331 public int getSequenceNumber() {
332 return sequenceNumber
;
336 * Get the sequence number of this node's parent.
338 * @return The parent sequence number
340 public int getParentSequenceNumber() {
341 return parentSequenceNumber
;
345 * Change this node's parent. Used when we create a new root node for
349 * The sequence number of the node that is the new parent
351 public void setParentSequenceNumber(int newParent
) {
352 parentSequenceNumber
= newParent
;
356 * Return if this node is "done" (full and written to disk).
358 * @return If this node is done or not
360 public boolean isOnDisk() {
365 * Add an interval to this node
368 * Interval to add to this node
370 public void addInterval(HTInterval newInterval
) {
371 rwl
.writeLock().lock();
373 /* Just in case, should be checked before even calling this function */
374 assert (newInterval
.getIntervalSize() <= this.getNodeFreeSpace());
376 /* Find the insert position to keep the list sorted */
377 int index
= intervals
.size();
378 while (index
> 0 && newInterval
.compareTo(intervals
.get(index
- 1)) < 0) {
382 intervals
.add(index
, newInterval
);
383 sizeOfIntervalSection
+= newInterval
.getIntervalSize();
385 /* Update the in-node offset "pointer" */
386 stringSectionOffset
-= (newInterval
.getStringsEntrySize());
388 rwl
.writeLock().unlock();
393 * We've received word from the containerTree that newest nodes now exist to
394 * our right. (Puts isDone = true and sets the endtime)
397 * The nodeEnd time that the node will have
399 public void closeThisNode(long endtime
) {
400 rwl
.writeLock().lock();
402 assert (endtime
>= this.nodeStart
);
404 if (!intervals
.isEmpty()) {
406 * Make sure there are no intervals in this node with their
407 * EndTime > the one requested. Only need to check the last one
408 * since they are sorted
410 assert (endtime
>= intervals
.get(intervals
.size() - 1).getEndTime());
413 this.nodeEnd
= endtime
;
415 rwl
.writeLock().unlock();
420 * The method to fill up the stateInfo (passed on from the Current State
421 * Tree when it does a query on the SHT). We'll replace the data in that
422 * vector with whatever relevant we can find from this node
425 * The same stateInfo that comes from SHT's doQuery()
427 * The timestamp for which the query is for. Only return
428 * intervals that intersect t.
429 * @throws TimeRangeException
432 public void writeInfoFromNode(List
<ITmfStateInterval
> stateInfo
, long t
)
433 throws TimeRangeException
{
434 /* This is from a state system query, we are "reading" this node */
435 rwl
.readLock().lock();
437 for (int i
= getStartIndexFor(t
); i
< intervals
.size(); i
++) {
439 * Now we only have to compare the Start times, since we now the
440 * End times necessarily fit.
442 * Second condition is to ignore new attributes that might have
443 * been created after stateInfo was instantiated (they would be
446 ITmfStateInterval interval
= intervals
.get(i
);
447 if (interval
.getStartTime() <= t
&&
448 interval
.getAttribute() < stateInfo
.size()) {
449 stateInfo
.set(interval
.getAttribute(), interval
);
453 rwl
.readLock().unlock();
458 * Get a single Interval from the information in this node If the
459 * key/timestamp pair cannot be found, we return null.
462 * The attribute quark to look for
465 * @return The Interval containing the information we want, or null if it
467 * @throws TimeRangeException
470 public HTInterval
getRelevantInterval(int key
, long t
) throws TimeRangeException
{
471 rwl
.readLock().lock();
473 for (int i
= getStartIndexFor(t
); i
< intervals
.size(); i
++) {
474 HTInterval curInterval
= intervals
.get(i
);
475 if (curInterval
.getAttribute() == key
476 && curInterval
.getStartTime() <= t
477 && curInterval
.getEndTime() >= t
) {
482 /* We didn't find the relevant information in this node */
486 rwl
.readLock().unlock();
490 private int getStartIndexFor(long t
) throws TimeRangeException
{
491 /* Should only be called by methods with the readLock taken */
493 if (intervals
.isEmpty()) {
497 * Since the intervals are sorted by end time, we can skip all the ones
498 * at the beginning whose end times are smaller than 't'. Java does
499 * provides a .binarySearch method, but its API is quite weird...
501 HTInterval dummy
= new HTInterval(0, t
, 0, TmfStateValue
.nullValue());
502 int index
= Collections
.binarySearch(intervals
, dummy
);
506 * .binarySearch returns a negative number if the exact value was
507 * not found. Here we just want to know where to start searching, we
508 * don't care if the value is exact or not.
514 /* Sometimes binarySearch yields weird stuff... */
518 if (index
>= intervals
.size()) {
519 index
= intervals
.size() - 1;
523 * Another API quirkiness, the returned index is the one of the *last*
524 * element of a series of equal endtimes, which happens sometimes. We
525 * want the *first* element of such a series, to read through them
529 && intervals
.get(index
- 1).compareTo(intervals
.get(index
)) == 0) {
539 * 16 - 2x long (start time, end time)
540 * 16 - 4x int (seq number, parent seq number, intervalcount,
541 * strings section pos.)
542 * 1 - byte (done or not)
545 private static final int COMMON_HEADER_SIZE
= 34;
548 * Return the total header size of this node (will depend on the node type).
550 * @return The total header size
552 public final int getTotalHeaderSize() {
553 return COMMON_HEADER_SIZE
+ getSpecificHeaderSize();
557 * @return The offset, within the node, where the Data section ends
559 private int getDataSectionEndOffset() {
560 return this.getTotalHeaderSize() + sizeOfIntervalSection
;
564 * Returns the free space in the node, which is simply put, the
565 * stringSectionOffset - dataSectionOffset
567 * @return The amount of free space in the node (in bytes)
569 public int getNodeFreeSpace() {
570 rwl
.readLock().lock();
571 int ret
= stringSectionOffset
- this.getDataSectionEndOffset();
572 rwl
.readLock().unlock();
578 * Returns the current space utilization of this node, as a percentage.
579 * (used space / total usable space, which excludes the header)
581 * @return The percentage (value between 0 and 100) of space utilization in
584 public long getNodeUsagePercent() {
585 rwl
.readLock().lock();
587 final int blockSize
= config
.getBlockSize();
588 float freePercent
= (float) this.getNodeFreeSpace()
589 / (float) (blockSize
- this.getTotalHeaderSize())
591 return (long) (100L - freePercent
);
594 rwl
.readLock().unlock();
/* Debugging functions */
602 @SuppressWarnings("nls")
604 public String
toString() {
605 /* Only used for debugging, shouldn't be externalized */
606 StringBuffer buf
= new StringBuffer("Node #" + sequenceNumber
+ ", ");
607 buf
.append(this.toStringSpecific());
608 buf
.append(intervals
.size() + " intervals (" + this.getNodeUsagePercent()
611 buf
.append("[" + this.nodeStart
+ " - ");
613 buf
= buf
.append("" + this.nodeEnd
+ "]");
615 buf
= buf
.append("...]");
617 return buf
.toString();
621 * Debugging function that prints out the contents of this node
624 * PrintWriter in which we will print the debug output
626 @SuppressWarnings("nls")
627 public void debugPrintIntervals(PrintWriter writer
) {
628 /* Only used for debugging, shouldn't be externalized */
629 writer
.println("Node #" + sequenceNumber
+ ":");
631 /* Array of children */
632 if (this.getNodeType() == NodeType
.CORE
) { /* Only Core Nodes can have children */
633 CoreNode thisNode
= (CoreNode
) this;
634 writer
.print(" " + thisNode
.getNbChildren() + " children");
635 if (thisNode
.getNbChildren() >= 1) {
636 writer
.print(": [ " + thisNode
.getChild(0));
637 for (int i
= 1; i
< thisNode
.getNbChildren(); i
++) {
638 writer
.print(", " + thisNode
.getChild(i
));
645 /* List of intervals in the node */
646 writer
.println(" Intervals contained:");
647 for (int i
= 0; i
< intervals
.size(); i
++) {
648 writer
.println(intervals
.get(i
).toString());
650 writer
.println('\n');
653 // ------------------------------------------------------------------------
655 // ------------------------------------------------------------------------
658 * Get the byte value representing the node type.
660 * @return The node type
662 public abstract NodeType
getNodeType();
665 * Return the specific header size of this node. This means the size
666 * occupied by the type-specific section of the header (not counting the
669 * @return The specific header size
671 protected abstract int getSpecificHeaderSize();
674 * Read the type-specific part of the node header from a byte buffer.
677 * The byte buffer to read from. It should be already positioned
680 protected abstract void readSpecificHeader(ByteBuffer buffer
);
683 * Write the type-specific part of the header in a byte buffer.
686 * The buffer to write to. It should already be at the correct
689 protected abstract void writeSpecificHeader(ByteBuffer buffer
);
692 * Node-type-specific toString method. Used for debugging.
694 * @return A string representing the node
696 protected abstract String
toStringSpecific();