1 /*******************************************************************************
2 * Copyright (c) 2012, 2014 Ericsson
3 * Copyright (c) 2010, 2011 École Polytechnique de Montréal
4 * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
6 * All rights reserved. This program and the accompanying materials are
7 * made available under the terms of the Eclipse Public License v1.0 which
8 * accompanies this distribution, and is available at
9 * http://www.eclipse.org/legal/epl-v10.html
11 *******************************************************************************/
13 package org
.eclipse
.linuxtools
.internal
.tmf
.core
.statesystem
.backends
.historytree
;
15 import java
.io
.IOException
;
16 import java
.io
.PrintWriter
;
17 import java
.nio
.ByteBuffer
;
18 import java
.nio
.ByteOrder
;
19 import java
.nio
.channels
.FileChannel
;
20 import java
.util
.ArrayList
;
21 import java
.util
.Collections
;
22 import java
.util
.List
;
23 import java
.util
.concurrent
.locks
.ReentrantReadWriteLock
;
25 import org
.eclipse
.linuxtools
.tmf
.core
.exceptions
.TimeRangeException
;
26 import org
.eclipse
.linuxtools
.tmf
.core
.interval
.ITmfStateInterval
;
27 import org
.eclipse
.linuxtools
.tmf
.core
.statevalue
.TmfStateValue
;
/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
34 public abstract class HTNode
{
36 /* Configuration of the History Tree to which belongs this node */
37 private final HTConfig config
;
39 /* Time range of this node */
40 private final long nodeStart
;
43 /* Sequence number = position in the node section of the file */
44 private final int sequenceNumber
;
45 private int parentSequenceNumber
; /* = -1 if this node is the root node */
47 /* Where the Strings section begins (from the start of the node */
48 private int stringSectionOffset
;
50 /* Sum of bytes of all intervals in the node */
51 private int sizeOfIntervalSection
;
53 /* True if this node was read from disk (meaning its end time is now fixed) */
54 private volatile boolean isOnDisk
;
56 /* Vector containing all the intervals contained in this node */
57 private final List
<HTInterval
> intervals
;
59 /* Lock used to protect the accesses to intervals, nodeEnd and such */
60 private final ReentrantReadWriteLock rwl
= new ReentrantReadWriteLock(false);
66 * Configuration of the History Tree
68 * The (unique) sequence number assigned to this particular node
69 * @param parentSeqNumber
70 * The sequence number of this node's parent node
72 * The earliest timestamp stored in this node
74 protected HTNode(HTConfig config
, int seqNumber
, int parentSeqNumber
, long start
) {
76 this.nodeStart
= start
;
77 this.sequenceNumber
= seqNumber
;
78 this.parentSequenceNumber
= parentSeqNumber
;
80 this.stringSectionOffset
= config
.getBlockSize();
81 this.sizeOfIntervalSection
= 0;
82 this.isOnDisk
= false;
83 this.intervals
= new ArrayList
<>();
87 * Reader factory method. Build a Node object (of the right type) by reading
88 * a block in the file.
91 * Configuration of the History Tree
93 * FileChannel to the history file, ALREADY SEEKED at the start
95 * @return The node object
97 * If there was an error reading from the file channel
99 public static final HTNode
readNode(HTConfig config
, FileChannel fc
)
101 HTNode newNode
= null;
104 ByteBuffer buffer
= ByteBuffer
.allocate(config
.getBlockSize());
105 buffer
.order(ByteOrder
.LITTLE_ENDIAN
);
107 res
= fc
.read(buffer
);
108 assert (res
== config
.getBlockSize());
111 /* Read the common header part */
112 byte type
= buffer
.get();
113 long start
= buffer
.getLong();
114 long end
= buffer
.getLong();
115 int seqNb
= buffer
.getInt();
116 int parentSeqNb
= buffer
.getInt();
117 int intervalCount
= buffer
.getInt();
118 int stringSectionOffset
= buffer
.getInt();
119 buffer
.get(); // TODO Used to be "isDone", to be removed from the header
121 /* Now the rest of the header depends on the node type */
125 newNode
= new CoreNode(config
, seqNb
, parentSeqNb
, start
);
126 newNode
.readSpecificHeader(buffer
);
129 // TODO implement other node types
136 // /* "Claudette" (extended) nodes */
140 /* Unrecognized node type */
141 throw new IOException();
145 * At this point, we should be done reading the header and 'buffer'
146 * should only have the intervals left
148 for (i
= 0; i
< intervalCount
; i
++) {
149 newNode
.intervals
.add(HTInterval
.readFrom(buffer
));
152 /* Assign the node's other information we have read previously */
153 newNode
.nodeEnd
= end
;
154 newNode
.stringSectionOffset
= stringSectionOffset
;
155 newNode
.isOnDisk
= true;
161 * Write this node to the given file channel.
164 * The file channel to write to (should be sought to be correct
166 * @throws IOException
167 * If there was an error writing
169 public final void writeSelf(FileChannel fc
) throws IOException
{
171 * Yes, we are taking the *read* lock here, because we are reading the
172 * information in the node to write it to disk.
174 rwl
.readLock().lock();
176 final int blockSize
= config
.getBlockSize();
177 int curStringsEntryEndPos
= blockSize
;
179 ByteBuffer buffer
= ByteBuffer
.allocate(blockSize
);
180 buffer
.order(ByteOrder
.LITTLE_ENDIAN
);
183 /* Write the common header part */
184 buffer
.put(this.getNodeType());
185 buffer
.putLong(nodeStart
);
186 buffer
.putLong(nodeEnd
);
187 buffer
.putInt(sequenceNumber
);
188 buffer
.putInt(parentSequenceNumber
);
189 buffer
.putInt(intervals
.size());
190 buffer
.putInt(stringSectionOffset
);
191 buffer
.put((byte) 1); // TODO Used to be "isDone", to be removed from header
193 /* Now call the inner method to write the specific header part */
194 this.writeSpecificHeader(buffer
);
196 /* Back to us, we write the intervals */
197 for (HTInterval interval
: intervals
) {
198 int size
= interval
.writeInterval(buffer
, curStringsEntryEndPos
);
199 curStringsEntryEndPos
-= size
;
203 * Write padding between the end of the Data section and the start
204 * of the Strings section (needed to fill the node in case there is
205 * no Strings section)
207 while (buffer
.position() < stringSectionOffset
) {
208 buffer
.put((byte) 0);
212 * If the offsets were right, the size of the Strings section should
213 * be == to the expected size
215 assert (curStringsEntryEndPos
== stringSectionOffset
);
217 /* Finally, write everything in the Buffer to disk */
219 // if we don't do this, flip() will lose what's after.
220 buffer
.position(blockSize
);
223 int res
= fc
.write(buffer
);
224 assert (res
== blockSize
);
227 rwl
.readLock().unlock();
232 // ------------------------------------------------------------------------
234 // ------------------------------------------------------------------------
237 * Retrieve the history tree configuration used for this node.
239 * @return The history tree config
241 protected HTConfig
getConfig() {
246 * Get the start time of this node.
248 * @return The start time of this node
250 public long getNodeStart() {
255 * Get the end time of this node.
257 * @return The end time of this node
259 public long getNodeEnd() {
267 * Get the sequence number of this node.
269 * @return The sequence number of this node
271 public int getSequenceNumber() {
272 return sequenceNumber
;
276 * Get the sequence number of this node's parent.
278 * @return The parent sequence number
280 public int getParentSequenceNumber() {
281 return parentSequenceNumber
;
285 * Change this node's parent. Used when we create a new root node for
289 * The sequence number of the node that is the new parent
291 public void setParentSequenceNumber(int newParent
) {
292 parentSequenceNumber
= newParent
;
296 * Return if this node is "done" (full and written to disk).
298 * @return If this node is done or not
300 public boolean isOnDisk() {
305 * Add an interval to this node
308 * Interval to add to this node
310 public void addInterval(HTInterval newInterval
) {
311 rwl
.writeLock().lock();
313 /* Just in case, should be checked before even calling this function */
314 assert (newInterval
.getIntervalSize() <= this.getNodeFreeSpace());
316 intervals
.add(newInterval
);
317 sizeOfIntervalSection
+= newInterval
.getIntervalSize();
319 /* Update the in-node offset "pointer" */
320 stringSectionOffset
-= (newInterval
.getStringsEntrySize());
322 rwl
.writeLock().unlock();
327 * We've received word from the containerTree that newest nodes now exist to
328 * our right. (Puts isDone = true and sets the endtime)
331 * The nodeEnd time that the node will have
333 public void closeThisNode(long endtime
) {
334 rwl
.writeLock().lock();
336 assert (endtime
>= this.nodeStart
);
338 if (intervals
.size() > 0) {
340 * Sort the intervals by ascending order of their end time. This
341 * speeds up lookups a bit
343 Collections
.sort(intervals
);
346 * Make sure there are no intervals in this node with their
347 * EndTime > the one requested. Only need to check the last one
348 * since they are now sorted
350 assert (endtime
>= intervals
.get(intervals
.size() - 1).getEndTime());
353 this.nodeEnd
= endtime
;
355 rwl
.writeLock().unlock();
360 * The method to fill up the stateInfo (passed on from the Current State
361 * Tree when it does a query on the SHT). We'll replace the data in that
362 * vector with whatever relevant we can find from this node
365 * The same stateInfo that comes from SHT's doQuery()
367 * The timestamp for which the query is for. Only return
368 * intervals that intersect t.
369 * @throws TimeRangeException
372 public void writeInfoFromNode(List
<ITmfStateInterval
> stateInfo
, long t
)
373 throws TimeRangeException
{
374 /* This is from a state system query, we are "reading" this node */
375 rwl
.readLock().lock();
377 if (intervals
.size() == 0) {
380 int startIndex
= getStartIndexFor(t
);
382 for (int i
= startIndex
; i
< intervals
.size(); i
++) {
384 * Now we only have to compare the Start times, since we now the
385 * End times necessarily fit.
387 * Second condition is to ignore new attributes that might have
388 * been created after stateInfo was instantiated (they would be
391 ITmfStateInterval interval
= intervals
.get(i
);
392 if (interval
.getStartTime() <= t
&&
393 interval
.getAttribute() < stateInfo
.size()) {
394 stateInfo
.set(interval
.getAttribute(), interval
);
398 rwl
.readLock().unlock();
403 * Get a single Interval from the information in this node If the
404 * key/timestamp pair cannot be found, we return null.
407 * The attribute quark to look for
410 * @return The Interval containing the information we want, or null if it
412 * @throws TimeRangeException If 't' is invalid
414 public HTInterval
getRelevantInterval(int key
, long t
) throws TimeRangeException
{
415 rwl
.readLock().lock();
417 if (intervals
.size() == 0) {
421 int startIndex
= getStartIndexFor(t
);
423 for (int i
= startIndex
; i
< intervals
.size(); i
++) {
424 HTInterval curInterval
= intervals
.get(i
);
425 if (curInterval
.getAttribute() == key
426 && curInterval
.getStartTime() <= t
427 && curInterval
.getEndTime() >= t
) {
431 /* We didn't find the relevant information in this node */
435 rwl
.readLock().unlock();
439 private int getStartIndexFor(long t
) throws TimeRangeException
{
440 /* Should only be called by methods with the readLock taken */
442 * Since the intervals are sorted by end time, we can skip all the ones
443 * at the beginning whose end times are smaller than 't'. Java does
444 * provides a .binarySearch method, but its API is quite weird...
446 HTInterval dummy
= new HTInterval(0, t
, 0, TmfStateValue
.nullValue());
447 int index
= Collections
.binarySearch(intervals
, dummy
);
451 * .binarySearch returns a negative number if the exact value was
452 * not found. Here we just want to know where to start searching, we
453 * don't care if the value is exact or not.
459 /* Sometimes binarySearch yields weird stuff... */
463 if (index
>= intervals
.size()) {
464 index
= intervals
.size() - 1;
468 * Another API quirkiness, the returned index is the one of the *last*
469 * element of a series of equal endtimes, which happens sometimes. We
470 * want the *first* element of such a series, to read through them
474 && intervals
.get(index
- 1).compareTo(intervals
.get(index
)) == 0) {
485 * 16 - 2x long (start time, end time)
486 * 16 - 4x int (seq number, parent seq number, intervalcount,
487 * strings section pos.)
488 * 1 - byte (done or not)
491 private static final int COMMON_HEADER_SIZE
= 34;
494 * Return the total header size of this node (will depend on the node type).
496 * @return The total header size
498 public final int getTotalHeaderSize() {
499 return COMMON_HEADER_SIZE
+ getSpecificHeaderSize();
503 * @return The offset, within the node, where the Data section ends
505 private int getDataSectionEndOffset() {
506 return this.getTotalHeaderSize() + sizeOfIntervalSection
;
510 * Returns the free space in the node, which is simply put, the
511 * stringSectionOffset - dataSectionOffset
513 * @return The amount of free space in the node (in bytes)
515 public int getNodeFreeSpace() {
516 rwl
.readLock().lock();
517 int ret
= stringSectionOffset
- this.getDataSectionEndOffset();
518 rwl
.readLock().unlock();
524 * Returns the current space utilization of this node, as a percentage.
525 * (used space / total usable space, which excludes the header)
527 * @return The percentage (value between 0 and 100) of space utilization in
530 public long getNodeUsagePercent() {
531 rwl
.readLock().lock();
533 final int blockSize
= config
.getBlockSize();
534 float freePercent
= (float) this.getNodeFreeSpace()
535 / (float) (blockSize
- this.getTotalHeaderSize())
537 return (long) (100L - freePercent
);
540 rwl
.readLock().unlock();
545 * @name Debugging functions
548 @SuppressWarnings("nls")
550 public String
toString() {
551 /* Only used for debugging, shouldn't be externalized */
552 StringBuffer buf
= new StringBuffer("Node #" + sequenceNumber
+ ", ");
553 buf
.append(this.toStringSpecific());
554 buf
.append(intervals
.size() + " intervals (" + this.getNodeUsagePercent()
557 buf
.append("[" + this.nodeStart
+ " - ");
559 buf
= buf
.append("" + this.nodeEnd
+ "]");
561 buf
= buf
.append("...]");
563 return buf
.toString();
567 * Debugging function that prints out the contents of this node
570 * PrintWriter in which we will print the debug output
572 @SuppressWarnings("nls")
573 public void debugPrintIntervals(PrintWriter writer
) {
574 /* Only used for debugging, shouldn't be externalized */
575 writer
.println("Node #" + sequenceNumber
+ ":");
577 /* Array of children */
578 if (this.getNodeType() == 1) { /* Only Core Nodes can have children */
579 CoreNode thisNode
= (CoreNode
) this;
580 writer
.print(" " + thisNode
.getNbChildren() + " children");
581 if (thisNode
.getNbChildren() >= 1) {
582 writer
.print(": [ " + thisNode
.getChild(0));
583 for (int i
= 1; i
< thisNode
.getNbChildren(); i
++) {
584 writer
.print(", " + thisNode
.getChild(i
));
591 /* List of intervals in the node */
592 writer
.println(" Intervals contained:");
593 for (int i
= 0; i
< intervals
.size(); i
++) {
594 writer
.println(intervals
.get(i
).toString());
596 writer
.println('\n');
599 // ------------------------------------------------------------------------
601 // ------------------------------------------------------------------------
604 * Get the byte value representing the node type.
606 * @return The node type
608 public abstract byte getNodeType();
611 * Return the specific header size of this node. This means the size
612 * occupied by the type-specific section of the header (not counting the
615 * @return The specific header size
617 protected abstract int getSpecificHeaderSize();
620 * Read the type-specific part of the node header from a byte buffer.
623 * The byte buffer to read from. It should be already positioned
626 protected abstract void readSpecificHeader(ByteBuffer buffer
);
629 * Write the type-specific part of the header in a byte buffer.
632 * The buffer to write to. It should already be at the correct
635 protected abstract void writeSpecificHeader(ByteBuffer buffer
);
638 * Node-type-specific toString method. Used for debugging.
640 * @return A string representing the node
642 protected abstract String
toStringSpecific();