common: Move plugins to their own sub-directory
[deliverable/tracecompass.git] / org.eclipse.tracecompass.statesystem.core / src / org / eclipse / tracecompass / internal / statesystem / core / backend / historytree / HTNode.java
1 /*******************************************************************************
2 * Copyright (c) 2010, 2015 Ericsson, École Polytechnique de Montréal, and others
3 *
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
8 *
9 * Contributors:
10 * Alexandre Montplaisir - Initial API and implementation
11 * Florian Wininger - Add Extension and Leaf Node
12 * Patrick Tasse - Keep interval list sorted on insert
13 *******************************************************************************/
14
15 package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
16
17 import java.io.IOException;
18 import java.io.PrintWriter;
19 import java.nio.ByteBuffer;
20 import java.nio.ByteOrder;
21 import java.nio.channels.FileChannel;
22 import java.util.ArrayList;
23 import java.util.Collections;
24 import java.util.List;
25 import java.util.concurrent.locks.ReentrantReadWriteLock;
26
27 import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
28 import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
29 import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
30
/**
 * The base class for all the types of nodes that go in the History Tree.
 * <p>
 * Access to the interval list, the node end time and related mutable state is
 * guarded by an internal {@link ReentrantReadWriteLock}.
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------
41
42 /**
43 * The type of node
44 */
45 public static enum NodeType {
46 /**
47 * Core node, which is a "front" node, at any level of the tree except
48 * the bottom-most one. It has children, and may have extensions.
49 */
50 CORE,
51 /**
52 * Leaf node, which is a node at the last bottom level of the tree. It
53 * cannot have any children or extensions.
54 */
55 LEAF;
56
57 /**
58 * Determine a node type by reading a serialized byte.
59 *
60 * @param rep
61 * The byte representation of the node type
62 * @return The corresponding NodeType
63 * @throws IOException
64 * If the NodeType is unrecognized
65 */
66 public static NodeType fromByte(byte rep) throws IOException {
67 switch (rep) {
68 case 1:
69 return CORE;
70 case 2:
71 return LEAF;
72 default:
73 throw new IOException();
74 }
75 }
76
77 /**
78 * Get the byte representation of this node type. It can then be read
79 * with {@link #fromByte}.
80 *
81 * @return The byte matching this node type
82 */
83 public byte toByte() {
84 switch (this) {
85 case CORE:
86 return 1;
87 case LEAF:
88 return 2;
89 default:
90 throw new IllegalStateException();
91 }
92 }
93 }
94
    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig config;

    /* Time range of this node. The end time is mutable until the node is closed. */
    private final long nodeStart;
    private long nodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int sequenceNumber;
    private int parentSequenceNumber; /* = -1 if this node is the root node */

    /* Where the Strings section begins (offset from the start of the node) */
    private int stringSectionOffset;

    /* Sum of the sizes, in bytes, of all intervals in the node */
    private int sizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean isOnDisk;

    /* All the intervals contained in this node, kept sorted (see addInterval) */
    private final List<HTInterval> intervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(false);
124
    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        this.config = config;
        this.nodeStart = start;
        this.sequenceNumber = seqNumber;
        this.parentSequenceNumber = parentSeqNumber;

        /*
         * A new node starts empty: the Strings section begins at the very end
         * of the block, and no interval bytes have been written yet.
         */
        this.stringSectionOffset = config.getBlockSize();
        this.sizeOfIntervalSection = 0;
        this.isOnDisk = false;
        this.intervals = new ArrayList<>();
    }
148
149 /**
150 * Reader factory method. Build a Node object (of the right type) by reading
151 * a block in the file.
152 *
153 * @param config
154 * Configuration of the History Tree
155 * @param fc
156 * FileChannel to the history file, ALREADY SEEKED at the start
157 * of the node.
158 * @return The node object
159 * @throws IOException
160 * If there was an error reading from the file channel
161 */
162 public static final HTNode readNode(HTConfig config, FileChannel fc)
163 throws IOException {
164 HTNode newNode = null;
165 int res, i;
166
167 ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
168 buffer.order(ByteOrder.LITTLE_ENDIAN);
169 buffer.clear();
170 res = fc.read(buffer);
171 assert (res == config.getBlockSize());
172 buffer.flip();
173
174 /* Read the common header part */
175 byte typeByte = buffer.get();
176 NodeType type = NodeType.fromByte(typeByte);
177 long start = buffer.getLong();
178 long end = buffer.getLong();
179 int seqNb = buffer.getInt();
180 int parentSeqNb = buffer.getInt();
181 int intervalCount = buffer.getInt();
182 int stringSectionOffset = buffer.getInt();
183 buffer.get(); // TODO Used to be "isDone", to be removed from the header
184
185 /* Now the rest of the header depends on the node type */
186 switch (type) {
187 case CORE:
188 /* Core nodes */
189 newNode = new CoreNode(config, seqNb, parentSeqNb, start);
190 newNode.readSpecificHeader(buffer);
191 break;
192
193 case LEAF:
194 /* Leaf nodes */
195 newNode = new LeafNode(config, seqNb, parentSeqNb, start);
196 newNode.readSpecificHeader(buffer);
197 break;
198
199 default:
200 /* Unrecognized node type */
201 throw new IOException();
202 }
203
204 /*
205 * At this point, we should be done reading the header and 'buffer'
206 * should only have the intervals left
207 */
208 for (i = 0; i < intervalCount; i++) {
209 newNode.intervals.add(HTInterval.readFrom(buffer));
210 }
211
212 /* Assign the node's other information we have read previously */
213 newNode.nodeEnd = end;
214 newNode.stringSectionOffset = stringSectionOffset;
215 newNode.isOnDisk = true;
216
217 return newNode;
218 }
219
220 /**
221 * Write this node to the given file channel.
222 *
223 * @param fc
224 * The file channel to write to (should be sought to be correct
225 * position)
226 * @throws IOException
227 * If there was an error writing
228 */
229 public final void writeSelf(FileChannel fc) throws IOException {
230 /*
231 * Yes, we are taking the *read* lock here, because we are reading the
232 * information in the node to write it to disk.
233 */
234 rwl.readLock().lock();
235 try {
236 final int blockSize = config.getBlockSize();
237 int curStringsEntryEndPos = blockSize;
238
239 ByteBuffer buffer = ByteBuffer.allocate(blockSize);
240 buffer.order(ByteOrder.LITTLE_ENDIAN);
241 buffer.clear();
242
243 /* Write the common header part */
244 buffer.put(this.getNodeType().toByte());
245 buffer.putLong(nodeStart);
246 buffer.putLong(nodeEnd);
247 buffer.putInt(sequenceNumber);
248 buffer.putInt(parentSequenceNumber);
249 buffer.putInt(intervals.size());
250 buffer.putInt(stringSectionOffset);
251 buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header
252
253 /* Now call the inner method to write the specific header part */
254 this.writeSpecificHeader(buffer);
255
256 /* Back to us, we write the intervals */
257 for (HTInterval interval : intervals) {
258 int size = interval.writeInterval(buffer, curStringsEntryEndPos);
259 curStringsEntryEndPos -= size;
260 }
261
262 /*
263 * Write padding between the end of the Data section and the start
264 * of the Strings section (needed to fill the node in case there is
265 * no Strings section)
266 */
267 while (buffer.position() < stringSectionOffset) {
268 buffer.put((byte) 0);
269 }
270
271 /*
272 * If the offsets were right, the size of the Strings section should
273 * be == to the expected size
274 */
275 assert (curStringsEntryEndPos == stringSectionOffset);
276
277 /* Finally, write everything in the Buffer to disk */
278
279 // if we don't do this, flip() will lose what's after.
280 buffer.position(blockSize);
281
282 buffer.flip();
283 int res = fc.write(buffer);
284 assert (res == blockSize);
285
286 } finally {
287 rwl.readLock().unlock();
288 }
289 isOnDisk = true;
290 }
291
    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return config;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return nodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node, or 0 if the node has not been written
     *         to disk yet (its end time is not fixed until then)
     */
    public long getNodeEnd() {
        if (this.isOnDisk) {
            return nodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return sequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number (-1 if this node is the root node)
     */
    public int getParentSequenceNumber() {
        return parentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        parentSequenceNumber = newParent;
    }

    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return isOnDisk;
    }
363
364 /**
365 * Add an interval to this node
366 *
367 * @param newInterval
368 * Interval to add to this node
369 */
370 public void addInterval(HTInterval newInterval) {
371 rwl.writeLock().lock();
372 try {
373 /* Just in case, should be checked before even calling this function */
374 assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());
375
376 /* Find the insert position to keep the list sorted */
377 int index = intervals.size();
378 while (index > 0 && newInterval.compareTo(intervals.get(index - 1)) < 0) {
379 index--;
380 }
381
382 intervals.add(index, newInterval);
383 sizeOfIntervalSection += newInterval.getIntervalSize();
384
385 /* Update the in-node offset "pointer" */
386 stringSectionOffset -= (newInterval.getStringsEntrySize());
387 } finally {
388 rwl.writeLock().unlock();
389 }
390 }
391
392 /**
393 * We've received word from the containerTree that newest nodes now exist to
394 * our right. (Puts isDone = true and sets the endtime)
395 *
396 * @param endtime
397 * The nodeEnd time that the node will have
398 */
399 public void closeThisNode(long endtime) {
400 rwl.writeLock().lock();
401 try {
402 assert (endtime >= this.nodeStart);
403
404 if (!intervals.isEmpty()) {
405 /*
406 * Make sure there are no intervals in this node with their
407 * EndTime > the one requested. Only need to check the last one
408 * since they are sorted
409 */
410 assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
411 }
412
413 this.nodeEnd = endtime;
414 } finally {
415 rwl.writeLock().unlock();
416 }
417 }
418
419 /**
420 * The method to fill up the stateInfo (passed on from the Current State
421 * Tree when it does a query on the SHT). We'll replace the data in that
422 * vector with whatever relevant we can find from this node
423 *
424 * @param stateInfo
425 * The same stateInfo that comes from SHT's doQuery()
426 * @param t
427 * The timestamp for which the query is for. Only return
428 * intervals that intersect t.
429 * @throws TimeRangeException
430 * If 't' is invalid
431 */
432 public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
433 throws TimeRangeException {
434 /* This is from a state system query, we are "reading" this node */
435 rwl.readLock().lock();
436 try {
437 for (int i = getStartIndexFor(t); i < intervals.size(); i++) {
438 /*
439 * Now we only have to compare the Start times, since we now the
440 * End times necessarily fit.
441 *
442 * Second condition is to ignore new attributes that might have
443 * been created after stateInfo was instantiated (they would be
444 * null anyway).
445 */
446 ITmfStateInterval interval = intervals.get(i);
447 if (interval.getStartTime() <= t &&
448 interval.getAttribute() < stateInfo.size()) {
449 stateInfo.set(interval.getAttribute(), interval);
450 }
451 }
452 } finally {
453 rwl.readLock().unlock();
454 }
455 }
456
457 /**
458 * Get a single Interval from the information in this node If the
459 * key/timestamp pair cannot be found, we return null.
460 *
461 * @param key
462 * The attribute quark to look for
463 * @param t
464 * The timestamp
465 * @return The Interval containing the information we want, or null if it
466 * wasn't found
467 * @throws TimeRangeException
468 * If 't' is invalid
469 */
470 public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
471 rwl.readLock().lock();
472 try {
473 for (int i = getStartIndexFor(t); i < intervals.size(); i++) {
474 HTInterval curInterval = intervals.get(i);
475 if (curInterval.getAttribute() == key
476 && curInterval.getStartTime() <= t
477 && curInterval.getEndTime() >= t) {
478 return curInterval;
479 }
480 }
481
482 /* We didn't find the relevant information in this node */
483 return null;
484
485 } finally {
486 rwl.readLock().unlock();
487 }
488 }
489
    /**
     * Find the index of the first interval that could intersect timestamp 't',
     * so callers can skip all the earlier intervals (the list is sorted by end
     * time). Should only be called with the read lock already taken.
     *
     * @param t
     *            The timestamp to search for
     * @return The index in 'intervals' at which callers should start scanning
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (intervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(intervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching, we
             * don't care if the value is exact or not.
             */
            index = -index - 1;

        }

        /* Sometimes binarySearch yields weird stuff... */
        if (index < 0) {
            index = 0;
        }
        if (index >= intervals.size()) {
            /* Insertion point past the end: start at the last element */
            index = intervals.size() - 1;
        }

        /*
         * Another API quirkiness, the returned index is the one of the *last*
         * element of a series of equal endtimes, which happens sometimes. We
         * want the *first* element of such a series, to read through them
         * again.
         */
        while (index > 0
                && intervals.get(index - 1).compareTo(intervals.get(index)) == 0) {
            index--;
        }

        return index;
    }
535
    /**
     * Size, in bytes, of the header section common to all node types:
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = 34;
546
    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size, in bytes
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }
555
    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return this.getTotalHeaderSize() + sizeOfIntervalSection;
    }
562
563 /**
564 * Returns the free space in the node, which is simply put, the
565 * stringSectionOffset - dataSectionOffset
566 *
567 * @return The amount of free space in the node (in bytes)
568 */
569 public int getNodeFreeSpace() {
570 rwl.readLock().lock();
571 int ret = stringSectionOffset - this.getDataSectionEndOffset();
572 rwl.readLock().unlock();
573
574 return ret;
575 }
576
577 /**
578 * Returns the current space utilization of this node, as a percentage.
579 * (used space / total usable space, which excludes the header)
580 *
581 * @return The percentage (value between 0 and 100) of space utilization in
582 * in this node.
583 */
584 public long getNodeUsagePercent() {
585 rwl.readLock().lock();
586 try {
587 final int blockSize = config.getBlockSize();
588 float freePercent = (float) this.getNodeFreeSpace()
589 / (float) (blockSize - this.getTotalHeaderSize())
590 * 100F;
591 return (long) (100L - freePercent);
592
593 } finally {
594 rwl.readLock().unlock();
595 }
596 }
597
598 /**
599 * @name Debugging functions
600 */
601
602 @SuppressWarnings("nls")
603 @Override
604 public String toString() {
605 /* Only used for debugging, shouldn't be externalized */
606 StringBuffer buf = new StringBuffer("Node #" + sequenceNumber + ", ");
607 buf.append(this.toStringSpecific());
608 buf.append(intervals.size() + " intervals (" + this.getNodeUsagePercent()
609 + "% used), ");
610
611 buf.append("[" + this.nodeStart + " - ");
612 if (this.isOnDisk) {
613 buf = buf.append("" + this.nodeEnd + "]");
614 } else {
615 buf = buf.append("...]");
616 }
617 return buf.toString();
618 }
619
620 /**
621 * Debugging function that prints out the contents of this node
622 *
623 * @param writer
624 * PrintWriter in which we will print the debug output
625 */
626 @SuppressWarnings("nls")
627 public void debugPrintIntervals(PrintWriter writer) {
628 /* Only used for debugging, shouldn't be externalized */
629 writer.println("Node #" + sequenceNumber + ":");
630
631 /* Array of children */
632 if (this.getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
633 CoreNode thisNode = (CoreNode) this;
634 writer.print(" " + thisNode.getNbChildren() + " children");
635 if (thisNode.getNbChildren() >= 1) {
636 writer.print(": [ " + thisNode.getChild(0));
637 for (int i = 1; i < thisNode.getNbChildren(); i++) {
638 writer.print(", " + thisNode.getChild(i));
639 }
640 writer.print(']');
641 }
642 writer.print('\n');
643 }
644
645 /* List of intervals in the node */
646 writer.println(" Intervals contained:");
647 for (int i = 0; i < intervals.size(); i++) {
648 writer.println(intervals.get(i).toString());
649 }
650 writer.println('\n');
651 }
652
    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node (core or leaf).
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}
This page took 0.066834 seconds and 5 git commands to generate.