/*******************************************************************************
 * Copyright (c) 2010, 2015 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
 *   Florian Wininger - Add Extension and Leaf Node
 *   Patrick Tasse - Keep interval list sorted on insert
 *******************************************************************************/

package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * The type of node
     */
    public static enum NodeType {
        /**
         * Core node, which is a "front" node, at any level of the tree except
         * the bottom-most one. It has children, and may have extensions.
         */
        CORE,
        /**
         * Leaf node, which is a node at the bottom-most level of the tree. It
         * cannot have any children or extensions.
         */
        LEAF;

        /**
         * Determine a node type by reading a serialized byte.
         *
         * @param rep
         *            The byte representation of the node type
         * @return The corresponding NodeType
         * @throws IOException
         *             If the NodeType is unrecognized
         */
        public static NodeType fromByte(byte rep) throws IOException {
            switch (rep) {
            case 1:
                return CORE;
            case 2:
                return LEAF;
            default:
                throw new IOException();
            }
        }

        /**
         * Get the byte representation of this node type. It can then be read
         * with {@link #fromByte}.
         *
         * @return The byte matching this node type
         */
        public byte toByte() {
            switch (this) {
            case CORE:
                return 1;
            case LEAF:
                return 2;
            default:
                throw new IllegalStateException();
            }
        }
    }
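
    /*
     * Added example (illustrative, not part of the original code): the type
     * byte round-trips through toByte()/fromByte():
     *
     *     byte rep = NodeType.CORE.toByte();   // == 1
     *     NodeType t = NodeType.fromByte(rep); // == NodeType.CORE
     */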

    /**
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, interval count,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = 34;
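
    /*
     * Added note (illustrative sanity check of the value above): 1 (type
     * byte) + 2 * 8 (start/end longs) + 4 * 4 (ints) + 1 ("done" byte)
     * = 34 bytes.
     */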

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig fConfig;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Where the Strings section begins (from the start of the node) */
    private int fStringSectionOffset;

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* Vector containing all the intervals contained in this node */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fStringSectionOffset = config.getBlockSize();
        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by
     * reading a block in the file.
     *
     * @param config
     *            Configuration of the History Tree
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final HTNode readNode(HTConfig config, FileChannel fc)
            throws IOException {
        HTNode newNode = null;
        int res, i;

        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        res = fc.read(buffer);
        assert (res == config.getBlockSize());
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        int stringSectionOffset = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
            /* Core nodes */
            newNode = new CoreNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        case LEAF:
            /* Leaf nodes */
            newNode = new LeafNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        for (i = 0; i < intervalCount; i++) {
            HTInterval interval = HTInterval.readFrom(buffer);
            newNode.fIntervals.add(interval);
            newNode.fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;
        }

        /* Assign the node's other information we have read previously */
        newNode.fNodeEnd = end;
        newNode.fStringSectionOffset = stringSectionOffset;
        newNode.fIsOnDisk = true;

        return newNode;
    }
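
    /*
     * Added usage sketch (illustrative; assumes the caller computes the block
     * position, e.g. from a tree-level header size and the node's sequence
     * number, neither of which is defined in this class):
     *
     *     fc.position(treeHeaderSize + (long) seqNumber * config.getBlockSize());
     *     HTNode node = HTNode.readNode(config, fc);
     */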

    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should already be seeked to the
     *            correct position)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            int curStringsEntryEndPos = blockSize;

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.putInt(fStringSectionOffset);
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            for (HTInterval interval : fIntervals) {
                int size = interval.writeInterval(buffer, curStringsEntryEndPos);
                curStringsEntryEndPos -= size;
            }

            /*
             * Write padding between the end of the Data section and the start
             * of the Strings section (needed to fill the node in case there is
             * no Strings section)
             */
            while (buffer.position() < fStringSectionOffset) {
                buffer.put((byte) 0);
            }

            /*
             * If the offsets were right, the size of the Strings section
             * should be == to the expected size
             */
            if (curStringsEntryEndPos != fStringSectionOffset) {
                throw new IllegalStateException("Wrong size of Strings section: Actual: " + curStringsEntryEndPos + ", Expected: " + fStringSectionOffset); //$NON-NLS-1$ //$NON-NLS-2$
            }

            /* Finally, write everything in the Buffer to disk */

            // If we don't do this, flip() would discard everything after the current position.
            buffer.position(blockSize);

            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }
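
    /*
     * Added round-trip sketch (illustrative, not part of the original code):
     * a node written with writeSelf() can be read back with readNode() from
     * the same block position:
     *
     *     fc.position(nodeBlockPos);  // 'nodeBlockPos' is a hypothetical variable
     *     node.writeSelf(fc);
     *     fc.position(nodeBlockPos);
     *     HTNode copy = HTNode.readNode(config, fc);
     */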

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node, for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    /**
     * Return whether this node is "done" (full and written to disk).
     *
     * @return True if this node has been written to disk, false otherwise
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Add an interval to this node
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getIntervalSize() <= getNodeFreeSpace());

            /* Find the insert position to keep the list sorted */
            int index = fIntervals.size();
            while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
                index--;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;

            /* Update the in-node offset "pointer" */
            fStringSectionOffset -= (newInterval.getStringsEntrySize());
        } finally {
            fRwl.writeLock().unlock();
        }
    }
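
    /*
     * Added note (illustrative): since HTInterval's compareTo() orders
     * intervals by end time, the insertion loop above keeps fIntervals sorted
     * even if intervals arrive out of order. E.g., adding intervals with end
     * times 10, 30, 20 stores them in the order [10, 20, 30].
     */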

    /**
     * We've received word from the containing tree that newer nodes now exist
     * to our right. (Sets this node's end time.)
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception
             * is reached with an empty node that has start time endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant information we can find in this node.
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp for which the query is made. Only return
     *            intervals that intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the Start times, since we know
                 * the End times necessarily fit.
                 *
                 * Second condition is to ignore new attributes that might have
                 * been created after stateInfo was instantiated (they would be
                 * null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (interval.getStartTime() <= t &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Get a single Interval from the information in this node. If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                HTInterval curInterval = fIntervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t
                        && curInterval.getEndTime() >= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            fRwl.readLock().unlock();
        }
    }
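
    /*
     * Added usage sketch (illustrative; 'quark' and 'timestamp' are
     * hypothetical caller-side variables):
     *
     *     HTInterval interval = node.getRelevantInterval(quark, timestamp);
     *     if (interval == null) {
     *         // Not in this node: the tree would continue the search in a
     *         // child node whose time range covers 'timestamp'.
     *     }
     */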

    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching,
             * we don't care if the value is exact or not.
             */
            index = -index - 1;
        }

        /* Sometimes binarySearch yields weird stuff... */
        if (index < 0) {
            index = 0;
        }
        if (index >= fIntervals.size()) {
            index = fIntervals.size() - 1;
        }

        /*
         * Another API quirkiness, the returned index is the one of the *last*
         * element of a series of equal endtimes, which happens sometimes. We
         * want the *first* element of such a series, to read through them
         * again.
         */
        while (index > 0
                && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
            index--;
        }

        return index;
    }
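
    /*
     * Added example (illustrative) of the Collections.binarySearch() contract
     * used above: when the key is absent, it returns (-(insertionPoint) - 1).
     * For intervals with end times [5, 10, 20] and t = 12, binarySearch
     * returns -3, and (-index - 1) == 2, the index of the first interval
     * ending at or after t.
     */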

    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }

    /**
     * Returns the free space in the node, which is, simply put,
     * stringSectionOffset - dataSectionEndOffset.
     *
     * @return The amount of free space in the node (in bytes)
     */
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        int ret = fStringSectionOffset - getDataSectionEndOffset();
        fRwl.readLock().unlock();

        return ret;
    }
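
    /*
     * Added arithmetic sketch (illustrative; all numbers assumed): with a
     * 4096-byte block, a 34-byte total header, 10 intervals in the Data
     * section and the Strings section starting at offset 4000, the free
     * space is
     *
     *     4000 - (34 + 10 * HTInterval.DATA_ENTRY_SIZE) bytes.
     */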

    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        StringBuffer buf = new StringBuffer("Node #" + fSequenceNumber + ", ");
        buf.append(toStringSpecific());
        buf.append(fIntervals.size() + " intervals (" + getNodeUsagePercent()
                + "% used), ");

        buf.append("[" + fNodeStart + " - ");
        if (fIsOnDisk) {
            buf = buf.append("" + fNodeEnd + "]");
        } else {
            buf = buf.append("...]");
        }
        return buf.toString();
    }

    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
            CoreNode thisNode = (CoreNode) this;
            writer.print(" " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println(" Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }

    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node.
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}