/*******************************************************************************
 * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
 *   Florian Wininger - Add Extension and Leaf Node
 *   Patrick Tasse - Keep interval list sorted on insert
 *******************************************************************************/

package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * The type of node
     */
    public static enum NodeType {
        /**
         * Core node, which is a "front" node, at any level of the tree except
         * the bottom-most one. It has children, and may have extensions.
         */
        CORE,
        /**
         * Leaf node, which is a node at the bottom-most level of the tree. It
         * cannot have any children or extensions.
         */
        LEAF;

        /**
         * Determine a node type by reading a serialized byte.
         *
         * @param rep
         *            The byte representation of the node type
         * @return The corresponding NodeType
         * @throws IOException
         *             If the NodeType is unrecognized
         */
        public static NodeType fromByte(byte rep) throws IOException {
            switch (rep) {
            case 1:
                return CORE;
            case 2:
                return LEAF;
            default:
                throw new IOException();
            }
        }

        /**
         * Get the byte representation of this node type. It can then be read
         * with {@link #fromByte}.
         *
         * @return The byte matching this node type
         */
        public byte toByte() {
            switch (this) {
            case CORE:
                return 1;
            case LEAF:
                return 2;
            default:
                throw new IllegalStateException();
            }
        }
    }
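
    /*
     * A minimal usage sketch of the NodeType serialization defined above
     * (illustrative only; the values 1 and 2 come from toByte()/fromByte()):
     *
     *     byte b = NodeType.CORE.toByte();   // == 1
     *     NodeType t = NodeType.fromByte(b); // == NodeType.CORE
     *
     * fromByte() throws an IOException for any other byte value, which
     * readNode() below relies on to reject unrecognized node headers.
     */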

    /**
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = 34;
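
    /*
     * Layout arithmetic for the constant above: 1 (type byte) + 2 * 8 (two
     * longs) + 4 * 4 (four ints) + 1 ("done" byte) = 34 bytes.
     */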

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig fConfig;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Where the Strings section begins (from the start of the node) */
    private int fStringSectionOffset;

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* List containing all the intervals contained in this node */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fStringSectionOffset = config.getBlockSize();
        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by
     * reading a block in the file.
     *
     * @param config
     *            Configuration of the History Tree
     * @param fc
     *            FileChannel to the history file, already positioned at the
     *            start of the node
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc)
            throws IOException {
        HTNode newNode = null;

        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        int res = fc.read(buffer);
        assert (res == config.getBlockSize());
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        int stringSectionOffset = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
            /* Core nodes */
            newNode = new CoreNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        case LEAF:
            /* Leaf nodes */
            newNode = new LeafNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        for (int i = 0; i < intervalCount; i++) {
            HTInterval interval = HTInterval.readFrom(buffer);
            newNode.fIntervals.add(interval);
            newNode.fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;
        }

        /* Assign the node's other information we have read previously */
        newNode.fNodeEnd = end;
        newNode.fStringSectionOffset = stringSectionOffset;
        newNode.fIsOnDisk = true;

        return newNode;
    }
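
    /*
     * A hedged usage sketch for readNode(): the caller must position the
     * channel on the node's block first. The offset computation below is an
     * assumption for illustration (TREE_HEADER_SIZE is hypothetical); the
     * real offsets are managed by the owning history tree.
     *
     *     fc.position(TREE_HEADER_SIZE + (long) seqNumber * config.getBlockSize());
     *     HTNode node = HTNode.readNode(config, fc);
     */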

    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should already be positioned
     *            at the correct location)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            int curStringsEntryEndPos = blockSize;

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.putInt(fStringSectionOffset);
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            for (HTInterval interval : fIntervals) {
                int size = interval.writeInterval(buffer, curStringsEntryEndPos);
                curStringsEntryEndPos -= size;
            }

            /*
             * Write padding between the end of the Data section and the start
             * of the Strings section (needed to fill the node in case there is
             * no Strings section)
             */
            while (buffer.position() < fStringSectionOffset) {
                buffer.put((byte) 0);
            }

            /*
             * If the offsets were right, the size of the Strings section
             * should be == to the expected size
             */
            if (curStringsEntryEndPos != fStringSectionOffset) {
                throw new IllegalStateException("Wrong size of Strings section: Actual: " + curStringsEntryEndPos + ", Expected: " + fStringSectionOffset); //$NON-NLS-1$ //$NON-NLS-2$
            }

            /* Finally, write everything in the Buffer to disk */

            // if we don't do this, flip() will lose what's after.
            buffer.position(blockSize);

            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }
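
    /*
     * Resulting on-disk layout of one node block, as written by writeSelf()
     * above (the Strings entries are filled backwards from the end of the
     * block, so the padding covers whatever space is left in between):
     *
     *     [ common header | specific header | intervals | padding | strings ]
     *
     * The Strings section starts at fStringSectionOffset and runs to the end
     * of the block; everything before the intervals is the header described
     * by getTotalHeaderSize().
     */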

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node, or 0 if the node has not yet been
     *         written to disk (its end time is not fixed until then)
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node, for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    /**
     * Return whether this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Add an interval to this node
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getIntervalSize() <= getNodeFreeSpace());

            /* Find the insert position to keep the list sorted */
            int index = fIntervals.size();
            while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
                index--;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;

            /* Update the in-node offset "pointer" */
            fStringSectionOffset -= (newInterval.getStringsEntrySize());
        } finally {
            fRwl.writeLock().unlock();
        }
    }
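
    /*
     * A small worked example of the sorted insert above, assuming HTInterval
     * ordering by end time (the same ordering getStartIndexFor() below relies
     * on): inserting an interval ending at t = 15 into a node whose intervals
     * end at [10, 20, 30] scans back from index 3 to index 1, giving
     * [10, 15, 20, 30].
     */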

    /**
     * We've received word from the containerTree that newer nodes now exist
     * to our right. This fixes this node's end time.
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception
             * is reached with an empty node that has start time endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant information we can find from this node.
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp the query is for. Only return intervals that
     *            intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the start times, since we know
                 * the end times necessarily fit.
                 *
                 * The second condition is to ignore new attributes that might
                 * have been created after stateInfo was instantiated (they
                 * would be null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (interval.getStartTime() <= t &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Get a single Interval from the information in this node. If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                HTInterval curInterval = fIntervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t
                        && curInterval.getEndTime() >= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            fRwl.readLock().unlock();
        }
    }

    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the
         * ones at the beginning whose end times are smaller than 't'. Java
         * does provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching;
             * we don't care if the value is exact or not.
             */
            index = -index - 1;

        } else {
            /*
             * Another API quirk: when a series of elements have equal end
             * times, which happens sometimes, the returned index can be that
             * of any element in the series. We want the *first* element of
             * such a series, to read through them again.
             */
            while (index > 0
                    && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
                index--;
            }
        }

        return index;
    }
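
    /*
     * Worked example of the index arithmetic above: searching for end time
     * t = 25 among intervals ending at [10, 20, 30] makes binarySearch()
     * return -(insertionPoint) - 1 = -3, and -(-3) - 1 = 2, which correctly
     * points at the first interval whose end time is >= 25.
     */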

    /**
     * Return the total header size of this node (will depend on the node
     * type).
     *
     * @return The total header size
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }

    /**
     * Returns the free space left in the node, which is, simply put,
     * stringSectionOffset - dataSectionEndOffset.
     *
     * @return The amount of free space in the node (in bytes)
     */
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        try {
            return fStringSectionOffset - getDataSectionEndOffset();
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Returns the current space utilization of this node, as a percentage
     * (used space / total usable space, which excludes the header).
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }
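
    /*
     * Worked example for the computation above, assuming (for illustration) a
     * 4096-byte block and a 34-byte total header: with 2031 bytes free,
     * freePercent = 2031 / 4062 * 100 = 50.0, so the method reports
     * 100 - 50 = 50% used.
     */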

    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }

    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
            CoreNode thisNode = (CoreNode) this;
            writer.print("  " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println("  Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }

    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node.
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}