9cb7b7457239d3d318782b29c65ebd6e82efb838
[deliverable/tracecompass.git] / statesystem / org.eclipse.tracecompass.statesystem.core / src / org / eclipse / tracecompass / internal / statesystem / core / backend / historytree / HTNode.java
1 /*******************************************************************************
2 * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
3 *
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
8 *
9 * Contributors:
10 * Alexandre Montplaisir - Initial API and implementation
11 * Florian Wininger - Add Extension and Leaf Node
12 * Patrick Tasse - Keep interval list sorted on insert
13 *******************************************************************************/
14
15 package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
16
17 import java.io.IOException;
18 import java.io.PrintWriter;
19 import java.nio.ByteBuffer;
20 import java.nio.ByteOrder;
21 import java.nio.channels.FileChannel;
22 import java.util.ArrayList;
23 import java.util.Collections;
24 import java.util.List;
25 import java.util.concurrent.locks.ReentrantReadWriteLock;
26
27 import org.eclipse.jdt.annotation.NonNull;
28 import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
29 import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
30 import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
31
32 import com.google.common.collect.Iterables;
33
34 /**
35 * The base class for all the types of nodes that go in the History Tree.
36 *
37 * @author Alexandre Montplaisir
38 */
39 public abstract class HTNode {
40
41 // ------------------------------------------------------------------------
42 // Class fields
43 // ------------------------------------------------------------------------
44
45 /**
46 * The type of node
47 */
48 public static enum NodeType {
49 /**
50 * Core node, which is a "front" node, at any level of the tree except
51 * the bottom-most one. It has children, and may have extensions.
52 */
53 CORE,
54 /**
55 * Leaf node, which is a node at the last bottom level of the tree. It
56 * cannot have any children or extensions.
57 */
58 LEAF;
59
60 /**
61 * Determine a node type by reading a serialized byte.
62 *
63 * @param rep
64 * The byte representation of the node type
65 * @return The corresponding NodeType
66 * @throws IOException
67 * If the NodeType is unrecognized
68 */
69 public static NodeType fromByte(byte rep) throws IOException {
70 switch (rep) {
71 case 1:
72 return CORE;
73 case 2:
74 return LEAF;
75 default:
76 throw new IOException();
77 }
78 }
79
80 /**
81 * Get the byte representation of this node type. It can then be read
82 * with {@link #fromByte}.
83 *
84 * @return The byte matching this node type
85 */
86 public byte toByte() {
87 switch (this) {
88 case CORE:
89 return 1;
90 case LEAF:
91 return 2;
92 default:
93 throw new IllegalStateException();
94 }
95 }
96 }
97
    /**
     * Size in bytes of the header section common to all node types:
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 4 * Integer.BYTES
            + Byte.BYTES;

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which belongs this node */
    private final HTConfig fConfig;

    /* Time range of this node. fNodeEnd is only set when the node is closed. */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* List containing all the intervals contained in this node, sorted */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);
138
    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        /* A freshly built node starts empty, in memory, with no end time set */
        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }
161
162 /**
163 * Reader factory method. Build a Node object (of the right type) by reading
164 * a block in the file.
165 *
166 * @param config
167 * Configuration of the History Tree
168 * @param fc
169 * FileChannel to the history file, ALREADY SEEKED at the start
170 * of the node.
171 * @return The node object
172 * @throws IOException
173 * If there was an error reading from the file channel
174 */
175 public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc)
176 throws IOException {
177 HTNode newNode = null;
178 int res, i;
179
180 ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
181 buffer.order(ByteOrder.LITTLE_ENDIAN);
182 buffer.clear();
183 res = fc.read(buffer);
184 assert (res == config.getBlockSize());
185 buffer.flip();
186
187 /* Read the common header part */
188 byte typeByte = buffer.get();
189 NodeType type = NodeType.fromByte(typeByte);
190 long start = buffer.getLong();
191 long end = buffer.getLong();
192 int seqNb = buffer.getInt();
193 int parentSeqNb = buffer.getInt();
194 int intervalCount = buffer.getInt();
195 buffer.get(); // TODO Used to be "isDone", to be removed from the header
196
197 /* Now the rest of the header depends on the node type */
198 switch (type) {
199 case CORE:
200 /* Core nodes */
201 newNode = new CoreNode(config, seqNb, parentSeqNb, start);
202 newNode.readSpecificHeader(buffer);
203 break;
204
205 case LEAF:
206 /* Leaf nodes */
207 newNode = new LeafNode(config, seqNb, parentSeqNb, start);
208 newNode.readSpecificHeader(buffer);
209 break;
210
211 default:
212 /* Unrecognized node type */
213 throw new IOException();
214 }
215
216 /*
217 * At this point, we should be done reading the header and 'buffer'
218 * should only have the intervals left
219 */
220 for (i = 0; i < intervalCount; i++) {
221 HTInterval interval = HTInterval.readFrom(buffer);
222 newNode.fIntervals.add(interval);
223 newNode.fSizeOfIntervalSection += interval.getSizeOnDisk();
224 }
225
226 /* Assign the node's other information we have read previously */
227 newNode.fNodeEnd = end;
228 newNode.fIsOnDisk = true;
229
230 return newNode;
231 }
232
233 /**
234 * Write this node to the given file channel.
235 *
236 * @param fc
237 * The file channel to write to (should be sought to be correct
238 * position)
239 * @throws IOException
240 * If there was an error writing
241 */
242 public final void writeSelf(FileChannel fc) throws IOException {
243 /*
244 * Yes, we are taking the *read* lock here, because we are reading the
245 * information in the node to write it to disk.
246 */
247 fRwl.readLock().lock();
248 try {
249 final int blockSize = fConfig.getBlockSize();
250
251 ByteBuffer buffer = ByteBuffer.allocate(blockSize);
252 buffer.order(ByteOrder.LITTLE_ENDIAN);
253 buffer.clear();
254
255 /* Write the common header part */
256 buffer.put(getNodeType().toByte());
257 buffer.putLong(fNodeStart);
258 buffer.putLong(fNodeEnd);
259 buffer.putInt(fSequenceNumber);
260 buffer.putInt(fParentSequenceNumber);
261 buffer.putInt(fIntervals.size());
262 buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header
263
264 /* Now call the inner method to write the specific header part */
265 writeSpecificHeader(buffer);
266
267 /* Back to us, we write the intervals */
268 fIntervals.forEach(i -> i.writeInterval(buffer));
269
270 /*
271 * Fill the rest with zeros
272 */
273 while (buffer.position() < blockSize) {
274 buffer.put((byte) 0);
275 }
276
277 /* Finally, write everything in the Buffer to disk */
278 buffer.flip();
279 int res = fc.write(buffer);
280 if (res != blockSize) {
281 throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
282 }
283
284 } finally {
285 fRwl.readLock().unlock();
286 }
287 fIsOnDisk = true;
288 }
289
290 // ------------------------------------------------------------------------
291 // Accessors
292 // ------------------------------------------------------------------------
293
    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }
302
    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }
311
    /**
     * Get the end time of this node.
     *
     * @return The end time of this node, or 0 if the node is still being
     *         built in memory (its end time is only fixed once written to
     *         disk)
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }
323
    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }
332
    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number, or -1 if this node is the root node
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }
341
    /**
     * Change this node's parent. Used when we create a new root node for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }
352
    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }
361
362 /**
363 * Add an interval to this node
364 *
365 * @param newInterval
366 * Interval to add to this node
367 */
368 public void addInterval(HTInterval newInterval) {
369 fRwl.writeLock().lock();
370 try {
371 /* Just in case, should be checked before even calling this function */
372 assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace());
373
374 /* Find the insert position to keep the list sorted */
375 int index = fIntervals.size();
376 while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
377 index--;
378 }
379
380 fIntervals.add(index, newInterval);
381 fSizeOfIntervalSection += newInterval.getSizeOnDisk();
382
383 } finally {
384 fRwl.writeLock().unlock();
385 }
386 }
387
388 /**
389 * We've received word from the containerTree that newest nodes now exist to
390 * our right. (Puts isDone = true and sets the endtime)
391 *
392 * @param endtime
393 * The nodeEnd time that the node will have
394 */
395 public void closeThisNode(long endtime) {
396 fRwl.writeLock().lock();
397 try {
398 /**
399 * FIXME: was assert (endtime >= fNodeStart); but that exception
400 * is reached with an empty node that has start time endtime + 1
401 */
402 // if (endtime < fNodeStart) {
403 // throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
404 // }
405
406 if (!fIntervals.isEmpty()) {
407 /*
408 * Make sure there are no intervals in this node with their
409 * EndTime > the one requested. Only need to check the last one
410 * since they are sorted
411 */
412 if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
413 throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
414 }
415 }
416
417 fNodeEnd = endtime;
418 } finally {
419 fRwl.writeLock().unlock();
420 }
421 }
422
423 /**
424 * The method to fill up the stateInfo (passed on from the Current State
425 * Tree when it does a query on the SHT). We'll replace the data in that
426 * vector with whatever relevant we can find from this node
427 *
428 * @param stateInfo
429 * The same stateInfo that comes from SHT's doQuery()
430 * @param t
431 * The timestamp for which the query is for. Only return
432 * intervals that intersect t.
433 * @throws TimeRangeException
434 * If 't' is invalid
435 */
436 public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
437 throws TimeRangeException {
438 /* This is from a state system query, we are "reading" this node */
439 fRwl.readLock().lock();
440 try {
441 for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
442 /*
443 * Now we only have to compare the Start times, since we now the
444 * End times necessarily fit.
445 *
446 * Second condition is to ignore new attributes that might have
447 * been created after stateInfo was instantiated (they would be
448 * null anyway).
449 */
450 ITmfStateInterval interval = fIntervals.get(i);
451 if (t >= interval.getStartTime() &&
452 interval.getAttribute() < stateInfo.size()) {
453 stateInfo.set(interval.getAttribute(), interval);
454 }
455 }
456 } finally {
457 fRwl.readLock().unlock();
458 }
459 }
460
461 /**
462 * Get a single Interval from the information in this node If the
463 * key/timestamp pair cannot be found, we return null.
464 *
465 * @param key
466 * The attribute quark to look for
467 * @param t
468 * The timestamp
469 * @return The Interval containing the information we want, or null if it
470 * wasn't found
471 * @throws TimeRangeException
472 * If 't' is invalid
473 */
474 public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
475 fRwl.readLock().lock();
476 try {
477 for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
478 HTInterval curInterval = fIntervals.get(i);
479 if (curInterval.getAttribute() == key
480 && curInterval.getStartTime() <= t
481 && curInterval.getEndTime() >= t) {
482 return curInterval;
483 }
484 }
485
486 /* We didn't find the relevant information in this node */
487 return null;
488
489 } finally {
490 fRwl.readLock().unlock();
491 }
492 }
493
    /**
     * Find the index of the first interval that could possibly intersect
     * timestamp 't', exploiting the fact that the interval list is sorted by
     * end time.
     *
     * Should only be called by methods holding the read lock.
     */
    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching, we
             * don't care if the value is exact or not.
             */
            index = -index - 1;

        } else {
            /*
             * Another API quirkiness, the returned index is the one of the *last*
             * element of a series of equal endtimes, which happens sometimes. We
             * want the *first* element of such a series, to read through them
             * again.
             */
            while (index > 0
                    && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
                index--;
            }
        }

        return index;
    }
531
    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size, common part plus type-specific part
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }
540
    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }
547
548 /**
549 * Returns the free space in the node, which is simply put, the
550 * stringSectionOffset - dataSectionOffset
551 *
552 * @return The amount of free space in the node (in bytes)
553 */
554 public int getNodeFreeSpace() {
555 fRwl.readLock().lock();
556 int ret = fConfig.getBlockSize() - getDataSectionEndOffset();
557 fRwl.readLock().unlock();
558
559 return ret;
560 }
561
562 /**
563 * Returns the current space utilization of this node, as a percentage.
564 * (used space / total usable space, which excludes the header)
565 *
566 * @return The percentage (value between 0 and 100) of space utilization in
567 * in this node.
568 */
569 public long getNodeUsagePercent() {
570 fRwl.readLock().lock();
571 try {
572 final int blockSize = fConfig.getBlockSize();
573 float freePercent = (float) getNodeFreeSpace()
574 / (float) (blockSize - getTotalHeaderSize())
575 * 100F;
576 return (long) (100L - freePercent);
577
578 } finally {
579 fRwl.readLock().unlock();
580 }
581 }
582
583 /**
584 * @name Debugging functions
585 */
586
587 @SuppressWarnings("nls")
588 @Override
589 public String toString() {
590 /* Only used for debugging, shouldn't be externalized */
591 return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
592 fSequenceNumber,
593 (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
594 toStringSpecific(),
595 fIntervals.size(),
596 getNodeUsagePercent(),
597 fNodeStart,
598 (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
599 }
600
601 /**
602 * Debugging function that prints out the contents of this node
603 *
604 * @param writer
605 * PrintWriter in which we will print the debug output
606 */
607 @SuppressWarnings("nls")
608 public void debugPrintIntervals(PrintWriter writer) {
609 /* Only used for debugging, shouldn't be externalized */
610 writer.println("Intervals for node #" + fSequenceNumber + ":");
611
612 /* Array of children */
613 if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
614 CoreNode thisNode = (CoreNode) this;
615 writer.print(" " + thisNode.getNbChildren() + " children");
616 if (thisNode.getNbChildren() >= 1) {
617 writer.print(": [ " + thisNode.getChild(0));
618 for (int i = 1; i < thisNode.getNbChildren(); i++) {
619 writer.print(", " + thisNode.getChild(i));
620 }
621 writer.print(']');
622 }
623 writer.print('\n');
624 }
625
626 /* List of intervals in the node */
627 writer.println(" Intervals contained:");
628 for (int i = 0; i < fIntervals.size(); i++) {
629 writer.println(fIntervals.get(i).toString());
630 }
631 writer.println('\n');
632 }
633
    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node (core or leaf).
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}
This page took 0.045829 seconds and 4 git commands to generate.