/*******************************************************************************
 * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
 *   Florian Wininger - Add Extension and Leaf Node
 *   Patrick Tasse - Keep interval list sorted on insert
 *******************************************************************************/

package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * The type of node
     */
    public static enum NodeType {
        /**
         * Core node, which is a "front" node, at any level of the tree except
         * the bottom-most one. It has children, and may have extensions.
         */
        CORE,
        /**
         * Leaf node, which is a node at the bottom-most level of the tree. It
         * cannot have any children or extensions.
         */
        LEAF;

        /**
         * Determine a node type by reading a serialized byte.
         *
         * @param rep
         *            The byte representation of the node type
         * @return The corresponding NodeType
         * @throws IOException
         *             If the NodeType is unrecognized
         */
        public static NodeType fromByte(byte rep) throws IOException {
            switch (rep) {
            case 1:
                return CORE;
            case 2:
                return LEAF;
            default:
                throw new IOException();
            }
        }

        /**
         * Get the byte representation of this node type. It can then be read
         * with {@link #fromByte}.
         *
         * @return The byte matching this node type
         */
        public byte toByte() {
            switch (this) {
            case CORE:
                return 1;
            case LEAF:
                return 2;
            default:
                throw new IllegalStateException();
            }
        }
    }
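
    /*
     * Example round-trip of the type byte, e.g. to peek at a node's type
     * before deserializing the whole block (a minimal sketch, assuming a
     * ByteBuffer positioned at the start of a serialized node, as in
     * readNode() below):
     *
     *     byte rep = buffer.get();
     *     NodeType type = NodeType.fromByte(rep);
     *     assert type.toByte() == rep;
     */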

    /**
     * Size reserved for the common node header. The serialized header itself
     * is 30 bytes:
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 12 - 3x int (seq number, parent seq number, interval count)
     *  1 - byte (done or not)
     * </pre>
     *
     * The remaining 4 bytes were formerly used by the strings section
     * position and are kept reserved in the size accounting.
     */
    private static final int COMMON_HEADER_SIZE = 34;

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig fConfig;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* Vector containing all the intervals contained in this node */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }
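
    /*
     * Concrete subclasses are instantiated by the owning tree, e.g. (a
     * sketch; the sequence numbers and start time come from the tree):
     *
     *     HTNode node = new LeafNode(config, newSeqNumber, parentSeqNumber, startTime);
     *
     * The end time is only fixed later, when the tree calls closeThisNode().
     */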

    /**
     * Reader factory method. Build a Node object (of the right type) by reading
     * a block in the file.
     *
     * @param config
     *            Configuration of the History Tree
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc)
            throws IOException {
        HTNode newNode = null;
        int res, i;

        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        res = fc.read(buffer);
        assert (res == config.getBlockSize());
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
            /* Core nodes */
            newNode = new CoreNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        case LEAF:
            /* Leaf nodes */
            newNode = new LeafNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        for (i = 0; i < intervalCount; i++) {
            HTInterval interval = HTInterval.readFrom(buffer);
            newNode.fIntervals.add(interval);
            newNode.fSizeOfIntervalSection += interval.getSizeOnDisk();
        }

        /* Assign the node's other information we have read previously */
        newNode.fNodeEnd = end;
        newNode.fIsOnDisk = true;

        return newNode;
    }
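
    /*
     * Typical call site (a sketch): the owning tree seeks to the node's
     * slot before calling readNode(). The offset formula below is an
     * assumption for illustration; the actual layout of the node section
     * is defined by the tree's file header.
     *
     *     long offset = treeHeaderSize + (long) seqNumber * config.getBlockSize();
     *     fc.position(offset);
     *     HTNode node = HTNode.readNode(config, fc);
     */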

    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should already be positioned
     *            at the correct location)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            fIntervals.forEach(i -> i.writeInterval(buffer));

            /*
             * Fill the rest with zeros
             */
            while (buffer.position() < blockSize) {
                buffer.put((byte) 0);
            }

            /* Finally, write everything in the Buffer to disk */
            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }
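
    /*
     * Write-side counterpart of the readNode() sketch above: the caller
     * positions the channel on the node's slot first (the offset formula
     * is the same assumption as above):
     *
     *     fc.position(treeHeaderSize + (long) node.getSequenceNumber() * blockSize);
     *     node.writeSelf(fc);
     */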

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Add an interval to this node
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace());

            /* Find the insert position to keep the list sorted */
            int index = fIntervals.size();
            while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
                index--;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += newInterval.getSizeOnDisk();

        } finally {
            fRwl.writeLock().unlock();
        }
    }
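
    /*
     * The backward scan above keeps fIntervals sorted by the intervals'
     * natural order (their end times) and is stable for equal keys. For
     * example, inserting intervals with end times 10, 5, 10 yields the
     * list [5, 10, 10]: the scan stops at the first element that is not
     * greater than the new one, so the second "10" lands after the first.
     */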

    /**
     * We've received word from the containerTree that newer nodes now exist to
     * our right. Sets the end time of this node.
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception
             * is reached with an empty node that has start time endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant information we can find in this node.
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp the query is for. Only return intervals that
     *            intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the start times, since we know
                 * the end times necessarily fit.
                 *
                 * Second condition is to ignore new attributes that might have
                 * been created after stateInfo was instantiated (they would be
                 * null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (interval.getStartTime() <= t &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }
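
    /*
     * Used by full queries: the state system allocates one slot per known
     * attribute quark and each node along the queried branch fills in the
     * intervals it holds (a sketch; 'nbAttributes' is an assumption):
     *
     *     List<ITmfStateInterval> stateInfo =
     *             new ArrayList<>(Collections.nCopies(nbAttributes, (ITmfStateInterval) null));
     *     node.writeInfoFromNode(stateInfo, t);
     */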

    /**
     * Get a single Interval from the information in this node. If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                HTInterval curInterval = fIntervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t
                        && curInterval.getEndTime() >= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            fRwl.readLock().unlock();
        }
    }
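
    /*
     * Example single query (a sketch): fetch the interval covering
     * attribute quark 4 at timestamp t, if this node holds it:
     *
     *     HTInterval interval = node.getRelevantInterval(4, t);
     *     if (interval != null) {
     *         // interval.getStartTime() <= t <= interval.getEndTime()
     *     }
     */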

    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching,
             * we don't care if the value is exact or not.
             */
            index = -index - 1;

        } else {
            /*
             * Another API quirkiness: when there is a series of equal end
             * times (which happens sometimes), there is no guarantee which
             * element of the series is returned. We want the *first* element
             * of such a series, to read through them again.
             */
            while (index > 0
                    && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
                index--;
            }
        }

        return index;
    }
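
    /*
     * Worked example of the index fix-ups above, with interval end times
     * [2, 5, 5, 9]:
     *
     * - t = 4: binarySearch() misses and returns -(insertionPoint) - 1 =
     *   -(1) - 1 = -2, so -index - 1 recovers the start index 1.
     * - t = 5: binarySearch() may return index 1 or 2 (no guarantee for
     *   runs of equal keys); the backward scan rewinds to the first of
     *   the equal entries, index 1.
     */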

    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }

    /**
     * Returns the free space left in the node, which is simply the block size
     * minus the offset at which the data section ends.
     *
     * @return The amount of free space in the node (in bytes)
     */
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        int ret = fConfig.getBlockSize() - getDataSectionEndOffset();
        fRwl.readLock().unlock();

        return ret;
    }

    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }
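
    /*
     * Worked example (assuming a node type with no type-specific header):
     * with a 4096-byte block, a 34-byte header and 2031 bytes of intervals,
     * free space is 4096 - (34 + 2031) = 2031 bytes out of 4096 - 34 = 4062
     * usable bytes, so freePercent = 50.0 and this method returns 50.
     */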

    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }

    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
            CoreNode thisNode = (CoreNode) this;
            writer.print(" " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println(" Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }

    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node.
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}