pcap: Move plugins to the Trace Compass namespace
[deliverable/tracecompass.git] org.eclipse.linuxtools.statesystem.core/src/org/eclipse/linuxtools/internal/statesystem/core/backend/historytree/HTNode.java
/*******************************************************************************
 * Copyright (c) 2010, 2014 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     Alexandre Montplaisir - Initial API and implementation
 *     Florian Wininger - Add Extension and Leaf Node
 *******************************************************************************/

package org.eclipse.linuxtools.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.linuxtools.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.linuxtools.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.linuxtools.statesystem.core.statevalue.TmfStateValue;

/**
 * The base class for all the types of nodes that go in the History Tree.
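 *
 * <pre>
 * // Illustrative usage sketch: 'fc' is a FileChannel already positioned at
 * // the node, 'config' is the tree's HTConfig, and 'quark'/'timestamp' are
 * // caller-supplied query parameters.
 * HTNode node = HTNode.readNode(config, fc);
 * HTInterval interval = node.getRelevantInterval(quark, timestamp);
 * </pre>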
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * The type of node
     */
    public static enum NodeType {
        /**
         * Core node, which is a "front" node, at any level of the tree except
         * the bottom-most one. It has children, and may have extensions.
         */
        CORE,
        /**
         * Leaf node, which is a node at the bottom-most level of the tree. It
         * cannot have any children or extensions.
         */
        LEAF;

        /**
         * Determine a node type by reading a serialized byte.
         *
         * @param rep
         *            The byte representation of the node type
         * @return The corresponding NodeType
         * @throws IOException
         *             If the NodeType is unrecognized
         */
        public static NodeType fromByte(byte rep) throws IOException {
            switch (rep) {
            case 1:
                return CORE;
            case 2:
                return LEAF;
            default:
                throw new IOException();
            }
        }

        /**
         * Get the byte representation of this node type. It can then be read
         * with {@link #fromByte}.
         *
         * @return The byte matching this node type
         */
        public byte toByte() {
            switch (this) {
            case CORE:
                return 1;
            case LEAF:
                return 2;
            default:
                throw new IllegalStateException();
            }
        }
    }

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig config;

    /* Time range of this node */
    private final long nodeStart;
    private long nodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int sequenceNumber;
    private int parentSequenceNumber; /* = -1 if this node is the root node */

    /* Where the Strings section begins (from the start of the node) */
    private int stringSectionOffset;

    /* Sum of bytes of all intervals in the node */
    private int sizeOfIntervalSection;

    /* True if this node is on disk (meaning its end time is now fixed) */
    private volatile boolean isOnDisk;

    /* Vector containing all the intervals contained in this node */
    private final List<HTInterval> intervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(false);

    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        this.config = config;
        this.nodeStart = start;
        this.sequenceNumber = seqNumber;
        this.parentSequenceNumber = parentSeqNumber;

        this.stringSectionOffset = config.getBlockSize();
        this.sizeOfIntervalSection = 0;
        this.isOnDisk = false;
        this.intervals = new ArrayList<>();
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by
     * reading a block from the file.
     *
     * @param config
     *            Configuration of the History Tree
     * @param fc
     *            FileChannel to the history file, ALREADY POSITIONED at the
     *            start of the node.
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final HTNode readNode(HTConfig config, FileChannel fc)
            throws IOException {
        HTNode newNode = null;
        int res, i;

        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        res = fc.read(buffer);
        assert (res == config.getBlockSize());
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        int stringSectionOffset = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
            /* Core nodes */
            newNode = new CoreNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        case LEAF:
            /* Leaf nodes */
            newNode = new LeafNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        for (i = 0; i < intervalCount; i++) {
            newNode.intervals.add(HTInterval.readFrom(buffer));
        }

        /* Assign the node's other information we have read previously */
        newNode.nodeEnd = end;
        newNode.stringSectionOffset = stringSectionOffset;
        newNode.isOnDisk = true;

        return newNode;
    }

    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should already be positioned
     *            at the correct location)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        rwl.readLock().lock();
        try {
            final int blockSize = config.getBlockSize();
            int curStringsEntryEndPos = blockSize;

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(this.getNodeType().toByte());
            buffer.putLong(nodeStart);
            buffer.putLong(nodeEnd);
            buffer.putInt(sequenceNumber);
            buffer.putInt(parentSequenceNumber);
            buffer.putInt(intervals.size());
            buffer.putInt(stringSectionOffset);
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            this.writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
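            /*
             * Interval data fills the buffer forward from here, while each
             * interval's Strings entry is written backwards from the end of
             * the block, tracked by curStringsEntryEndPos.
             */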
            for (HTInterval interval : intervals) {
                int size = interval.writeInterval(buffer, curStringsEntryEndPos);
                curStringsEntryEndPos -= size;
            }

            /*
             * Write padding between the end of the Data section and the start
             * of the Strings section (needed to fill the node in case there is
             * no Strings section)
             */
            while (buffer.position() < stringSectionOffset) {
                buffer.put((byte) 0);
            }

            /*
             * If the offsets were right, the size of the Strings section should
             * be == to the expected size
             */
            assert (curStringsEntryEndPos == stringSectionOffset);

            /* Finally, write everything in the Buffer to disk */

            // if we don't do this, flip() will lose what's after.
            buffer.position(blockSize);

            buffer.flip();
            int res = fc.write(buffer);
            assert (res == blockSize);

        } finally {
            rwl.readLock().unlock();
        }
        isOnDisk = true;
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return config;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return nodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node
     */
    public long getNodeEnd() {
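        /*
         * The end time is only meaningful once the node has been closed and
         * written to disk; until then, report 0.
         */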
        if (this.isOnDisk) {
            return nodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return sequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number
     */
    public int getParentSequenceNumber() {
        return parentSequenceNumber;
    }

    /**
     * Change this node's parent. Used, for example, when we create a new root
     * node.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        parentSequenceNumber = newParent;
    }

    /**
     * Return whether this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return isOnDisk;
    }

    /**
     * Add an interval to this node
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        rwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());

            intervals.add(newInterval);
            sizeOfIntervalSection += newInterval.getIntervalSize();

            /* Update the in-node offset "pointer" */
            stringSectionOffset -= (newInterval.getStringsEntrySize());
        } finally {
            rwl.writeLock().unlock();
        }
    }

    /**
     * We've received word from the containerTree that newer nodes now exist
     * to our right. (Sets this node's end time.)
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        rwl.writeLock().lock();
        try {
            assert (endtime >= this.nodeStart);

            if (!intervals.isEmpty()) {
                /*
                 * Sort the intervals by ascending order of their end time.
                 * This speeds up lookups a bit
                 */
                Collections.sort(intervals);

                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are now sorted
                 */
                assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
            }

            this.nodeEnd = endtime;
        } finally {
            rwl.writeLock().unlock();
        }
    }

    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant information we can find in this node.
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp the query is for. Only return intervals that
     *            intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        rwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < intervals.size(); i++) {
                /*
                 * Now we only have to compare the Start times, since we know
                 * the End times necessarily fit.
                 *
                 * Second condition is to ignore new attributes that might have
                 * been created after stateInfo was instantiated (they would be
                 * null anyway).
                 */
                ITmfStateInterval interval = intervals.get(i);
                if (interval.getStartTime() <= t &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            rwl.readLock().unlock();
        }
    }

    /**
     * Get a single Interval from the information in this node. If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        rwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < intervals.size(); i++) {
                HTInterval curInterval = intervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t
                        && curInterval.getEndTime() >= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            rwl.readLock().unlock();
        }
    }

    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (intervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
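        /*
         * The dummy interval carries 't' as its end time; since intervals are
         * ordered by end time, binarySearch tells us where an interval ending
         * exactly at 't' would be located.
         */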
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(intervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching,
             * we don't care if the value is exact or not.
             */
            index = -index - 1;
        }

        /* Sometimes binarySearch yields weird stuff... */
        if (index < 0) {
            index = 0;
        }
        if (index >= intervals.size()) {
            index = intervals.size() - 1;
        }

        /*
         * Another API quirk: the returned index is the one of the *last*
         * element of a series of equal end times, which happens sometimes. We
         * want the *first* element of such a series, to read through them
         * again.
         */
        while (index > 0
                && intervals.get(index - 1).compareTo(intervals.get(index)) == 0) {
            index--;
        }

        return index;
    }

    /**
     * Size (in bytes) of the common node header, i.e.:
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = 34;

    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return this.getTotalHeaderSize() + sizeOfIntervalSection;
    }

    /**
     * Returns the free space in the node, which is, simply put,
     * stringSectionOffset - the Data section's end offset.
     *
     * @return The amount of free space in the node (in bytes)
     */
    public int getNodeFreeSpace() {
        rwl.readLock().lock();
        int ret = stringSectionOffset - this.getDataSectionEndOffset();
        rwl.readLock().unlock();

        return ret;
    }

    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        rwl.readLock().lock();
        try {
            final int blockSize = config.getBlockSize();
            float freePercent = (float) this.getNodeFreeSpace()
                    / (float) (blockSize - this.getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            rwl.readLock().unlock();
        }
    }

    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        StringBuffer buf = new StringBuffer("Node #" + sequenceNumber + ", ");
        buf.append(this.toStringSpecific());
        buf.append(intervals.size() + " intervals (" + this.getNodeUsagePercent()
                + "% used), ");

        buf.append("[" + this.nodeStart + " - ");
        if (this.isOnDisk) {
            buf = buf.append("" + this.nodeEnd + "]");
        } else {
            buf = buf.append("...]");
        }
        return buf.toString();
    }

    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Node #" + sequenceNumber + ":");

        /* Array of children */
        if (this.getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
            CoreNode thisNode = (CoreNode) this;
            writer.print(" " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println(" Intervals contained:");
        for (int i = 0; i < intervals.size(); i++) {
            writer.println(intervals.get(i).toString());
        }
        writer.println('\n');
    }

    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node (CORE or LEAF).
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}