charts: Make the custom chart formatters immutable
[deliverable/tracecompass.git] / statesystem / org.eclipse.tracecompass.statesystem.core / src / org / eclipse / tracecompass / internal / statesystem / core / backend / historytree / HTNode.java
CommitLineData
a52fde77 1/*******************************************************************************
b2ca67ca 2 * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
6f4e8ec0 3 *
a52fde77
AM
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
6f4e8ec0 8 *
bb7f92ce
FW
9 * Contributors:
10 * Alexandre Montplaisir - Initial API and implementation
11 * Florian Wininger - Add Extension and Leaf Node
2a966f6f 12 * Patrick Tasse - Keep interval list sorted on insert
a52fde77
AM
13 *******************************************************************************/
14
e894a508 15package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;
a52fde77
AM
16
17import java.io.IOException;
18import java.io.PrintWriter;
19import java.nio.ByteBuffer;
20import java.nio.ByteOrder;
21import java.nio.channels.FileChannel;
22import java.util.ArrayList;
23import java.util.Collections;
3996feb9 24import java.util.Comparator;
a52fde77 25import java.util.List;
62197b87 26import java.util.concurrent.locks.ReentrantReadWriteLock;
a52fde77 27
aa353506 28import org.eclipse.jdt.annotation.NonNull;
e894a508
AM
29import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
30import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
31import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;
a52fde77 32
822798a3
GB
33import com.google.common.collect.Iterables;
34
a52fde77
AM
35/**
36 * The base class for all the types of nodes that go in the History Tree.
6f4e8ec0 37 *
ffd0aa67 38 * @author Alexandre Montplaisir
a52fde77 39 */
8d47cc34 40public abstract class HTNode {
a52fde77 41
bb7f92ce
FW
42 // ------------------------------------------------------------------------
43 // Class fields
44 // ------------------------------------------------------------------------
45
46 /**
47 * The type of node
48 */
49 public static enum NodeType {
50 /**
51 * Core node, which is a "front" node, at any level of the tree except
52 * the bottom-most one. It has children, and may have extensions.
53 */
54 CORE,
55 /**
56 * Leaf node, which is a node at the last bottom level of the tree. It
57 * cannot have any children or extensions.
58 */
59 LEAF;
60
61 /**
62 * Determine a node type by reading a serialized byte.
63 *
64 * @param rep
65 * The byte representation of the node type
66 * @return The corresponding NodeType
67 * @throws IOException
68 * If the NodeType is unrecognized
69 */
70 public static NodeType fromByte(byte rep) throws IOException {
71 switch (rep) {
72 case 1:
73 return CORE;
74 case 2:
75 return LEAF;
76 default:
77 throw new IOException();
78 }
79 }
80
81 /**
82 * Get the byte representation of this node type. It can then be read
83 * with {@link #fromByte}.
84 *
85 * @return The byte matching this node type
86 */
87 public byte toByte() {
88 switch (this) {
89 case CORE:
90 return 1;
91 case LEAF:
92 return 2;
93 default:
94 throw new IllegalStateException();
95 }
96 }
97 }
98
0e9b2f07
GB
    /**
     * Size of the common part of the node header, in bytes:
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 12 - 3x int (seq number, parent seq number, intervalcount)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 3 * Integer.BYTES
            + Byte.BYTES;
0e9b2f07 111
bb7f92ce
FW
    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which belongs this node */
    private final HTConfig fConfig;

    /* Time range of this node. The end time is mutable until the node is closed. */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* List containing all the intervals contained in this node, kept in NODE_ORDER */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Order of intervals in a HTNode: sorted by end times, then by start
     * times, then by attribute quark (the last key makes the order total, so
     * binarySearch results are deterministic).
     */
    private static final Comparator<ITmfStateInterval> NODE_ORDER = Comparator
            .comparingLong(ITmfStateInterval::getEndTime)
            .thenComparingLong(ITmfStateInterval::getStartTime)
            .thenComparingInt(ITmfStateInterval::getAttribute);
144
8d47cc34
AM
    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        /* A freshly-built node is empty and lives in memory only */
        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }
167
168 /**
8d47cc34
AM
169 * Reader factory method. Build a Node object (of the right type) by reading
170 * a block in the file.
6f4e8ec0 171 *
ffd0aa67
EB
172 * @param config
173 * Configuration of the History Tree
a52fde77
AM
174 * @param fc
175 * FileChannel to the history file, ALREADY SEEKED at the start
176 * of the node.
f4baf640
GB
177 * @param nodeFactory
178 * The factory to create the nodes for this tree
8d47cc34 179 * @return The node object
a52fde77 180 * @throws IOException
8d47cc34 181 * If there was an error reading from the file channel
a52fde77 182 */
f4baf640 183 public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc, IHistoryTree.IHTNodeFactory nodeFactory)
a52fde77
AM
184 throws IOException {
185 HTNode newNode = null;
186 int res, i;
187
ffd0aa67 188 ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
a52fde77
AM
189 buffer.order(ByteOrder.LITTLE_ENDIAN);
190 buffer.clear();
191 res = fc.read(buffer);
ffd0aa67 192 assert (res == config.getBlockSize());
a52fde77
AM
193 buffer.flip();
194
195 /* Read the common header part */
bb7f92ce
FW
196 byte typeByte = buffer.get();
197 NodeType type = NodeType.fromByte(typeByte);
a52fde77
AM
198 long start = buffer.getLong();
199 long end = buffer.getLong();
200 int seqNb = buffer.getInt();
201 int parentSeqNb = buffer.getInt();
202 int intervalCount = buffer.getInt();
045badfe 203 buffer.get(); // TODO Used to be "isDone", to be removed from the header
a52fde77
AM
204
205 /* Now the rest of the header depends on the node type */
206 switch (type) {
bb7f92ce 207 case CORE:
a52fde77 208 /* Core nodes */
f4baf640 209 newNode = nodeFactory.createCoreNode(config, seqNb, parentSeqNb, start);
a52fde77
AM
210 newNode.readSpecificHeader(buffer);
211 break;
212
bb7f92ce
FW
213 case LEAF:
214 /* Leaf nodes */
f4baf640 215 newNode = nodeFactory.createLeafNode(config, seqNb, parentSeqNb, start);
bb7f92ce
FW
216 newNode.readSpecificHeader(buffer);
217 break;
a52fde77
AM
218
219 default:
220 /* Unrecognized node type */
221 throw new IOException();
222 }
223
224 /*
225 * At this point, we should be done reading the header and 'buffer'
226 * should only have the intervals left
227 */
228 for (i = 0; i < intervalCount; i++) {
0ce45cd4 229 HTInterval interval = HTInterval.readFrom(buffer);
0e9b2f07 230 newNode.fIntervals.add(interval);
59d30d83 231 newNode.fSizeOfIntervalSection += interval.getSizeOnDisk();
a52fde77
AM
232 }
233
234 /* Assign the node's other information we have read previously */
0e9b2f07 235 newNode.fNodeEnd = end;
0e9b2f07 236 newNode.fIsOnDisk = true;
a52fde77
AM
237
238 return newNode;
239 }
240
8d47cc34
AM
    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should be sought to be correct
     *            position)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part (must mirror the layout read by readNode) */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            fIntervals.forEach(i -> i.writeInterval(buffer));
            /* Sanity check: on-disk layout must agree with the bookkeeping */
            if (blockSize - buffer.position() != getNodeFreeSpace()) {
                throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$
            }
            /*
             * Fill the rest with zeros
             */
            while (buffer.position() < blockSize) {
                buffer.put((byte) 0);
            }

            /* Finally, write everything in the Buffer to disk */
            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        /* Set outside the lock: fIsOnDisk is volatile, so the write is visible */
        fIsOnDisk = true;
    }
299
300 // ------------------------------------------------------------------------
301 // Accessors
302 // ------------------------------------------------------------------------
303
8d47cc34
AM
    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }
312
8d47cc34
AM
    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }
321
8d47cc34
AM
    /**
     * Get the end time of this node. Only valid (non-zero) once the node has
     * been written to disk, since the end time is not final until then.
     *
     * @return The end time of this node, or 0 if the node is still in memory
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }
333
8d47cc34
AM
    /**
     * Get the sequence number of this node (its position in the node section
     * of the history file).
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }
342
8d47cc34
AM
    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number, or -1 if this node is the root node
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }
351
    /**
     * Change this node's parent. Used when we create a new root node for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }
362
8d47cc34
AM
    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }
371
    /**
     * Add an interval to this node. The caller is expected to have verified,
     * via {@link #getNodeFreeSpace()}, that the interval fits.
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace());

            /* Find the insert position to keep the list sorted (NODE_ORDER) */
            int index = 0;
            if (!fIntervals.isEmpty()) {
                index = Collections.binarySearch(fIntervals, newInterval, NODE_ORDER);
                /*
                 * Interval should not already be in the node, binarySearch will
                 * return (-insertionPoint - 1).
                 */
                index = -index - 1;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += newInterval.getSizeOnDisk();

        } finally {
            fRwl.writeLock().unlock();
        }
    }
402
    /**
     * We've received word from the containerTree that newest nodes now exist to
     * our right. (Puts isDone = true and sets the endtime)
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception
             * is reached with an empty node that has start time endtime + 1
             */
//            if (endtime < fNodeStart) {
//                throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
//            }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }
437
    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant we can find from this node
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp for which the query is for. Only return
     *            intervals that intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            /* getStartIndexFor() skips intervals ending strictly before 't' */
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the start times, since we know
                 * the end times necessarily fit.
                 *
                 * Second condition is to ignore new attributes that might have
                 * been created after stateInfo was instantiated (they would be
                 * null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (t >= interval.getStartTime() &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }
475
    /**
     * Get a single Interval from the information in this node If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        fRwl.readLock().lock();
        try {
            /* Only intervals ending at or after 't' can intersect it */
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                HTInterval curInterval = fIntervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            fRwl.readLock().unlock();
        }
    }
507
    /**
     * Get the index of the first interval in this node whose end time is >=
     * 't'; intervals before that index cannot intersect 't'.
     *
     * @param t
     *            The query timestamp
     * @return The index in fIntervals at which a linear scan should start
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }

        /*
         * Since the intervals are sorted by end time then by start time, we can
         * skip all the ones at the beginning whose end times are smaller than
         * 't'. We search for a dummy interval from [Long.MIN_VALUE, t], which
         * will return the first interval that ends with a time >= t.
         */
        HTInterval dummy = new HTInterval(Long.MIN_VALUE, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy, NODE_ORDER);

        /* Handle negative binarySearch return (-insertionPoint - 1) */
        return (index >= 0 ? index : -index - 1);
    }
527
62197b87
AM
    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size, in bytes
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }
536
a52fde77
AM
    /**
     * Offset, within the node, where the Data (interval) section ends, i.e.
     * header size plus the bytes used by the intervals so far.
     *
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }
543
544 /**
545 * Returns the free space in the node, which is simply put, the
546 * stringSectionOffset - dataSectionOffset
8d47cc34
AM
547 *
548 * @return The amount of free space in the node (in bytes)
a52fde77 549 */
8d47cc34 550 public int getNodeFreeSpace() {
0e9b2f07 551 fRwl.readLock().lock();
59d30d83 552 int ret = fConfig.getBlockSize() - getDataSectionEndOffset();
0e9b2f07 553 fRwl.readLock().unlock();
62197b87
AM
554
555 return ret;
a52fde77
AM
556 }
557
    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization in
     *         this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            /* Compute the free percentage first, then invert it */
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }
578
a52fde77
AM
    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                /* "..." while the end time is still unknown (node in memory) */
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }
596
    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Intervals for node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() != NodeType.LEAF) { /* Only Core Nodes can have children */
            ParentNode thisNode = (ParentNode) this;
            writer.print(" " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        /* NOTE(review): fIntervals is read without taking fRwl here — debug-only, but verify */
        writer.println(" Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }
629
6f4e8ec0
AM
630 // ------------------------------------------------------------------------
631 // Abstract methods
632 // ------------------------------------------------------------------------
a52fde77 633
8d47cc34
AM
    /**
     * Get the type (CORE or LEAF) of this node. (Note: despite the name of the
     * original doc, this returns the enum constant, not its byte encoding.)
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();
a52fde77 640
8d47cc34 641 /**
62197b87
AM
642 * Return the specific header size of this node. This means the size
643 * occupied by the type-specific section of the header (not counting the
644 * common part).
8d47cc34 645 *
62197b87 646 * @return The specific header size
8d47cc34 647 */
62197b87 648 protected abstract int getSpecificHeaderSize();
a52fde77 649
8d47cc34
AM
650 /**
651 * Read the type-specific part of the node header from a byte buffer.
652 *
653 * @param buffer
654 * The byte buffer to read from. It should be already positioned
655 * correctly.
656 */
62197b87 657 protected abstract void readSpecificHeader(ByteBuffer buffer);
a52fde77 658
8d47cc34
AM
659 /**
660 * Write the type-specific part of the header in a byte buffer.
661 *
662 * @param buffer
663 * The buffer to write to. It should already be at the correct
664 * position.
665 */
62197b87 666 protected abstract void writeSpecificHeader(ByteBuffer buffer);
a52fde77 667
8d47cc34
AM
668 /**
669 * Node-type-specific toString method. Used for debugging.
670 *
671 * @return A string representing the node
672 */
62197b87 673 protected abstract String toStringSpecific();
a52fde77 674}
This page took 0.150669 seconds and 5 git commands to generate.