/*******************************************************************************
 * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
 *   Florian Wininger - Add Extension and Leaf Node
 *   Patrick Tasse - Keep interval list sorted on insert
 *******************************************************************************/

package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
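 * <p>
 * A minimal write-side sketch (illustrative only; {@code node} stands for any
 * concrete subclass instance, and {@code start}, {@code end},
 * {@code attributeQuark}, {@code treeEnd} and {@code fileChannel} are
 * placeholder variables, not part of this class):
 * </p>
 *
 * <pre>
 * {@code
 * node.addInterval(new HTInterval(start, end, attributeQuark, TmfStateValue.nullValue()));
 * node.closeThisNode(treeEnd);  // fix this node's end time
 * node.writeSelf(fileChannel);  // serialize the node at the channel's current position
 * }
 * </pre>
 *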
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * The type of node
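     *
     * <p>
     * The type is serialized as a single byte in the node header; an
     * illustrative round-trip using only the two methods below:
     * </p>
     *
     * <pre>
     * {@code
     * byte rep = NodeType.CORE.toByte();      // 1
     * NodeType type = NodeType.fromByte(rep); // NodeType.CORE
     * }
     * </pre>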
     */
    public static enum NodeType {
        /**
         * Core node, which is a "front" node, at any level of the tree except
         * the bottom-most one. It has children, and may have extensions.
         */
        CORE,
        /**
         * Leaf node, which is a node at the last bottom level of the tree. It
         * cannot have any children or extensions.
         */
        LEAF;

        /**
         * Determine a node type by reading a serialized byte.
         *
         * @param rep
         *            The byte representation of the node type
         * @return The corresponding NodeType
         * @throws IOException
         *             If the NodeType is unrecognized
         */
        public static NodeType fromByte(byte rep) throws IOException {
            switch (rep) {
            case 1:
                return CORE;
            case 2:
                return LEAF;
            default:
                throw new IOException();
            }
        }

        /**
         * Get the byte representation of this node type. It can then be read
         * with {@link #fromByte}.
         *
         * @return The byte matching this node type
         */
        public byte toByte() {
            switch (this) {
            case CORE:
                return 1;
            case LEAF:
                return 2;
            default:
                throw new IllegalStateException();
            }
        }
    }

    /**
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
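     *
     * Total: 1 + 16 + 16 + 1 = 34 bytes.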
     */
    private static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 4 * Integer.BYTES
            + Byte.BYTES;

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig fConfig;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* Vector containing all the intervals contained in this node */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by reading
     * a block in the file.
     *
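     * <p>
     * Illustrative call sketch: the caller is responsible for positioning the
     * channel at the node's block beforehand ({@code nodeBlockOffset} below is
     * a hypothetical value computed by the caller, shown only for
     * illustration):
     * </p>
     *
     * <pre>
     * {@code
     * fc.position(nodeBlockOffset); // hypothetical offset of the node's block
     * HTNode node = HTNode.readNode(config, fc, nodeFactory);
     * }
     * </pre>
     *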
     * @param config
     *            Configuration of the History Tree
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @param nodeFactory
     *            The factory to create the nodes for this tree
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc, IHistoryTree.IHTNodeFactory nodeFactory)
            throws IOException {
        HTNode newNode = null;
        int res, i;

        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        res = fc.read(buffer);
        assert (res == config.getBlockSize());
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
            /* Core nodes */
            newNode = nodeFactory.createCoreNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        case LEAF:
            /* Leaf nodes */
            newNode = nodeFactory.createLeafNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        for (i = 0; i < intervalCount; i++) {
            HTInterval interval = HTInterval.readFrom(buffer);
            newNode.fIntervals.add(interval);
            newNode.fSizeOfIntervalSection += interval.getSizeOnDisk();
        }

        /* Assign the node's other information we have read previously */
        newNode.fNodeEnd = end;
        newNode.fIsOnDisk = true;

        return newNode;
    }

    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should already be positioned
     *            at the correct location)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            fIntervals.forEach(i -> i.writeInterval(buffer));

            /* Fill the rest with zeros */
            while (buffer.position() < blockSize) {
                buffer.put((byte) 0);
            }

            /* Finally, write everything in the Buffer to disk */
            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Add an interval to this node
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace());

            /* Find the insert position to keep the list sorted */
            int index = fIntervals.size();
            while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
                index--;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += newInterval.getSizeOnDisk();

        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * We've received word from the containerTree that newer nodes now exist to
     * our right. (Sets this node's end time.)
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception
             * is reached with an empty node that has start time endtime + 1
             */
//            if (endtime < fNodeStart) {
//                throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
//            }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant information we can find in this node.
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp the query is for. Only return intervals that
     *            intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the Start times, since we know
                 * the End times necessarily fit.
                 *
                 * Second condition is to ignore new attributes that might have
                 * been created after stateInfo was instantiated (they would be
                 * null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (t >= interval.getStartTime() &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Get a single Interval from the information in this node. If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                HTInterval curInterval = fIntervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t
                        && curInterval.getEndTime() >= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            fRwl.readLock().unlock();
        }
    }

    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
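        /*
         * The dummy interval below is only a search key: the binary search
         * relies on the interval ordering (by end time, as noted above), so
         * the attribute (0) and the null value are just placeholders.
         */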
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching,
             * we don't care if the value is exact or not.
             */
            index = -index - 1;

        } else {
            /*
             * Another API quirk: the returned index is that of the *last*
             * element of a series of equal end times, which happens sometimes.
             * We want the *first* element of such a series, to read through
             * them again.
             */
            while (index > 0
                    && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
                index--;
            }
        }

        return index;
    }

    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }

    /**
     * Returns the free space left in the node, which is, simply put, the
     * block size minus the offset where the data section ends.
     *
     * @return The amount of free space in the node (in bytes)
     */
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        int ret = fConfig.getBlockSize() - getDataSectionEndOffset();
        fRwl.readLock().unlock();

        return ret;
    }

    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }

    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Intervals for node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() != NodeType.LEAF) { /* Only Core Nodes can have children */
            ParentNode thisNode = (ParentNode) this;
            writer.print(" " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println(" Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }

    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node.
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}