/*******************************************************************************
 * Copyright (c) 2010, 2015 Ericsson, École Polytechnique de Montréal, and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Alexandre Montplaisir - Initial API and implementation
 *   Florian Wininger - Add Extension and Leaf Node
 *   Patrick Tasse - Keep interval list sorted on insert
 *******************************************************************************/

package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException;
import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval;
import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * The type of node
     */
    public static enum NodeType {
        /**
         * Core node, which is a "front" node, at any level of the tree except
         * the bottom-most one. It has children, and may have extensions.
         */
        CORE,
        /**
         * Leaf node, which is a node at the bottom-most level of the tree. It
         * cannot have any children or extensions.
         */
        LEAF;

        /**
         * Determine a node type by reading a serialized byte.
         *
         * @param rep
         *            The byte representation of the node type
         * @return The corresponding NodeType
         * @throws IOException
         *             If the NodeType is unrecognized
         */
        public static NodeType fromByte(byte rep) throws IOException {
            switch (rep) {
            case 1:
                return CORE;
            case 2:
                return LEAF;
            default:
                throw new IOException();
            }
        }

        /**
         * Get the byte representation of this node type. It can then be read
         * with {@link #fromByte}.
         *
         * @return The byte matching this node type
         */
        public byte toByte() {
            switch (this) {
            case CORE:
                return 1;
            case LEAF:
                return 2;
            default:
                throw new IllegalStateException();
            }
        }
    }

    /**
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = 34;
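
    /*
     * For reference, those 34 bytes are 1 + (2 * 8) + (4 * 4) + 1. The
     * resulting on-disk block layout, as written by writeSelf() below, is
     * roughly:
     *
     *   [ common header | specific header | intervals (Data section) | padding | Strings section ]
     *   0                                                               fStringSectionOffset   blockSize
     *
     * with the Strings section filling up backwards from the end of the
     * block as intervals are added.
     */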

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which this node belongs */
    private final HTConfig fConfig;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Where the Strings section begins (from the start of the node) */
    private int fStringSectionOffset;

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* Vector containing all the intervals contained in this node */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fStringSectionOffset = config.getBlockSize();
        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by
     * reading a block in the file.
     *
     * @param config
     *            Configuration of the History Tree
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final HTNode readNode(HTConfig config, FileChannel fc)
            throws IOException {
        HTNode newNode = null;
        int res, i;

        ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize());
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        res = fc.read(buffer);
        assert (res == config.getBlockSize());
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        int stringSectionOffset = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
            /* Core nodes */
            newNode = new CoreNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        case LEAF:
            /* Leaf nodes */
            newNode = new LeafNode(config, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        for (i = 0; i < intervalCount; i++) {
            HTInterval interval = HTInterval.readFrom(buffer);
            newNode.fIntervals.add(interval);
            newNode.fSizeOfIntervalSection += interval.getIntervalSize();
        }

        /* Assign the node's other information we have read previously */
        newNode.fNodeEnd = end;
        newNode.fStringSectionOffset = stringSectionOffset;
        newNode.fIsOnDisk = true;

        return newNode;
    }

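    /*
     * Illustrative usage sketch (not part of this class; the offset
     * computation and the 'treeHeaderSize' name are hypothetical): since a
     * node's sequence number is its position in the node section of the
     * file, a caller would typically do something like
     *
     *   fc.position(treeHeaderSize + (long) seqNumber * config.getBlockSize());
     *   HTNode node = HTNode.readNode(config, fc);
     *
     * before handing the channel to readNode().
     */
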
    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should be sought to the
     *            correct position)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            int curStringsEntryEndPos = blockSize;

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.putInt(fStringSectionOffset);
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            for (HTInterval interval : fIntervals) {
                int size = interval.writeInterval(buffer, curStringsEntryEndPos);
                curStringsEntryEndPos -= size;
            }

            /*
             * Write padding between the end of the Data section and the start
             * of the Strings section (needed to fill the node in case there
             * is no Strings section)
             */
            while (buffer.position() < fStringSectionOffset) {
                buffer.put((byte) 0);
            }

            /*
             * If the offsets were right, the size of the Strings section
             * should be == to the expected size
             */
            assert (curStringsEntryEndPos == fStringSectionOffset);

            /* Finally, write everything in the Buffer to disk */

            // if we don't do this, flip() will lose what's after.
            buffer.position(blockSize);

            buffer.flip();
            int res = fc.write(buffer);
            assert (res == blockSize);

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node, for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Add an interval to this node
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getIntervalSize() <= getNodeFreeSpace());

            /* Find the insert position to keep the list sorted */
            int index = fIntervals.size();
            while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
                index--;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += newInterval.getIntervalSize();

            /* Update the in-node offset "pointer" */
            fStringSectionOffset -= (newInterval.getStringsEntrySize());
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * We've received word from the containerTree that newer nodes now exist
     * to our right. (Sets the node's end time.)
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            assert (endtime >= fNodeStart);

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last
                 * one since they are sorted
                 */
                assert (endtime >= fIntervals.get(fIntervals.size() - 1).getEndTime());
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant information we can find in this node.
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp the query is for. Only return intervals that
     *            intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the Start times, since we know
                 * the End times necessarily fit.
                 *
                 * Second condition is to ignore new attributes that might
                 * have been created after stateInfo was instantiated (they
                 * would be null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (interval.getStartTime() <= t &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Get a single Interval from the information in this node. If the
     * key/timestamp pair cannot be found, we return null.
     *
     * @param key
     *            The attribute quark to look for
     * @param t
     *            The timestamp
     * @return The Interval containing the information we want, or null if it
     *         wasn't found
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                HTInterval curInterval = fIntervals.get(i);
                if (curInterval.getAttribute() == key
                        && curInterval.getStartTime() <= t
                        && curInterval.getEndTime() >= t) {
                    return curInterval;
                }
            }

            /* We didn't find the relevant information in this node */
            return null;

        } finally {
            fRwl.readLock().unlock();
        }
    }

    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the
         * ones at the beginning whose end times are smaller than 't'. Java
         * does provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);
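        /*
         * Note: this relies on HTInterval's natural ordering being by end
         * time, as the comments in this method assume; the dummy interval
         * only needs a matching end time, its attribute and value are
         * irrelevant to the comparison.
         */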

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching,
             * we don't care if the value is exact or not.
             */
            index = -index - 1;
        }

        /* Sometimes binarySearch yields weird stuff... */
        if (index < 0) {
            index = 0;
        }
        if (index >= fIntervals.size()) {
            index = fIntervals.size() - 1;
        }

        /*
         * Another API quirkiness, the returned index is the one of the *last*
         * element of a series of equal endtimes, which happens sometimes. We
         * want the *first* element of such a series, to read through them
         * again.
         */
        while (index > 0
                && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
            index--;
        }

        return index;
    }

    /**
     * Return the total header size of this node (will depend on the node
     * type).
     *
     * @return The total header size
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }

    /**
     * Returns the free space left in the node, which is simply
     * stringSectionOffset - dataSectionEndOffset.
     *
     * @return The amount of free space in the node (in bytes)
     */
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        int ret = fStringSectionOffset - getDataSectionEndOffset();
        fRwl.readLock().unlock();

        return ret;
    }

    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        StringBuffer buf = new StringBuffer("Node #" + fSequenceNumber + ", ");
        buf.append(toStringSpecific());
        buf.append(fIntervals.size() + " intervals (" + getNodeUsagePercent()
                + "% used), ");

        buf.append("[" + fNodeStart + " - ");
        if (fIsOnDisk) {
            buf = buf.append("" + fNodeEnd + "]");
        } else {
            buf = buf.append("...]");
        }
        return buf.toString();
    }

    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
            CoreNode thisNode = (CoreNode) this;
            writer.print("  " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println("  Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }

    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node.
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already
     *            positioned correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}