datastore: Add generic history tree
statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/HTNode.java
/*******************************************************************************
 * Copyright (c) 2017 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/

package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition;
import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.AbstractHistoryTree.IHTNodeFactory;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;
import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader;
import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.SafeByteBufferFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
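 * <p>
 * A minimal usage sketch (illustration only: {@code MyInterval} stands for a
 * hypothetical {@link IHTInterval} implementation, and the block size,
 * maximum child count and timestamps are arbitrary):
 * </p>
 *
 * <pre>
 * HTNode&lt;MyInterval&gt; node = new HTNode&lt;&gt;(NodeType.LEAF, 4096, 50, 0, -1, 100L);
 * node.add(new MyInterval(100L, 150L));
 * node.closeThisNode(200L);
 * node.writeSelf(fileChannel);
 * </pre>
 *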
 * @author Alexandre Montplaisir
 * @author Geneviève Bastien
 * @param <E>
 *            The type of objects that will be saved in the tree
 */
public class HTNode<E extends IHTInterval> implements IHTNode<E> {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * Size of the basic node header. This is backward-compatible with previous
     * state system history trees.
     *
     * <pre>
     *  1 - byte (the type of node)
     * 16 - 2x long (start time, end time)
     * 12 - 3x int (seq number, parent seq number, interval count)
     *  1 - byte (done or not)
     * </pre>
     */
    @VisibleForTesting
    protected static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 3 * Integer.BYTES
            + Byte.BYTES;
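
    /*
     * For reference, the common header above works out to 1 + 16 + 12 + 1 =
     * 30 bytes at the start of every serialized node, whatever its type.
     */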

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /**
     * Default implementation of the interval comparator, which sorts first by
     * end times, then by start times
     */
    private final Comparator<E> fDefaultIntervalComparator = Comparator
            .comparingLong(E::getEnd)
            .thenComparingLong(E::getStart);
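
    /*
     * Illustrative ordering under this comparator: [4, 8] < [0, 9] < [2, 9],
     * since end times are compared first and start times only break ties.
     */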

    /* Node configuration, defined by the history tree */
    private final int fBlockSize;
    private final int fMaxChildren;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all objects in the node */
    private int fSizeOfContentSection;

    /*
     * True if this node was saved on disk (meaning its end time is now fixed)
     */
    private volatile boolean fIsOnDisk;

    /* List containing all the intervals contained in this node */
    private final List<E> fIntervals;

    /* Lock used to protect accesses to the intervals, the node's end time and the like */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /* Object containing extra data for core nodes */
    private final @Nullable CoreNodeData fExtraData;

    /**
     * A class that encapsulates data about the children of this node. This
     * class will be constructed by the core node and contains the extra header
     * data, methods to read/write the header data, etc.
     *
     * This base class for CORE nodes only saves the children, i.e. their
     * sequence numbers.
     *
     * @author Geneviève Bastien
     */
    protected static class CoreNodeData {

        /** Back-reference to the node class */
        private final HTNode<?> fNode;

        /**
         * Seq. numbers of the children nodes (size = max number of children,
         * determined by the configuration)
         */
        private final int[] fChildren;

        /** Nb. of children this node has */
        private int fNbChildren;

        /**
         * Constructor
         *
         * @param node
         *            The node containing this extra data.
         */
        public CoreNodeData(HTNode<?> node) {
            fNode = node;
            fNbChildren = 0;
            /*
             * We instantiate the following array at full size right away,
             * since we want to reserve that space in the node's header.
             * "fNbChildren" will tell us how many relevant entries there are
             * in that table.
             */
            fChildren = new int[fNode.fMaxChildren];
        }

        /**
         * Read the specific header for this extra node data
         *
         * @param buffer
         *            The byte buffer in which to read
         */
        protected void readSpecificHeader(ByteBuffer buffer) {
            fNode.takeWriteLock();
            try {
                int size = fNode.getMaxChildren();

                fNbChildren = buffer.getInt();

                for (int i = 0; i < fNbChildren; i++) {
                    fChildren[i] = buffer.getInt();
                }
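                /* Consume the unused child slots so the buffer lands at the end of the header */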
                for (int i = fNbChildren; i < size; i++) {
                    buffer.getInt();
                }
            } finally {
                fNode.releaseWriteLock();
            }
        }

        /**
         * Write the specific header for this extra node data
         *
         * @param buffer
         *            The byte buffer in which to write
         */
        protected void writeSpecificHeader(ByteBuffer buffer) {
            fNode.takeReadLock();
            try {
                buffer.putInt(fNbChildren);

                /* Write the "children's seq number" array */
                for (int i = 0; i < fNbChildren; i++) {
                    buffer.putInt(fChildren[i]);
                }
                for (int i = fNbChildren; i < fNode.getMaxChildren(); i++) {
                    buffer.putInt(0);
                }
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the number of children
         *
         * @return The number of children
         */
        public int getNbChildren() {
            fNode.takeReadLock();
            try {
                return fNbChildren;
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the child sequence number at an index
         *
         * @param index
         *            The index of the child to get
         * @return The sequence number of the child node
         */
        public int getChild(int index) {
            fNode.takeReadLock();
            try {
                if (index >= fNbChildren) {
                    throw new IndexOutOfBoundsException("The child at index " + index + " does not exist"); //$NON-NLS-1$ //$NON-NLS-2$
                }
                return fChildren[index];
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the sequence number of the last child node of this one
         *
         * @return The sequence number of the last child
         */
        public int getLatestChild() {
            fNode.takeReadLock();
            try {
                return fChildren[fNbChildren - 1];
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Add a new child to this node's data
         *
         * @param childNode
         *            The child node to add
         */
        public void linkNewChild(IHTNode<?> childNode) {
            fNode.takeWriteLock();
            try {
                if (fNbChildren >= fNode.getMaxChildren()) {
                    throw new IllegalStateException("Asked to link another child but parent already has the maximum number of children"); //$NON-NLS-1$
                }

                fChildren[fNbChildren] = childNode.getSequenceNumber();
                fNbChildren++;

            } finally {
                fNode.releaseWriteLock();
            }
        }

        /**
         * Get the size of the extra header space necessary for this node's
         * extra data
         *
         * @return The extra header size
         */
        protected int getSpecificHeaderSize() {
            int maxChildren = fNode.getMaxChildren();
            int specificSize = Integer.BYTES /* 1x int (nbChildren) */

                    /* MAX_NB * int ('children' table) */
                    + Integer.BYTES * maxChildren;

            return specificSize;
        }

        @Override
        public String toString() {
            /* Only used for debugging, shouldn't be externalized */
            return String.format("Core Node, %d children %s", //$NON-NLS-1$
                    fNbChildren, Arrays.toString(Arrays.copyOf(fChildren, fNbChildren)));
        }

        /**
         * Inner method to select the sequence numbers of the children of the
         * current node that intersect the given time condition. Useful when
         * moving down the tree.
         *
         * @param timeCondition
         *            The time-based RangeCondition to choose which children
         *            match.
         * @return Collection of sequence numbers of the child nodes that
         *         intersect the time condition, non-null empty collection if
         *         this is a Leaf Node
         */
        public final Collection<Integer> selectNextChildren(RangeCondition<Long> timeCondition) {
            fNode.takeReadLock();
            try {
                return selectNextIndices(timeCondition).stream()
                        .map(i -> fChildren[i])
                        .collect(Collectors.toList());
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Inner method to select the indices of the children of the current
         * node that intersect the given time condition. Useful when moving
         * down the tree.
         *
         * This is the method that subclasses of this node should override.
         * They may call <code>super.selectNextIndices(timeCondition)</code>
         * to get the subset of indices that match the parent's condition and
         * then add their own filters. When this method is called, a read lock
         * is already held on the node.
         *
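         * <p>
         * A sketch of what an override might look like; the
         * {@code getChildStart(i)} and {@code getChildEnd(i)} accessors are
         * hypothetical, standing in for whatever per-child bounds a subclass
         * keeps:
         * </p>
         *
         * <pre>
         * protected Collection&lt;Integer&gt; selectNextIndices(RangeCondition&lt;Long&gt; timeCondition) {
         *     return super.selectNextIndices(timeCondition).stream()
         *             .filter(i -&gt; timeCondition.intersects(getChildStart(i), getChildEnd(i)))
         *             .collect(Collectors.toList());
         * }
         * </pre>
         *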
         * @param timeCondition
         *            The time-based RangeCondition to choose which children
         *            match.
         * @return Collection of the indices of the child nodes that intersect
         *         the time condition
         */
        protected Collection<Integer> selectNextIndices(RangeCondition<Long> timeCondition) {
            /* By default, all children are returned */
            List<Integer> childList = new ArrayList<>();
            for (int i = 0; i < fNbChildren; i++) {
                childList.add(i);
            }

            return childList;
        }

    }

    /**
     * Constructor
     *
     * @param type
     *            The type of this node
     * @param blockSize
     *            The size (in bytes) of a serialized node on disk
     * @param maxChildren
     *            The maximum allowed number of children per node
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    public HTNode(NodeType type, int blockSize, int maxChildren, int seqNumber,
            int parentSeqNumber, long start) {
        fBlockSize = blockSize;
        fMaxChildren = maxChildren;

        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fSizeOfContentSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();

        fExtraData = createNodeExtraData(type);
    }

    /**
     * Reader factory method. Build a Node object (of the right type) by
     * reading a block in the file.
     *
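     * <p>
     * Typical call, as a sketch (the 4096/50 configuration values, the
     * {@code nodeOffset} position and the {@code MyInterval.READER} interval
     * reader are placeholders):
     * </p>
     *
     * <pre>
     * fc.position(nodeOffset);
     * HTNode&lt;MyInterval&gt; node = HTNode.readNode(4096, 50, fc, MyInterval.READER, nodeFactory);
     * </pre>
     *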
     * @param blockSize
     *            The size (in bytes) of a serialized node on disk
     * @param maxChildren
     *            The maximum allowed number of children per node
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @param objectReader
     *            The reader to read serialized node objects
     * @param nodeFactory
     *            The factory to create the nodes for this tree
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final <E extends IHTInterval, N extends HTNode<E>> N readNode(
            int blockSize,
            int maxChildren,
            FileChannel fc,
            IHTIntervalReader<E> objectReader,
            IHTNodeFactory<E, N> nodeFactory) throws IOException {

        N newNode;

        ByteBuffer buffer = ByteBuffer.allocate(blockSize);
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        int res = fc.read(buffer);
        if (res != blockSize) {
            throw new IOException("The block for the HTNode is not the right size: " + res); //$NON-NLS-1$
        }
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
        case LEAF:
            newNode = nodeFactory.createNode(type, blockSize, maxChildren, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException("Unrecognized node type: " + typeByte); //$NON-NLS-1$
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        ISafeByteBufferReader readBuffer = SafeByteBufferFactory.wrapReader(buffer, res - buffer.position());
        for (int i = 0; i < intervalCount; i++) {
            E interval = objectReader.readInterval(readBuffer);
            newNode.add(interval);
        }

        /* Set the node's other information that we read earlier */
        newNode.setNodeEnd(end);
        newNode.setOnDisk();

        return newNode;
    }

    /**
     * Create a node's extra data object for the given node type
     *
     * @param type
     *            The type of node
     * @return The node's extra data object, or <code>null</code> if there is
     *         none
     */
    protected @Nullable CoreNodeData createNodeExtraData(NodeType type) {
        if (type == NodeType.CORE) {
            return new CoreNodeData(this);
        }
        return null;
    }

    @Override
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = getBlockSize();

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from the header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            fIntervals.forEach(i -> i.writeSegment(SafeByteBufferFactory.wrapWriter(buffer, i.getSizeOnDisk())));
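            /*
             * Consistency check: after the header and the intervals, the
             * bytes remaining in the buffer must match the node's computed
             * free space.
             */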
            if (blockSize - buffer.position() != getNodeFreeSpace()) {
                throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$
            }
            /*
             * Fill the rest with zeros
             */
            while (buffer.position() < blockSize) {
                buffer.put((byte) 0);
            }

            /* Finally, write everything in the Buffer to disk */
            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Get this node's block size.
     *
     * @return The block size
     */
    protected final int getBlockSize() {
        return fBlockSize;
    }

    /**
     * Get this node's maximum number of children.
     *
     * @return The maximum number of children
     */
    protected final int getMaxChildren() {
        return fMaxChildren;
    }

    /**
     * Get the interval comparator. Intervals will always be stored sorted
     * according to this comparator. This can be used by insertion or retrieval
     * algorithms.
     *
     * Sub-classes may override this to change or specify the interval
     * comparator.
     *
     * @return The way intervals are to be sorted in this node
     */
    protected Comparator<E> getIntervalComparator() {
        return fDefaultIntervalComparator;
    }

    @Override
    public long getNodeStart() {
        return fNodeStart;
    }

    @Override
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return Long.MAX_VALUE;
    }

    @Override
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    @Override
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    @Override
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    @Override
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Get the node's extra data.
     *
     * @return The node extra data
     */
    protected @Nullable CoreNodeData getCoreNodeData() {
        return fExtraData;
    }

    /**
     * Get the list of objects in this node. This list is immutable. All
     * objects must be inserted through the {@link #add(IHTInterval)} method.
     *
     * @return The list of intervals in this node
     */
    protected List<E> getIntervals() {
        return ImmutableList.copyOf(fIntervals);
    }

    /**
     * Set this node's end time. Called by the reader factory.
     *
     * @param end
     *            The end time of the node
     */
    protected void setNodeEnd(long end) {
        fNodeEnd = end;
    }

    /**
     * Set this node to be on disk. Called by the reader factory.
     */
    protected void setOnDisk() {
        fIsOnDisk = true;
    }

    @Override
    public void add(E newInterval) {
        fRwl.writeLock().lock();
        try {
            /*
             * Just in case; this should be checked before even calling this
             * method.
             */
            int objSize = newInterval.getSizeOnDisk();
            if (objSize > getNodeFreeSpace()) {
                throw new IllegalArgumentException("The interval to insert (" + objSize + ") is larger than available space (" + getNodeFreeSpace() + ")"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
            }

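            /*
             * Collections.binarySearch returns (-(insertion point) - 1) when
             * the element is not already in the list; convert that encoding
             * back to the insertion index so the list stays sorted according
             * to the interval comparator.
             */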
            int insertPoint = Collections.binarySearch(fIntervals, newInterval, getIntervalComparator());
            insertPoint = (insertPoint >= 0 ? insertPoint : -insertPoint - 1);
            fIntervals.add(insertPoint, newInterval);

            fSizeOfContentSection += objSize;

        } finally {
            fRwl.writeLock().unlock();
        }
    }

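    /*
     * Example query, as a sketch: given a RangeCondition covering [t1, t2]
     * (however the caller builds it; a factory along the lines of
     * RangeCondition.forContinuousRange(t1, t2) is assumed here, not
     * guaranteed by this class), all matching intervals can be fetched with:
     *
     *     node.getMatchingIntervals(condition, interval -> true);
     */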
    @Override
    public Iterable<E> getMatchingIntervals(RangeCondition<Long> timeCondition,
            Predicate<E> extraPredicate) {

        // TODO Use getIntervalComparator() to restrict the dataset further
        // TODO Benchmark using/returning streams instead of iterables

        if (isOnDisk()) {
            @NonNull Iterable<E> ret = fIntervals;
            ret = Iterables.filter(ret, interval -> timeCondition.intersects(interval.getStart(), interval.getEnd()));
            ret = Iterables.filter(ret, interval -> extraPredicate.test(interval));
            return ret;
        }

        takeReadLock();
        try {
            return fIntervals.stream()
                    .filter(interval -> timeCondition.intersects(interval.getStart(), interval.getEnd()))
                    .filter(extraPredicate)
                    /*
                     * Because this class works with read locks, we
                     * unfortunately cannot return a lazy stream. Room for
                     * improvement?
                     */
                    .collect(Collectors.toList());

        } finally {
            releaseReadLock();
        }
    }

    @Override
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /*
             * FIXME: this was "assert (endtime >= fNodeStart);" but that
             * assertion is triggered by an empty node whose start time is
             * endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < fIntervals.get(fIntervals.size() - 1).getEnd()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the objects of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    @Override
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfContentSection;
    }

    @Override
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        try {
            int ret = getBlockSize() - getDataSectionEndOffset();
            return ret;
        } finally {
            fRwl.readLock().unlock();
        }
    }

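    /*
     * Usage is measured against the payload area only (block size minus the
     * header). For illustration, with a 4096-byte block, a 30-byte leaf
     * header and 1000 bytes free: 100 - (1000 / 4066 * 100) = 75 (truncated).
     */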
    @Override
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }

    @Override
    public NodeType getNodeType() {
        @Nullable
        CoreNodeData extraData = getCoreNodeData();
        if (extraData == null) {
            return NodeType.LEAF;
        }
        return NodeType.CORE;
    }

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected int getSpecificHeaderSize() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getSpecificHeaderSize();
        }
        return 0;
    }

    /**
     * Read the specific header for this node
     *
     * @param buffer
     *            The buffer to read from
     */
    protected void readSpecificHeader(ByteBuffer buffer) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.readSpecificHeader(buffer);
        }
    }

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected void writeSpecificHeader(ByteBuffer buffer) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.writeSpecificHeader(buffer);
        }
    }

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected String toStringSpecific() {
        return ""; //$NON-NLS-1$
    }

    @Override
    public boolean isEmpty() {
        return fIntervals.isEmpty();
    }

    // -------------------------------------------
    // Core node methods
    // -------------------------------------------

    @Override
    public int getNbChildren() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getNbChildren();
        }
        return IHTNode.super.getNbChildren();
    }

    @Override
    public int getChild(int index) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getChild(index);
        }
        return IHTNode.super.getChild(index);
    }

    @Override
    public int getLatestChild() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getLatestChild();
        }
        return IHTNode.super.getLatestChild();
    }

    @Override
    public void linkNewChild(@NonNull IHTNode<E> childNode) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.linkNewChild(childNode);
            return;
        }
        IHTNode.super.linkNewChild(childNode);
    }

    @Override
    public Collection<Integer> selectNextChildren(RangeCondition<Long> timeCondition)
            throws RangeException {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.selectNextChildren(timeCondition);
        }
        return IHTNode.super.selectNextChildren(timeCondition);
    }

    // -----------------------------------------
    // Locking
    // -----------------------------------------

    /**
     * Takes a read lock on the fields of this class. Each call to this method
     * should be followed by a {@link HTNode#releaseReadLock()}, in a
     * try-finally clause.
     */
    protected void takeReadLock() {
        fRwl.readLock().lock();
    }

    /**
     * Releases a read lock on the fields of this class. A call to this method
     * should have been preceded by a call to {@link HTNode#takeReadLock()}.
     */
    protected void releaseReadLock() {
        fRwl.readLock().unlock();
    }

    /**
     * Takes a write lock on the fields of this class. Each call to this method
     * should be followed by a {@link HTNode#releaseWriteLock()}, in a
     * try-finally clause.
     */
    protected void takeWriteLock() {
        fRwl.writeLock().lock();
    }

    /**
     * Releases a write lock on the fields of this class. A call to this method
     * should have been preceded by a call to {@link HTNode#takeWriteLock()}.
     */
    protected void releaseWriteLock() {
        fRwl.writeLock().unlock();
    }

    // -----------------------------------------
    // Object methods
    // -----------------------------------------

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }

    @Override
    public int hashCode() {
        return Objects.hash(fBlockSize, fMaxChildren, fNodeStart, fNodeEnd, fSequenceNumber, fParentSequenceNumber);
    }

    @Override
    public boolean equals(@Nullable Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        HTNode<? extends IHTInterval> other = (HTNode<?>) obj;
        return (fBlockSize == other.fBlockSize &&
                fMaxChildren == other.fMaxChildren &&
                fNodeStart == other.fNodeStart &&
                fNodeEnd == other.fNodeEnd &&
                fSequenceNumber == other.fSequenceNumber &&
                fParentSequenceNumber == other.fParentSequenceNumber);
    }

}