datastore: Add generic history tree
statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java
/*******************************************************************************
 * Copyright (c) 2017 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/

package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.tracecompass.common.core.NonNullUtils;
import org.eclipse.tracecompass.internal.datastore.core.historytree.HtIo;
import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition;
import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHTNode.NodeType;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

/**
 * Base class for history trees that encapsulates the logic to read from/write
 * to a file.
 *
 * @author Alexandre Montplaisir
 * @author Geneviève Bastien
 * @param <E>
 *            The type of intervals that will be saved in the tree
 * @param <N>
 *            The base type of the nodes of this tree
 */
public abstract class AbstractHistoryTree<E extends IHTInterval, N extends HTNode<E>>
        implements IHistoryTree<E> {

    /**
     * Interface for the history tree to create the various HTNodes
     *
     * @param <E>
     *            The type of intervals that will be saved in the node
     * @param <N>
     *            The base type of the nodes of this tree
     */
    @FunctionalInterface
    public interface IHTNodeFactory<E extends IHTInterval, N extends HTNode<E>> {

        /**
         * Creates a new node for the specific history tree
         *
         * @param type
         *            The type of node to create. See {@link IHTNode.NodeType}.
         * @param blockSize
         *            The size (in bytes) of each node once serialized to disk
         * @param maxChildren
         *            The maximum number of children a single core node can
         *            have
         * @param seqNumber
         *            The (unique) sequence number assigned to this particular
         *            node
         * @param parentSeqNumber
         *            The sequence number of this node's parent node
         * @param start
         *            The earliest timestamp stored in this node
         * @return The newly created node
         */
        N createNode(NodeType type, int blockSize, int maxChildren,
                int seqNumber, int parentSeqNumber, long start);
    }
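
    /*
     * Illustrative sketch only: a concrete tree implementation typically
     * satisfies IHTNodeFactory with a constructor reference or lambda. The
     * interval and node classes "MyInterval" and "MyNode" below are
     * hypothetical, and assume a MyNode constructor taking the same six
     * parameters as createNode().
     *
     *     @Override
     *     protected IHTNodeFactory<MyInterval, MyNode> getNodeFactory() {
     *         // (type, blockSize, maxChildren, seqNum, parentSeqNum, start) -> new MyNode(...)
     *         return MyNode::new;
     *     }
     */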

    // ------------------------------------------------------------------------
    // Tree-specific configuration
    // ------------------------------------------------------------------------

    /* Tree configuration constants */
    private final File fHistoryFile;
    private final int fBlockSize;
    private final int fMaxChildren;
    private final int fProviderVersion;
    private final long fTreeStart;
    private final IHTIntervalReader<E> fIntervalReader;

    /** Reader/writer object */
    private HtIo<E, N> fTreeIO;

    // ------------------------------------------------------------------------
    // Variable fields (will change throughout the existence of the SHT)
    // ------------------------------------------------------------------------

    /** Latest timestamp found in the tree (at any given moment) */
    private long fTreeEnd;

    /** The total number of nodes that exist in this tree */
    private int fNodeCount;

    /** "Cache" to keep the active nodes in memory */
    private final List<N> fLatestBranch;

    /* Lock used to protect the accesses to the HT_IO object */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /**
     * Create a new State History from scratch, specifying all configuration
     * parameters.
     *
     * @param stateHistoryFile
     *            The name of the history file
     * @param blockSize
     *            The size of each "block" on disk in bytes. One node will
     *            always fit in one block. It should be at least 4096.
     * @param maxChildren
     *            The maximum number of children allowed per core (non-leaf)
     *            node.
     * @param providerVersion
     *            The version of the state provider. If a file already exists
     *            and its version matches, the history file will not be rebuilt
     *            uselessly.
     * @param treeStart
     *            The start time of the history
     * @param intervalReader
     *            The factory to create new tree intervals when reading from
     *            the disk
     * @throws IOException
     *             If an error happens trying to open/write to the file
     *             specified in the config
     */
    public AbstractHistoryTree(File stateHistoryFile,
            int blockSize,
            int maxChildren,
            int providerVersion,
            long treeStart,
            IHTIntervalReader<E> intervalReader) throws IOException {
        /*
         * Simple check to make sure we have enough room in the 0th block for
         * the tree configuration
         */
        if (blockSize < TREE_HEADER_SIZE) {
            throw new IllegalArgumentException();
        }

        fHistoryFile = stateHistoryFile;
        fBlockSize = blockSize;
        fMaxChildren = maxChildren;
        fProviderVersion = providerVersion;
        fTreeStart = treeStart;
        fIntervalReader = intervalReader;

        fTreeEnd = treeStart;
        fNodeCount = 0;
        fLatestBranch = NonNullUtils.checkNotNull(Collections.synchronizedList(new ArrayList<>()));

        /* Prepare the IO object */
        fTreeIO = new HtIo<>(stateHistoryFile,
                blockSize,
                maxChildren,
                true,
                intervalReader,
                getNodeFactory());

        /* Add the first node to the tree */
        N firstNode = initNewLeafNode(-1, treeStart);
        fLatestBranch.add(firstNode);
    }

    /**
     * "Reader" constructor: instantiate a SHTree from an existing tree file on
     * disk
     *
     * @param existingStateFile
     *            Path/filename of the history-file we are to open
     * @param expectedProviderVersion
     *            The expected version of the state provider
     * @param intervalReader
     *            The factory used to read segments from the history tree
     * @throws IOException
     *             If an error happens reading the file
     */
    public AbstractHistoryTree(File existingStateFile,
            int expectedProviderVersion,
            IHTIntervalReader<E> intervalReader) throws IOException {
        /*
         * Open the file ourselves, get the tree header information we need,
         * then pass on the descriptor to the TreeIO object.
         */
        int rootNodeSeqNb, res;
        int bs, maxc;
        long startTime;

        /* Java I/O mumbo jumbo... */
        if (!existingStateFile.exists()) {
            throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
        }
        if (existingStateFile.length() <= 0) {
            throw new IOException("Empty target file"); //$NON-NLS-1$
        }

        try (FileInputStream fis = new FileInputStream(existingStateFile);
                FileChannel fc = fis.getChannel();) {

            ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            res = fc.read(buffer);
            if (res != TREE_HEADER_SIZE) {
                throw new IOException("Invalid header size"); //$NON-NLS-1$
            }

            buffer.flip();

            /*
             * Check the magic number to make sure we're opening the right type
             * of file
             */
            res = buffer.getInt();
            if (res != getMagicNumber()) {
                throw new IOException("Wrong magic number"); //$NON-NLS-1$
            }

            res = buffer.getInt(); /* File format version number */
            if (res != getFileVersion()) {
                throw new IOException("Mismatching History Tree file format versions"); //$NON-NLS-1$
            }

            res = buffer.getInt(); /* Event handler's version number */
            if (res != expectedProviderVersion) {
                /*
                 * The existing history was built using an event handler that
                 * doesn't match the current one in the framework.
                 *
                 * Information could be all wrong. Instead of keeping an
                 * incorrect history file, a rebuild is done.
                 */
                throw new IOException("Mismatching event handler versions"); //$NON-NLS-1$
            }

            bs = buffer.getInt(); /* Block Size */
            maxc = buffer.getInt(); /* Max nb of children per node */

            fNodeCount = buffer.getInt();
            rootNodeSeqNb = buffer.getInt();
            startTime = buffer.getLong();

            /* Set all other permanent configuration options */
            fHistoryFile = existingStateFile;
            fBlockSize = bs;
            fMaxChildren = maxc;
            fProviderVersion = expectedProviderVersion;
            fIntervalReader = intervalReader;
            fTreeStart = startTime;
        }

        /*
         * FIXME We close fis here and the TreeIO will then reopen the same
         * file, not extremely elegant. But how to pass the information here to
         * the SHT otherwise?
         */
        fTreeIO = new HtIo<>(fHistoryFile,
                fBlockSize,
                fMaxChildren,
                false,
                fIntervalReader,
                getNodeFactory());

        fLatestBranch = buildLatestBranch(rootNodeSeqNb);
        fTreeEnd = getRootNode().getNodeEnd();

        /*
         * Make sure the history start time we read previously is consistent
         * with what is actually in the root node.
         */
        if (startTime != getRootNode().getNodeStart()) {
            throw new IOException("Inconsistent start times in the " + //$NON-NLS-1$
                    "history file, it might be corrupted."); //$NON-NLS-1$
        }
    }
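
    /*
     * For reference, the on-disk header read above (and written back in
     * closeTree()) is laid out as follows, in little-endian byte order:
     *
     *     int  magic number          (getMagicNumber())
     *     int  file format version   (getFileVersion())
     *     int  provider version      (fProviderVersion)
     *     int  block size            (fBlockSize)
     *     int  max children per node (fMaxChildren)
     *     int  node count            (fNodeCount)
     *     int  root node seq. number
     *     long tree start time
     *
     * The nodes themselves follow, one block of fBlockSize bytes each.
     */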

    /**
     * Rebuild the latestBranch "cache" object by reading the nodes from disk
     * (When we are opening an existing file on disk and want to append to it,
     * for example).
     *
     * @param rootNodeSeqNb
     *            The sequence number of the root node, so we know where to
     *            start
     * @throws ClosedChannelException
     *             If the file channel was closed while reading the nodes
     */
    private List<N> buildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
        List<N> list = new ArrayList<>();

        N nextChildNode = fTreeIO.readNode(rootNodeSeqNb);
        list.add(nextChildNode);

        // TODO: Do we need the full latest branch? The latest leaf may not be
        // the one we'll query first... Won't it build itself later?

        /* Follow the last branch down to the leaf */
        while (nextChildNode.getNodeType() == HTNode.NodeType.CORE) {
            nextChildNode = fTreeIO.readNode(nextChildNode.getLatestChild());
            list.add(nextChildNode);
        }
        return Collections.synchronizedList(list);
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    @Override
    public long getTreeStart() {
        return fTreeStart;
    }

    @Override
    public long getTreeEnd() {
        return fTreeEnd;
    }

    /**
     * Get the number of nodes in this tree.
     *
     * @return The number of nodes
     */
    public int getNodeCount() {
        return fNodeCount;
    }

    /**
     * Get the current root node of this tree
     *
     * @return The root node
     */
    public N getRootNode() {
        return fLatestBranch.get(0);
    }

    @Override
    public long getFileSize() {
        return fHistoryFile.length();
    }

    /**
     * Return the latest branch of the tree. The returned list is an immutable
     * copy of the current branch.
     *
     * @return The immutable latest branch
     */
    @VisibleForTesting
    protected List<N> getLatestBranch() {
        return ImmutableList.copyOf(fLatestBranch);
    }

    /**
     * Get the node in the latest branch at a given depth. If the depth is too
     * large, an IndexOutOfBoundsException is thrown.
     *
     * @param depth
     *            The depth at which to get the node
     * @return The node at that depth
     */
    protected final N getLatestNode(int depth) {
        if (depth >= fLatestBranch.size()) {
            throw new IndexOutOfBoundsException("Trying to get latest node too deep"); //$NON-NLS-1$
        }
        return fLatestBranch.get(depth);
    }

    /**
     * Get the magic number for the history file. This number should be
     * specific for each implementation of the history tree.
     *
     * @return The magic number for the history file
     */
    protected abstract int getMagicNumber();

    /**
     * Get the file version for the history file. This file version should be
     * modified for a history tree class whenever changing the format of the
     * file. Different versions of the file may not be compatible.
     *
     * @return The file version for the history file.
     */
    protected abstract int getFileVersion();

    /**
     * Get the factory to use to create new nodes for this history tree.
     *
     * This method is called in the constructor of the abstract class, so
     * assigning the factory to a final field may cause a NullPointerException
     * since that final field may not be initialized the first time this is
     * called.
     *
     * @return The NodeFactory for the History Tree
     */
    protected abstract IHTNodeFactory<E, N> getNodeFactory();

    /**
     * Read a node with a given sequence number
     *
     * @param seqNum
     *            The sequence number of the node to read
     * @return The HTNode object
     * @throws ClosedChannelException
     *             Exception thrown when reading the node, if the file was
     *             closed
     */
    @VisibleForTesting
    protected @NonNull N getNode(int seqNum) throws ClosedChannelException {
        // First, check in the latest branch if the node is there
        for (N node : fLatestBranch) {
            if (node.getSequenceNumber() == seqNum) {
                return node;
            }
        }
        return fTreeIO.readNode(seqNum);
    }

    /**
     * Retrieve the TreeIO object. Should only be used for testing.
     *
     * @return The TreeIO
     */
    @VisibleForTesting
    protected HtIo<E, N> getTreeIO() {
        return fTreeIO;
    }

    // ------------------------------------------------------------------------
    // HT_IO interface
    // ------------------------------------------------------------------------

    // TODO Remove from here
    @Override
    public FileInputStream supplyATReader() {
        fRwl.readLock().lock();
        try {
            return fTreeIO.supplyATReader(getNodeCount());
        } finally {
            fRwl.readLock().unlock();
        }
    }

    // TODO Remove from here
    @Override
    public File supplyATWriterFile() {
        return fHistoryFile;
    }

    // TODO Remove from here
    @Override
    public long supplyATWriterFilePos() {
        return IHistoryTree.TREE_HEADER_SIZE
                + ((long) getNodeCount() * fBlockSize);
    }
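
    /*
     * Note on the position computed above: nodes are written sequentially
     * after the tree header, one block of fBlockSize bytes per node, so the
     * returned position is the first byte past the last node. For example,
     * with a block size of 4096 bytes and 3 nodes, the attribute tree would
     * be written at offset TREE_HEADER_SIZE + 3 * 4096.
     */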

    /**
     * Read a node from the tree.
     *
     * @param seqNumber
     *            The sequence number of the node to read
     * @return The node
     * @throws ClosedChannelException
     *             If the tree IO is unavailable
     */
    public N readNode(int seqNumber) throws ClosedChannelException {
        /* Try to read the node from memory */
        synchronized (fLatestBranch) {
            for (N node : fLatestBranch) {
                if (node.getSequenceNumber() == seqNumber) {
                    return node;
                }
            }
        }

        fRwl.readLock().lock();
        try {
            /* Read the node from disk */
            return fTreeIO.readNode(seqNumber);
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Write a node object to the history file.
     *
     * @param node
     *            The node to write to disk
     */
    public void writeNode(N node) {
        fRwl.readLock().lock();
        try {
            fTreeIO.writeNode(node);
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Close the history file.
     */
    @Override
    public void closeFile() {
        fRwl.writeLock().lock();
        try {
            fTreeIO.closeFile();
            clearContent();
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * Delete the history file.
     */
    @Override
    public void deleteFile() {
        fRwl.writeLock().lock();
        try {
            fTreeIO.deleteFile();
            clearContent();
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    @Override
    public void cleanFile() throws IOException {
        fRwl.writeLock().lock();
        try {
            closeTree(fTreeEnd);
            fTreeIO.deleteFile();

            fTreeIO = new HtIo<>(fHistoryFile,
                    fBlockSize,
                    fMaxChildren,
                    true,
                    fIntervalReader,
                    getNodeFactory());

            clearContent();
            /* Add the first node to the tree */
            N firstNode = initNewLeafNode(-1, fTreeStart);
            fLatestBranch.add(firstNode);
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    private void clearContent() {
        // Re-initialize the content of the tree after the file is deleted or
        // closed
        fNodeCount = 0;
        fLatestBranch.clear();
    }

    // ------------------------------------------------------------------------
    // Operations
    // ------------------------------------------------------------------------

    /**
     * Insert an interval in the tree.
     *
     * @param interval
     *            The interval to be inserted
     * @throws RangeException
     *             If the start or end time of the interval is invalid
     */
    @Override
    public synchronized void insert(E interval) throws RangeException {
        if (interval.getStart() < fTreeStart) {
            throw new RangeException("Interval Start:" + interval.getStart() + ", Config Start:" + fTreeStart); //$NON-NLS-1$ //$NON-NLS-2$
        }
        tryInsertAtNode(interval, fLatestBranch.size() - 1);
    }
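
    /*
     * Typical write-side usage, as a rough sketch. The subclass name
     * MyHistoryTree, its constructor signature and MyInterval.READER are
     * hypothetical; any concrete subclass of AbstractHistoryTree would follow
     * the same pattern:
     *
     *     MyHistoryTree tree = new MyHistoryTree(historyFile, 4096, 50,
     *             PROVIDER_VERSION, traceStartTime, MyInterval.READER);
     *     for (MyInterval interval : intervals) {
     *         tree.insert(interval);      // intervals must not start before traceStartTime
     *     }
     *     tree.closeTree(traceEndTime);   // closes the remaining branch and writes the header
     */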

    /**
     * Add a new empty core node to the tree.
     *
     * @param parentSeqNumber
     *            Sequence number of this node's parent
     * @param startTime
     *            Start time of the new node
     * @return The newly created node
     */
    protected final N initNewCoreNode(int parentSeqNumber, long startTime) {
        N newNode = getNodeFactory().createNode(NodeType.CORE, fBlockSize, fMaxChildren,
                fNodeCount, parentSeqNumber, startTime);
        fNodeCount++;
        return newNode;
    }

    /**
     * Add a new empty leaf node to the tree.
     *
     * @param parentSeqNumber
     *            Sequence number of this node's parent
     * @param startTime
     *            Start time of the new node
     * @return The newly created node
     */
    protected final N initNewLeafNode(int parentSeqNumber, long startTime) {
        N newNode = getNodeFactory().createNode(NodeType.LEAF, fBlockSize, fMaxChildren,
                fNodeCount, parentSeqNumber, startTime);
        fNodeCount++;
        return newNode;
    }

    /**
     * Inner method to find in which node we should add the interval.
     *
     * @param interval
     *            The interval to add to the tree
     * @param depth
     *            The index *in the latestBranch* where we are trying the
     *            insertion
     */
    protected final void tryInsertAtNode(E interval, int depth) {
        N targetNode = getLatestBranch().get(depth);

        /* Verify if there is enough room in this node to store this interval */
        if (interval.getSizeOnDisk() > targetNode.getNodeFreeSpace()) {
            /* Nope, not enough room. Insert in a new sibling instead. */
            addSiblingNode(depth, getNewBranchStart(depth, interval));
            tryInsertAtNode(interval, getLatestBranch().size() - 1);
            return;
        }

        /* Make sure the interval time range fits this node */
        if (interval.getStart() < targetNode.getNodeStart()) {
            /*
             * No, this interval starts before the startTime of this node. We
             * need to check recursively in parents if it can fit.
             */
            tryInsertAtNode(interval, depth - 1);
            return;
        }

        /*
         * Ok, there is room, and the interval fits in this time slot. Let's
         * add it.
         */
        targetNode.add(interval);

        updateEndTime(interval);
    }

    /**
     * Get the start time of the new node of the branch that will be added
     * starting at depth.
     *
     * Note that the depth is the depth of the last node that was filled and to
     * which a sibling should be added. But depending on the returned start
     * time, the actual new branch may start at a lower depth if the start time
     * happens to be less than the parent's start time.
     *
     * @param depth
     *            The depth of the last node that was filled and at which the
     *            new branch should start.
     * @param interval
     *            The interval that is about to be inserted
     * @return The value that should be the start time of the sibling node
     */
    protected abstract long getNewBranchStart(int depth, E interval);

    /**
     * Method to add a sibling to any node in the latest branch. This will add
     * children back down to the leaf level, if needed.
     *
     * @param depth
     *            The depth in latestBranch where we start adding
     * @param newNodeStartTime
     *            The start time of the new node
     */
    private final void addSiblingNode(int depth, long newNodeStartTime) {
        synchronized (fLatestBranch) {
            final long splitTime = fTreeEnd;

            if (depth >= fLatestBranch.size()) {
                /*
                 * The requested depth is outside the latest branch, which
                 * should never happen: the deepest valid target is the leaf
                 * at the end of the branch.
                 */
                throw new IllegalStateException();
            }

            /* Check if we need to add a new root node */
            if (depth == 0) {
                addNewRootNode(newNodeStartTime);
                return;
            }

            /*
             * Check if we can indeed add a child to the target parent and if
             * the new start time is not before the target parent.
             */
            if (fLatestBranch.get(depth - 1).getNbChildren() == fMaxChildren ||
                    newNodeStartTime < fLatestBranch.get(depth - 1).getNodeStart()) {
                /* If not, add a branch starting one level higher instead */
                addSiblingNode(depth - 1, newNodeStartTime);
                return;
            }

            /*
             * Close nodes from the leaf up because some parent nodes may need
             * to get updated when their children are closed
             */
            for (int i = fLatestBranch.size() - 1; i >= depth; i--) {
                fLatestBranch.get(i).closeThisNode(splitTime);
                fTreeIO.writeNode(fLatestBranch.get(i));
            }

            /* Split off the new branch from the old one */
            for (int i = depth; i < fLatestBranch.size(); i++) {
                N prevNode = fLatestBranch.get(i - 1);
                N newNode;

                switch (fLatestBranch.get(i).getNodeType()) {
                case CORE:
                    newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
                    break;
                case LEAF:
                    newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
                    break;
                default:
                    throw new IllegalStateException();
                }

                prevNode.linkNewChild(newNode);
                fLatestBranch.set(i, newNode);
            }
        }
    }
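
    /*
     * Shape of a split, for illustration. Suppose the latest branch is
     * [root, C1, L1] and the leaf L1 fills up while its parent C1 still has
     * room for another child. addSiblingNode(2, t) closes L1 at the current
     * tree end and replaces it in the branch with a fresh leaf L2 starting
     * at t:
     *
     *        root                 root
     *         |                    |
     *         C1        ==>        C1
     *         |                   /  \
     *         L1                L1    L2      (L1 closed and written to disk)
     *
     * If C1 were also full (or t were before C1's start time), the split
     * would move up one level and create a new core node under root instead.
     */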

    /**
     * Similar to the previous method, except here we rebuild a completely new
     * latestBranch
     */
    private void addNewRootNode(long newNodeStartTime) {
        final long nodeEnd = fTreeEnd;

        N oldRootNode = fLatestBranch.get(0);
        N newRootNode = initNewCoreNode(-1, fTreeStart);

        /* Tell the old root node that it isn't root anymore */
        oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());

        /* Close off the whole current latestBranch */
        for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
            fLatestBranch.get(i).closeThisNode(nodeEnd);
            fTreeIO.writeNode(fLatestBranch.get(i));
        }

        /* Link the new root to its first child (the previous root node) */
        newRootNode.linkNewChild(oldRootNode);

        /* Rebuild a new latestBranch */
        int depth = fLatestBranch.size();
        fLatestBranch.clear();
        fLatestBranch.add(newRootNode);

        // Create the new core nodes
        for (int i = 1; i < depth; i++) {
            N prevNode = fLatestBranch.get(i - 1);
            N newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
            prevNode.linkNewChild(newNode);
            fLatestBranch.add(newNode);
        }

        // Create the new leaf node
        N prevNode = fLatestBranch.get(depth - 1);
        N newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
        prevNode.linkNewChild(newNode);
        fLatestBranch.add(newNode);
    }

    /**
     * Update the tree's end time with this interval data
     *
     * @param interval
     *            The interval that was just added to the tree
     */
    protected void updateEndTime(E interval) {
        fTreeEnd = Math.max(fTreeEnd, interval.getEnd());
    }

    @Override
    public void closeTree(long requestedEndTime) {
        /* This is an important operation, queries can wait */
        synchronized (fLatestBranch) {
            /*
             * Work around the "empty branches" that get created when the root
             * node becomes full. Overwrite the tree's end time with the
             * original wanted end-time, to ensure no queries are sent into
             * those empty nodes.
             */
            fTreeEnd = requestedEndTime;

            /* Close off the latest branch of the tree */
            for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
                fLatestBranch.get(i).closeThisNode(fTreeEnd);
                fTreeIO.writeNode(fLatestBranch.get(i));
            }

            try (FileOutputStream fc = fTreeIO.getFileWriter(-1);) {
                ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
                buffer.order(ByteOrder.LITTLE_ENDIAN);
                buffer.clear();

                buffer.putInt(getMagicNumber());

                buffer.putInt(getFileVersion());
                buffer.putInt(fProviderVersion);

                buffer.putInt(fBlockSize);
                buffer.putInt(fMaxChildren);

                buffer.putInt(fNodeCount);

                /* root node seq. nb */
                buffer.putInt(fLatestBranch.get(0).getSequenceNumber());

                /* start time of this history */
                buffer.putLong(fLatestBranch.get(0).getNodeStart());

                buffer.flip();
                fc.write(buffer.array());
                /* done writing the file header */

            } catch (IOException e) {
                /*
                 * If we were able to write so far, there should not be any
                 * problem at this point...
                 */
                throw new RuntimeException("State system write error"); //$NON-NLS-1$
            }
        }
    }

    @Override
    public Iterable<E> getMatchingIntervals(RangeCondition<Long> timeCondition,
            Predicate<E> extraPredicate) {

        // TODO Change this to evaluate the nodes lazily

        List<Iterable<E>> intervalsOfNodes = new LinkedList<>();

        /* Queue of the sequence numbers of nodes that intersect the time condition */
        Deque<Integer> queue = new LinkedList<>();
        /* We start by reading the information in the root node */
        queue.add(getRootNode().getSequenceNumber());

        /* Then we follow down into the relevant children */
        try {
            while (!queue.isEmpty()) {
                int sequenceNumber = queue.pop();
                HTNode<E> currentNode = readNode(sequenceNumber);
                RangeCondition<Long> nodeCondition = timeCondition.subCondition(
                        currentNode.getNodeStart(), currentNode.getNodeEnd());

                if (nodeCondition == null) {
                    continue;
                }

                if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
                    /* Here we add the relevant children nodes for BFS */
                    queue.addAll(currentNode.selectNextChildren(nodeCondition));
                }
                Iterable<E> nodeIntervals = currentNode.getMatchingIntervals(nodeCondition, extraPredicate);
                intervalsOfNodes.add(nodeIntervals);
            }
        } catch (ClosedChannelException e) {
            /* The file was closed: return whatever was gathered so far */
        }
        return Iterables.concat(intervalsOfNodes);
    }
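
    /*
     * Read-side sketch (illustrative only): query every interval that
     * intersects a time range and matches a predicate. How the RangeCondition
     * is obtained is an assumption here; use whichever factory the condition
     * package actually provides. "getAttribute" and "quark" are hypothetical.
     *
     *     RangeCondition<Long> timeRange = ...; // e.g. a continuous range [t1, t2]
     *     for (MyInterval interval : tree.getMatchingIntervals(timeRange,
     *             i -> i.getAttribute() == quark)) {
     *         process(interval);
     *     }
     */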

    @Override
    public String toString() {
        return "Information on the current tree:\n\n" + "Blocksize: " //$NON-NLS-1$ //$NON-NLS-2$
                + fBlockSize + "\n" + "Max nb. of children per node: " //$NON-NLS-1$//$NON-NLS-2$
                + fMaxChildren + "\n" + "Number of nodes: " + fNodeCount //$NON-NLS-1$//$NON-NLS-2$
                + "\n" + "Depth of the tree: " + fLatestBranch.size() + "\n" //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
                + "Size of the treefile: " + getFileSize() + "\n" //$NON-NLS-1$//$NON-NLS-2$
                + "Root node has sequence number: " //$NON-NLS-1$
                + fLatestBranch.get(0).getSequenceNumber() + "\n" //$NON-NLS-1$
                + "'Latest leaf' has sequence number: " //$NON-NLS-1$
                + fLatestBranch.get(fLatestBranch.size() - 1).getSequenceNumber();
    }

}