datastore: Add API for single queries
statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java (deliverable/tracecompass.git)
1/*******************************************************************************
2 * Copyright (c) 2017 École Polytechnique de Montréal
3 *
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
8 *******************************************************************************/
9
10package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;
11
12import java.io.File;
13import java.io.FileInputStream;
14import java.io.FileOutputStream;
15import java.io.IOException;
16import java.nio.ByteBuffer;
17import java.nio.ByteOrder;
18import java.nio.channels.ClosedChannelException;
19import java.nio.channels.FileChannel;
20import java.util.ArrayList;
 21import java.util.Collection;
22import java.util.Collections;
23import java.util.Deque;
24import java.util.LinkedList;
25import java.util.List;
26import java.util.concurrent.locks.ReentrantReadWriteLock;
27import java.util.function.Predicate;
28
29import org.eclipse.jdt.annotation.NonNull;
 30import org.eclipse.jdt.annotation.Nullable;
31import org.eclipse.tracecompass.common.core.NonNullUtils;
32import org.eclipse.tracecompass.internal.datastore.core.historytree.HtIo;
33import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition;
34import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
35import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHTNode.NodeType;
36import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
37import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;
38
39import com.google.common.annotations.VisibleForTesting;
40import com.google.common.collect.ImmutableList;
41import com.google.common.collect.Iterables;
42
43/**
44 * Base class for history trees that encapsulates the logic to read from/write
45 * to a file.
46 *
47 * @author Alexandre Montplaisir
48 * @author Geneviève Bastien
49 * @param <E>
50 * The type of intervals that will be saved in the tree
51 * @param <N>
52 * The base type of the nodes of this tree
53 */
54public abstract class AbstractHistoryTree<E extends IHTInterval, N extends HTNode<E>>
55 implements IHistoryTree<E> {
56
57 /**
 58 * Interface for history trees to create the various HTNodes
59 *
60 * @param <E>
61 * The type of intervals that will be saved in the node
62 * @param <N>
63 * The base type of the nodes of this tree
64 */
65 @FunctionalInterface
66 public interface IHTNodeFactory<E extends IHTInterval, N extends HTNode<E>> {
67
68 /**
69 * Creates a new node for the specific history tree
70 *
71 * @param type
72 * The type of node to create. See {@link IHTNode.NodeType}.
73 * @param blockSize
74 * The size (in bytes) of each node once serialized to disk
75 * @param maxChildren
 76 * The maximum number of children a single core node can have
77 * @param seqNumber
78 * The (unique) sequence number assigned to this particular
79 * node
80 * @param parentSeqNumber
81 * The sequence number of this node's parent node
82 * @param start
83 * The earliest timestamp stored in this node
 85 * @return The new node
85 */
86 N createNode(NodeType type, int blockSize, int maxChildren,
87 int seqNumber, int parentSeqNumber, long start);
88 }
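A node factory is typically supplied as a lambda or constructor reference matching the six createNode() parameters. A minimal sketch, assuming a hypothetical interval type MyInterval and node class MyNode (neither is part of this file):

    IHTNodeFactory<MyInterval, MyNode> factory =
            (type, blockSize, maxChildren, seqNumber, parentSeqNumber, start) ->
                    new MyNode(type, blockSize, maxChildren, seqNumber, parentSeqNumber, start);
    // Or, if MyNode exposes a constructor with exactly this signature: MyNode::new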
89
90 // ------------------------------------------------------------------------
91 // Tree-specific configuration
92 // ------------------------------------------------------------------------
93
94 /* Tree configuration constants */
95 private final File fHistoryFile;
96 private final int fBlockSize;
97 private final int fMaxChildren;
98 private final int fProviderVersion;
99 private final long fTreeStart;
100 private final IHTIntervalReader<E> fIntervalReader;
101
102 /** Reader/writer object */
103 private HtIo<E, N> fTreeIO;
104
105 // ------------------------------------------------------------------------
106 // Variable Fields (will change throughout the existence of the SHT)
107 // ------------------------------------------------------------------------
108
109 /** Latest timestamp found in the tree (at any given moment) */
110 private long fTreeEnd;
111
 112 /** The total number of nodes that exist in this tree */
113 private int fNodeCount;
114
115 /** "Cache" to keep the active nodes in memory */
116 private final List<N> fLatestBranch;
117
118 /* Lock used to protect the accesses to the HT_IO object */
119 private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);
120
121 /**
122 * Create a new State History from scratch, specifying all configuration
123 * parameters.
124 *
125 * @param stateHistoryFile
126 * The name of the history file
127 * @param blockSize
128 * The size of each "block" on disk in bytes. One node will
129 * always fit in one block. It should be at least 4096.
130 * @param maxChildren
131 * The maximum number of children allowed per core (non-leaf)
132 * node.
133 * @param providerVersion
 134 * The version of the state provider. If a file already exists
 135 * and its version matches, the history file will not be rebuilt
 136 * needlessly.
137 * @param treeStart
138 * The start time of the history
139 * @param intervalReader
140 * The factory to create new tree intervals when reading from
141 * the disk
142 * @throws IOException
143 * If an error happens trying to open/write to the file
144 * specified in the config
145 */
146 public AbstractHistoryTree(File stateHistoryFile,
147 int blockSize,
148 int maxChildren,
149 int providerVersion,
150 long treeStart,
151 IHTIntervalReader<E> intervalReader) throws IOException {
152 /*
 153 * Simple check to make sure we have enough room in the 0th block for
154 * the tree configuration
155 */
156 if (blockSize < TREE_HEADER_SIZE) {
157 throw new IllegalArgumentException();
158 }
159
160 fHistoryFile = stateHistoryFile;
161 fBlockSize = blockSize;
162 fMaxChildren = maxChildren;
163 fProviderVersion = providerVersion;
164 fTreeStart = treeStart;
165 fIntervalReader = intervalReader;
166
167 fTreeEnd = treeStart;
168 fNodeCount = 0;
169 fLatestBranch = NonNullUtils.checkNotNull(Collections.synchronizedList(new ArrayList<>()));
170
171 /* Prepare the IO object */
172 fTreeIO = new HtIo<>(stateHistoryFile,
173 blockSize,
174 maxChildren,
175 true,
176 intervalReader,
177 getNodeFactory());
178
179 /* Add the first node to the tree */
180 N firstNode = initNewLeafNode(-1, treeStart);
181 fLatestBranch.add(firstNode);
182 }
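For illustration, a "writer" tree could be created as follows. MyHistoryTree, MyInterval and MyInterval.READER are hypothetical stand-ins for a concrete subclass, its interval type and its IHTIntervalReader (a fuller subclass sketch appears after getNewBranchStart() further down):

    File file = new File("history.ht");
    long traceStart = 0L;
    MyHistoryTree tree = new MyHistoryTree(file,
            64 * 1024,          // block size: 64 KiB per node, at least TREE_HEADER_SIZE
            50,                 // max children per core node
            1,                  // provider version
            traceStart,         // start time of the history
            MyInterval.READER); // throws IOException on failure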
183
184 /**
185 * "Reader" constructor : instantiate a SHTree from an existing tree file on
186 * disk
187 *
188 * @param existingStateFile
189 * Path/filename of the history-file we are to open
190 * @param expectedProviderVersion
191 * The expected version of the state provider
192 * @param intervalReader
 193 * The factory used to read intervals from the history tree
194 * @throws IOException
195 * If an error happens reading the file
196 */
197 public AbstractHistoryTree(File existingStateFile,
198 int expectedProviderVersion,
199 IHTIntervalReader<E> intervalReader) throws IOException {
200 /*
201 * Open the file ourselves, get the tree header information we need,
202 * then pass on the descriptor to the TreeIO object.
203 */
204 int rootNodeSeqNb, res;
205 int bs, maxc;
206 long startTime;
207
208 /* Java I/O mumbo jumbo... */
209 if (!existingStateFile.exists()) {
210 throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
211 }
212 if (existingStateFile.length() <= 0) {
213 throw new IOException("Empty target file"); //$NON-NLS-1$
214 }
215
216 try (FileInputStream fis = new FileInputStream(existingStateFile);
217 FileChannel fc = fis.getChannel();) {
218
219 ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
220 buffer.order(ByteOrder.LITTLE_ENDIAN);
221 buffer.clear();
222
223 res = fc.read(buffer);
224 if (res != TREE_HEADER_SIZE) {
225 throw new IOException("Invalid header size"); //$NON-NLS-1$
226 }
227
228 buffer.flip();
229
230 /*
231 * Check the magic number to make sure we're opening the right type
232 * of file
233 */
234 res = buffer.getInt();
235 if (res != getMagicNumber()) {
236 throw new IOException("Wrong magic number"); //$NON-NLS-1$
237 }
238
239 res = buffer.getInt(); /* File format version number */
240 if (res != getFileVersion()) {
241 throw new IOException("Mismatching History Tree file format versions"); //$NON-NLS-1$
242 }
243
244 res = buffer.getInt(); /* Event handler's version number */
245 if (res != expectedProviderVersion) {
246 /*
247 * The existing history was built using an event handler that
248 * doesn't match the current one in the framework.
249 *
250 * Information could be all wrong. Instead of keeping an
251 * incorrect history file, a rebuild is done.
252 */
253 throw new IOException("Mismatching event handler versions"); //$NON-NLS-1$
254 }
255
256 bs = buffer.getInt(); /* Block Size */
257 maxc = buffer.getInt(); /* Max nb of children per node */
258
259 fNodeCount = buffer.getInt();
260 rootNodeSeqNb = buffer.getInt();
261 startTime = buffer.getLong();
262
263 /* Set all other permanent configuration options */
264 fHistoryFile = existingStateFile;
265 fBlockSize = bs;
266 fMaxChildren = maxc;
267 fProviderVersion = expectedProviderVersion;
268 fIntervalReader = intervalReader;
269 fTreeStart = startTime;
270 }
271
272 /*
273 * FIXME We close fis here and the TreeIO will then reopen the same
274 * file, not extremely elegant. But how to pass the information here to
275 * the SHT otherwise?
276 */
277 fTreeIO = new HtIo<>(fHistoryFile,
278 fBlockSize,
279 fMaxChildren,
280 false,
281 fIntervalReader,
282 getNodeFactory());
283
284 fLatestBranch = buildLatestBranch(rootNodeSeqNb);
285 fTreeEnd = getRootNode().getNodeEnd();
286
287 /*
288 * Make sure the history start time we read previously is consistent
 289 * with what is actually in the root node.
290 */
291 if (startTime != getRootNode().getNodeStart()) {
292 throw new IOException("Inconsistent start times in the " + //$NON-NLS-1$
293 "history file, it might be corrupted."); //$NON-NLS-1$
294 }
295 }
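For reference, the 0th-block header read above is laid out as follows (little-endian, byte offsets); the remainder of the header block is not read here:

    offset  0: int  magic number (implementation-specific)
    offset  4: int  file format version
    offset  8: int  provider (event handler) version
    offset 12: int  block size
    offset 16: int  max children per core node
    offset 20: int  node count
    offset 24: int  root node sequence number
    offset 28: long start time of the history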
296
297 /**
298 * Rebuild the latestBranch "cache" object by reading the nodes from disk
299 * (When we are opening an existing file on disk and want to append to it,
300 * for example).
301 *
302 * @param rootNodeSeqNb
303 * The sequence number of the root node, so we know where to
304 * start
305 * @throws ClosedChannelException
306 */
307 private List<N> buildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
308 List<N> list = new ArrayList<>();
309
310 N nextChildNode = fTreeIO.readNode(rootNodeSeqNb);
311 list.add(nextChildNode);
312
313 // TODO: Do we need the full latest branch? The latest leaf may not be
314 // the one we'll query first... Won't it build itself later?
315
316 /* Follow the last branch up to the leaf */
317 while (nextChildNode.getNodeType() == HTNode.NodeType.CORE) {
318 nextChildNode = fTreeIO.readNode(nextChildNode.getLatestChild());
319 list.add(nextChildNode);
320 }
321 return Collections.synchronizedList(list);
322 }
323
324 // ------------------------------------------------------------------------
325 // Accessors
326 // ------------------------------------------------------------------------
327
328 @Override
329 public long getTreeStart() {
330 return fTreeStart;
331 }
332
333 @Override
334 public long getTreeEnd() {
335 return fTreeEnd;
336 }
337
338 /**
339 * Get the number of nodes in this tree.
340 *
341 * @return The number of nodes
342 */
343 public int getNodeCount() {
344 return fNodeCount;
345 }
346
347 /**
348 * Get the current root node of this tree
349 *
350 * @return The root node
351 */
352 public N getRootNode() {
353 return fLatestBranch.get(0);
354 }
355
356 @Override
357 public long getFileSize() {
358 return fHistoryFile.length();
359 }
360
361 /**
 362 * Return the latest branch of the tree. The returned list is an immutable copy.
363 *
364 * @return The immutable latest branch
365 */
366 @VisibleForTesting
 367 List<N> getLatestBranch() {
368 return ImmutableList.copyOf(fLatestBranch);
369 }
370
371 /**
372 * Get the node in the latest branch at a depth. If the depth is too large,
373 * it will throw an IndexOutOfBoundsException
374 *
375 * @param depth
376 * The depth at which to get the node
377 * @return The node at depth
378 */
 379 protected N getLatestNode(int depth) {
 380 if (depth >= fLatestBranch.size()) {
381 throw new IndexOutOfBoundsException("Trying to get latest node too deep"); //$NON-NLS-1$
382 }
383 return fLatestBranch.get(depth);
384 }
385
386 /**
 387 * Get the magic number for the history file. This number should be
 388 * specific to each implementation of the history tree.
389 *
390 * @return The magic number for the history file
391 */
392 protected abstract int getMagicNumber();
393
394 /**
395 * Get the file version for the history file. This file version should be
396 * modified for a history tree class whenever changing the format of the
397 * file. Different versions of the file may not be compatible.
398 *
399 * @return The file version for the history file.
400 */
401 protected abstract int getFileVersion();
402
403 /**
404 * Get the factory to use to create new nodes for this history tree.
405 *
406 * This method is called in the constructor of the abstract class, so
 407 * assigning the factory to a final field may cause a NullPointerException
408 * since that final field may not be initialized the first time this is
409 * called.
410 *
411 * @return The NodeFactory for the History Tree
412 */
413 protected abstract IHTNodeFactory<E, N> getNodeFactory();
414
415 /**
416 * Read a node with a given sequence number
417 *
418 * @param seqNum
419 * The sequence number of the node to read
420 * @return The HTNode object
421 * @throws ClosedChannelException
422 * Exception thrown when reading the node, if the file was
423 * closed
424 */
425 @VisibleForTesting
 426 @NonNull N getNode(int seqNum) throws ClosedChannelException {
427 // First, check in the latest branch if the node is there
428 for (N node : fLatestBranch) {
429 if (node.getSequenceNumber() == seqNum) {
430 return node;
431 }
432 }
433 return fTreeIO.readNode(seqNum);
434 }
435
436 /**
437 * Retrieve the TreeIO object. Should only be used for testing.
438 *
439 * @return The TreeIO
440 */
441 @VisibleForTesting
 442 HtIo<E, N> getTreeIO() {
443 return fTreeIO;
444 }
445
446 // ------------------------------------------------------------------------
447 // HT_IO interface
448 // ------------------------------------------------------------------------
449
450 // TODO Remove from here
451 @Override
452 public FileInputStream supplyATReader() {
453 fRwl.readLock().lock();
454 try {
455 return fTreeIO.supplyATReader(getNodeCount());
456 } finally {
457 fRwl.readLock().unlock();
458 }
459 }
460
461 // TODO Remove from here
462 @Override
463 public File supplyATWriterFile() {
464 return fHistoryFile;
465 }
466
467 // TODO Remove from here
468 @Override
469 public long supplyATWriterFilePos() {
470 return IHistoryTree.TREE_HEADER_SIZE
471 + ((long) getNodeCount() * fBlockSize);
472 }
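As a worked example of this position: if three nodes have already been written with a 64 KiB block size, the next section starts at

    long pos = IHistoryTree.TREE_HEADER_SIZE + 3L * 65536; // = TREE_HEADER_SIZE + 196608 bytes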
473
474 /**
475 * Read a node from the tree.
476 *
477 * @param seqNumber
478 * The sequence number of the node to read
479 * @return The node
480 * @throws ClosedChannelException
481 * If the tree IO is unavailable
482 */
483 public N readNode(int seqNumber) throws ClosedChannelException {
484 /* Try to read the node from memory */
485 synchronized (fLatestBranch) {
486 for (N node : fLatestBranch) {
487 if (node.getSequenceNumber() == seqNumber) {
488 return node;
489 }
490 }
491 }
492
493 fRwl.readLock().lock();
494 try {
495 /* Read the node from disk */
496 return fTreeIO.readNode(seqNumber);
497 } finally {
498 fRwl.readLock().unlock();
499 }
500 }
501
502 /**
503 * Write a node object to the history file.
504 *
505 * @param node
506 * The node to write to disk
507 */
508 public void writeNode(N node) {
509 fRwl.readLock().lock();
510 try {
511 fTreeIO.writeNode(node);
512 } finally {
513 fRwl.readLock().unlock();
514 }
515 }
516
517 /**
518 * Close the history file.
519 */
520 @Override
521 public void closeFile() {
522 fRwl.writeLock().lock();
523 try {
524 fTreeIO.closeFile();
525 clearContent();
526 } finally {
527 fRwl.writeLock().unlock();
528 }
529 }
530
531 /**
532 * Delete the history file.
533 */
534 @Override
535 public void deleteFile() {
536 fRwl.writeLock().lock();
537 try {
538 fTreeIO.deleteFile();
539 clearContent();
540 } finally {
541 fRwl.writeLock().unlock();
542 }
543 }
544
545 @Override
546 public void cleanFile() throws IOException {
547 fRwl.writeLock().lock();
548 try {
549 closeTree(fTreeEnd);
550 fTreeIO.deleteFile();
551
552 fTreeIO = new HtIo<>(fHistoryFile,
553 fBlockSize,
554 fMaxChildren,
555 true,
556 fIntervalReader,
557 getNodeFactory());
558
559 clearContent();
560 /* Add the first node to the tree */
561 N firstNode = initNewLeafNode(-1, fTreeStart);
562 fLatestBranch.add(firstNode);
563 } finally {
564 fRwl.writeLock().unlock();
565 }
566 }
567
568 private void clearContent() {
569 // Re-initialize the content of the tree after the file is deleted or
570 // closed
571 fNodeCount = 0;
572 fLatestBranch.clear();
573 }
574
575 // ------------------------------------------------------------------------
576 // Operations
577 // ------------------------------------------------------------------------
578
579 /**
580 * Insert an interval in the tree.
581 *
582 * @param interval
583 * The interval to be inserted
584 * @throws RangeException
 585 * If the start or end time of the interval is invalid
586 */
587 @Override
588 public synchronized void insert(E interval) throws RangeException {
589 if (interval.getStart() < fTreeStart) {
590 throw new RangeException("Interval Start:" + interval.getStart() + ", Config Start:" + fTreeStart); //$NON-NLS-1$ //$NON-NLS-2$
591 }
592 tryInsertAtNode(interval, fLatestBranch.size() - 1);
593 }
594
595 /**
596 * Add a new empty core node to the tree.
597 *
598 * @param parentSeqNumber
599 * Sequence number of this node's parent
600 * @param startTime
601 * Start time of the new node
602 * @return The newly created node
603 */
604 protected final N initNewCoreNode(int parentSeqNumber, long startTime) {
605 N newNode = getNodeFactory().createNode(NodeType.CORE, fBlockSize, fMaxChildren,
606 fNodeCount, parentSeqNumber, startTime);
607 fNodeCount++;
608 return newNode;
609 }
610
611 /**
612 * Add a new empty leaf node to the tree.
613 *
614 * @param parentSeqNumber
615 * Sequence number of this node's parent
616 * @param startTime
617 * Start time of the new node
618 * @return The newly created node
619 */
620 protected final N initNewLeafNode(int parentSeqNumber, long startTime) {
621 N newNode = getNodeFactory().createNode(NodeType.LEAF, fBlockSize, fMaxChildren,
622 fNodeCount, parentSeqNumber, startTime);
623 fNodeCount++;
624 return newNode;
625 }
626
627 /**
628 * Inner method to find in which node we should add the interval.
629 *
630 * @param interval
631 * The interval to add to the tree
632 * @param depth
633 * The index *in the latestBranch* where we are trying the
634 * insertion
635 */
636 protected final void tryInsertAtNode(E interval, int depth) {
637 N targetNode = getLatestBranch().get(depth);
 638 informInsertingAtDepth(depth);
639
640 /* Verify if there is enough room in this node to store this interval */
641 if (interval.getSizeOnDisk() > targetNode.getNodeFreeSpace()) {
642 /* Nope, not enough room. Insert in a new sibling instead. */
643 addSiblingNode(depth, getNewBranchStart(depth, interval));
644 tryInsertAtNode(interval, getLatestBranch().size() - 1);
645 return;
646 }
647
648 /* Make sure the interval time range fits this node */
649 if (interval.getStart() < targetNode.getNodeStart()) {
650 /*
651 * No, this interval starts before the startTime of this node. We
652 * need to check recursively in parents if it can fit.
653 */
654 tryInsertAtNode(interval, depth - 1);
655 return;
656 }
657
658 /*
659 * Ok, there is room, and the interval fits in this time slot. Let's add
660 * it.
661 */
662 targetNode.add(interval);
663
664 updateEndTime(interval);
665 }
666
667 /**
 668 * Informs the tree that an insertion is requested at a given depth. When
 669 * this is called, the element is not yet inserted, but the last call to
 670 * this for an element will represent the depth at which it was really
 671 * inserted. By default, this method does nothing and should not be
 672 * necessary for concrete implementations, but it can be used by unit tests
 673 * to check the position of insertion of elements.
674 *
675 * @param depth
676 * The depth at which the last insertion was done
677 */
678 @VisibleForTesting
679 protected void informInsertingAtDepth(int depth) {
680
681 }
682
683 /**
684 * Get the start time of the new node of the branch that will be added
685 * starting at depth.
686 *
687 * Note that the depth is the depth of the last node that was filled and to
688 * which a sibling should be added. But depending on the returned start
689 * time, the actual new branch may start at a lower depth if the start time
 690 * happens to be less than the parent's start time.
691 *
692 * @param depth
693 * The depth of the last node that was filled and at which the
694 * new branch should start.
695 * @param interval
696 * The interval that is about to be inserted
697 * @return The value that should be the start time of the sibling node
698 */
699 protected abstract long getNewBranchStart(int depth, E interval);
700
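All of the abstract hooks have now been introduced (getMagicNumber, getFileVersion, getNodeFactory, getNewBranchStart). A deliberately simple, purely illustrative sketch of a concrete subclass; MyInterval and MyNode are hypothetical, and the chosen constants and branch-start policy are assumptions, not the behaviour of the real implementations:

    public class MyHistoryTree extends AbstractHistoryTree<MyInterval, MyNode> {

        public MyHistoryTree(File file, int blockSize, int maxChildren,
                int providerVersion, long treeStart,
                IHTIntervalReader<MyInterval> reader) throws IOException {
            super(file, blockSize, maxChildren, providerVersion, treeStart, reader);
        }

        @Override
        protected int getMagicNumber() {
            return 0x05FACADE; // any constant that identifies this tree type
        }

        @Override
        protected int getFileVersion() {
            return 1; // bump whenever the on-disk format changes
        }

        @Override
        protected IHTNodeFactory<MyInterval, MyNode> getNodeFactory() {
            // Called from the super constructor, so it must not read fields of this subclass
            return MyNode::new;
        }

        @Override
        protected long getNewBranchStart(int depth, MyInterval interval) {
            // Simplistic policy: start the new sibling branch at the current end of the tree
            return getTreeEnd();
        }
    }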
701 /**
702 * Method to add a sibling to any node in the latest branch. This will add
703 * children back down to the leaf level, if needed.
704 *
705 * @param depth
706 * The depth in latestBranch where we start adding
707 * @param newNodeStartTime
708 * The start time of the new node
709 */
710 private final void addSiblingNode(int depth, long newNodeStartTime) {
711 synchronized (fLatestBranch) {
712 final long splitTime = fTreeEnd;
713
714 if (depth >= fLatestBranch.size()) {
715 /*
 716 * We need to make sure (depth - 1) doesn't get the last
717 * node in the branch, because that one is a Leaf Node.
718 */
719 throw new IllegalStateException();
720 }
721
722 /* Check if we need to add a new root node */
723 if (depth == 0) {
724 addNewRootNode(newNodeStartTime);
725 return;
726 }
727
728 /*
729 * Check if we can indeed add a child to the target parent and if
 730 * the new start time is not before the target parent's start time.
731 */
732 if (fLatestBranch.get(depth - 1).getNbChildren() == fMaxChildren ||
733 newNodeStartTime < fLatestBranch.get(depth - 1).getNodeStart()) {
734 /* If not, add a branch starting one level higher instead */
735 addSiblingNode(depth - 1, newNodeStartTime);
736 return;
737 }
738
739 /*
740 * Close nodes from the leaf up because some parent nodes may need
741 * to get updated when their children are closed
742 */
743 for (int i = fLatestBranch.size() - 1; i >= depth; i--) {
744 fLatestBranch.get(i).closeThisNode(splitTime);
745 fTreeIO.writeNode(fLatestBranch.get(i));
746 }
747
748 /* Split off the new branch from the old one */
749 for (int i = depth; i < fLatestBranch.size(); i++) {
750 N prevNode = fLatestBranch.get(i - 1);
751 N newNode;
752
753 switch (fLatestBranch.get(i).getNodeType()) {
754 case CORE:
755 newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
756 break;
757 case LEAF:
758 newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
759 break;
760 default:
761 throw new IllegalStateException();
762 }
763
764 prevNode.linkNewChild(newNode);
765 fLatestBranch.set(i, newNode);
766 }
767 }
768 }
769
770 /**
771 * Similar to the previous method, except here we rebuild a completely new
772 * latestBranch
773 */
774 private void addNewRootNode(long newNodeStartTime) {
775 final long nodeEnd = fTreeEnd;
776
777 N oldRootNode = fLatestBranch.get(0);
778 N newRootNode = initNewCoreNode(-1, fTreeStart);
779
780 /* Tell the old root node that it isn't root anymore */
781 oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());
782
783 /* Close off the whole current latestBranch */
784 for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
785 fLatestBranch.get(i).closeThisNode(nodeEnd);
786 fTreeIO.writeNode(fLatestBranch.get(i));
787 }
788
789 /* Link the new root to its first child (the previous root node) */
790 newRootNode.linkNewChild(oldRootNode);
791
792 /* Rebuild a new latestBranch */
793 int depth = fLatestBranch.size();
794 fLatestBranch.clear();
795 fLatestBranch.add(newRootNode);
796
797 // Create new coreNode
798 for (int i = 1; i < depth; i++) {
799 N prevNode = fLatestBranch.get(i - 1);
800 N newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
801 prevNode.linkNewChild(newNode);
802 fLatestBranch.add(newNode);
803 }
804
805 // Create the new leafNode
806 N prevNode = fLatestBranch.get(depth - 1);
807 N newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
808 prevNode.linkNewChild(newNode);
809 fLatestBranch.add(newNode);
810 }
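To illustrate the effect of a root split: starting from a full two-level latest branch [R, L], the old root R is closed and becomes the first child of the new root R', and a fresh core/leaf chain is opened for the data that follows:

    before: latestBranch = [R, L]            after: latestBranch = [R', C1, L1]

        R  (full root)                           R'  (new root, starts at tree start)
        |                                       /  \
        L  (latest leaf)                       R    C1  (new core, starts at newNodeStartTime)
                                               |     |
                                               L     L1  (new latest leaf)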
811
812 /**
813 * Update the tree's end time with this interval data
814 *
815 * @param interval
816 * The interval that was just added to the tree
817 */
818 protected void updateEndTime(E interval) {
819 fTreeEnd = Math.max(fTreeEnd, interval.getEnd());
820 }
821
822 @Override
823 public void closeTree(long requestedEndTime) {
824 /* This is an important operation, queries can wait */
825 synchronized (fLatestBranch) {
826 /*
827 * Work-around the "empty branches" that get created when the root
828 * node becomes full. Overwrite the tree's end time with the
829 * original wanted end-time, to ensure no queries are sent into
830 * those empty nodes.
831 */
832 fTreeEnd = requestedEndTime;
833
834 /* Close off the latest branch of the tree */
835 for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
836 fLatestBranch.get(i).closeThisNode(fTreeEnd);
837 fTreeIO.writeNode(fLatestBranch.get(i));
838 }
839
840 try (FileOutputStream fc = fTreeIO.getFileWriter(-1);) {
841 ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
842 buffer.order(ByteOrder.LITTLE_ENDIAN);
843 buffer.clear();
844
845 buffer.putInt(getMagicNumber());
846
847 buffer.putInt(getFileVersion());
848 buffer.putInt(fProviderVersion);
849
850 buffer.putInt(fBlockSize);
851 buffer.putInt(fMaxChildren);
852
853 buffer.putInt(fNodeCount);
854
855 /* root node seq. nb */
856 buffer.putInt(fLatestBranch.get(0).getSequenceNumber());
857
858 /* start time of this history */
859 buffer.putLong(fLatestBranch.get(0).getNodeStart());
860
861 buffer.flip();
862 fc.write(buffer.array());
863 /* done writing the file header */
864
865 } catch (IOException e) {
866 /*
867 * If we were able to write so far, there should not be any
868 * problem at this point...
869 */
 870 throw new RuntimeException("State system write error", e); //$NON-NLS-1$
871 }
872 }
873 }
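Putting the write path together, the typical life cycle (reusing the hypothetical MyHistoryTree and MyInterval from the sketches above; file, traceStart, traceEnd and intervalsFromTrace are placeholders) is to build the tree while the trace is read, then seal it exactly once:

    MyHistoryTree tree = new MyHistoryTree(file, 64 * 1024, 50, 1, traceStart, MyInterval.READER);
    for (MyInterval interval : intervalsFromTrace) { // hypothetical source of intervals
        tree.insert(interval);   // intervals must not start before traceStart
    }
    tree.closeTree(traceEnd);    // closes the latest branch and writes the header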
874
875 @Override
876 public Iterable<E> getMatchingIntervals(RangeCondition<Long> timeCondition,
877 Predicate<E> extraPredicate) {
878
879 // TODO Change this to evaluate the nodes lazily
880
881 List<Iterable<E>> intervalsOfNodes = new LinkedList<>();
882
 883 /* Queue of the sequence numbers of nodes intersecting the time condition */
884 Deque<Integer> queue = new LinkedList<>();
885 /* We start by reading the information in the root node */
886 queue.add(getRootNode().getSequenceNumber());
887
 888 /* Then we follow down into the relevant children */
889 try {
890 while (!queue.isEmpty()) {
891 int sequenceNumber = queue.pop();
892 HTNode<E> currentNode = readNode(sequenceNumber);
893 RangeCondition<Long> nodeCondition = timeCondition.subCondition(
894 currentNode.getNodeStart(), currentNode.getNodeEnd());
895
896 if (nodeCondition == null) {
897 continue;
898 }
899
900 if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
901 /* Here we add the relevant children nodes for BFS */
902 queue.addAll(currentNode.selectNextChildren(nodeCondition));
903 }
904 Iterable<E> nodeIntervals = currentNode.getMatchingIntervals(nodeCondition, extraPredicate);
905 intervalsOfNodes.add(nodeIntervals);
906 }
 907 } catch (ClosedChannelException e) { // The file was closed: stop and return what was found
908 }
909 return Iterables.concat(intervalsOfNodes);
910 }
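A usage sketch for the range query; RangeCondition.forContinuousRange is assumed to exist as a factory alongside the singleton() factory used in the test helpers below, MyInterval is the hypothetical interval type from the earlier sketches, and t1, t2 and tree are placeholders:

    RangeCondition<Long> times = RangeCondition.forContinuousRange(t1, t2); // assumed factory method
    Iterable<MyInterval> matches = tree.getMatchingIntervals(times,
            interval -> interval.getSizeOnDisk() > 0); // any extra Predicate<MyInterval>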
911
912 @Override
913 public @Nullable E getMatchingInterval(RangeCondition<Long> timeCondition,
914 Predicate<E> extraPredicate) {
915
 916 /* Queue of the sequence numbers of nodes intersecting the time condition */
917 Deque<Integer> queue = new LinkedList<>();
918 /* We start by reading the information in the root node */
919 queue.add(getRootNode().getSequenceNumber());
920
 921 /* Then we follow down into the relevant children until we find the interval */
922 try {
923 while (!queue.isEmpty()) {
924 int sequenceNumber = queue.pop();
925 HTNode<E> currentNode = readNode(sequenceNumber);
926
927 @Nullable E interval = currentNode.getMatchingInterval(timeCondition, extraPredicate);
928 if (interval != null) {
929 return interval;
930 }
931
932 if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
933 /* Here we add the relevant children nodes for BFS */
934 queue.addAll(currentNode.selectNextChildren(timeCondition));
935 }
936 }
 937 } catch (ClosedChannelException e) { // The file was closed: stop the search
938 }
939 return null;
940 }
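The single-query variant added here stops at the first matching interval found during the traversal and returns null if none matches. A short sketch, with the same assumptions as the previous example and t a timestamp of interest:

    @Nullable MyInterval first = tree.getMatchingInterval(
            RangeCondition.singleton(t),   // intervals crossing timestamp t
            interval -> true);             // extra predicate; trivially accepts everything here
    if (first != null) {
        // use the interval
    }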
941
942 @Override
943 public String toString() {
944 return "Information on the current tree:\n\n" + "Blocksize: " //$NON-NLS-1$ //$NON-NLS-2$
945 + fBlockSize + "\n" + "Max nb. of children per node: " //$NON-NLS-1$//$NON-NLS-2$
946 + fMaxChildren + "\n" + "Number of nodes: " + fNodeCount //$NON-NLS-1$//$NON-NLS-2$
947 + "\n" + "Depth of the tree: " + fLatestBranch.size() + "\n" //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
948 + "Size of the treefile: " + getFileSize() + "\n" //$NON-NLS-1$//$NON-NLS-2$
949 + "Root node has sequence number: " //$NON-NLS-1$
950 + fLatestBranch.get(0).getSequenceNumber() + "\n" //$NON-NLS-1$
951 + "'Latest leaf' has sequence number: " //$NON-NLS-1$
952 + fLatestBranch.get(fLatestBranch.size() - 1).getSequenceNumber();
953 }
954
955
956 // ------------------------------------------------------------------------
957 // Test-specific methods
958 // ------------------------------------------------------------------------
959
960 /**
961 * Get the current depth of the tree.
962 *
963 * @return The current depth
964 */
965 @VisibleForTesting
966 protected int getDepth() {
967 return getLatestBranch().size();
968 }
969
970 /**
971 * Get the leaf (bottom-most) node of the latest branch.
972 *
973 * @return The latest leaf
974 */
975 @VisibleForTesting
976 protected N getLatestLeaf() {
977 List<N> latestBranch = getLatestBranch();
978 return latestBranch.get(latestBranch.size() - 1);
979 }
980
981 /**
982 * Verify a node's specific information about a child.
983 *
984 * @param parent
985 * The parent node
986 * @param index
987 * The index of the child in the parent's extra data
988 * @param child
989 * The child node to verify
990 * @return False if a problem was found, true otherwise
991 */
992 @VisibleForTesting
993 protected boolean verifyChildrenSpecific(N parent,
994 int index,
995 N child) {
996 // Nothing to do for the default implementation
997 return true;
998 }
999
1000 /**
 1001 * This method should verify, over the whole time range of the parent node,
 1002 * whether the child node appears as a next child for each timestamp.
1003 *
1004 * @param parent
1005 * The parent node
1006 * @param child
1007 * The child node
1008 * @return False if a problem was found, true otherwise
1009 */
1010 @VisibleForTesting
1011 protected boolean verifyIntersectingChildren(N parent, N child) {
1012 int childSequence = child.getSequenceNumber();
1013 boolean shouldBeInCollection;
1014 Collection<Integer> nextChildren;
1015 for (long t = parent.getNodeStart(); t < parent.getNodeEnd(); t++) {
1016 shouldBeInCollection = true;
1017 nextChildren = parent.selectNextChildren(RangeCondition.singleton(t));
1018 if (shouldBeInCollection != nextChildren.contains(childSequence)) {
1019 return false;
1020 }
1021 }
1022 return true;
1023 }
 1024}