datastore: Keep a snapshot of the latest branch
statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/AbstractHistoryTree.java
1 /*******************************************************************************
2 * Copyright (c) 2017 École Polytechnique de Montréal
3 *
4 * All rights reserved. This program and the accompanying materials are
5 * made available under the terms of the Eclipse Public License v1.0 which
6 * accompanies this distribution, and is available at
7 * http://www.eclipse.org/legal/epl-v10.html
8 *******************************************************************************/
9
10 package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;
11
12 import java.io.File;
13 import java.io.FileInputStream;
14 import java.io.FileOutputStream;
15 import java.io.IOException;
16 import java.nio.ByteBuffer;
17 import java.nio.ByteOrder;
18 import java.nio.channels.ClosedChannelException;
19 import java.nio.channels.FileChannel;
20 import java.util.ArrayList;
21 import java.util.Collection;
22 import java.util.Collections;
23 import java.util.Deque;
24 import java.util.LinkedList;
25 import java.util.List;
26 import java.util.concurrent.locks.ReentrantReadWriteLock;
27 import java.util.function.Predicate;
28
29 import org.eclipse.jdt.annotation.NonNull;
30 import org.eclipse.jdt.annotation.Nullable;
31 import org.eclipse.tracecompass.common.core.NonNullUtils;
32 import org.eclipse.tracecompass.internal.datastore.core.historytree.HtIo;
33 import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.TimeRangeCondition;
34 import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
35 import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHTNode.NodeType;
36 import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
37 import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;
38
39 import com.google.common.annotations.VisibleForTesting;
40 import com.google.common.collect.ImmutableList;
41 import com.google.common.collect.Iterables;
42
43 /**
44 * Base class for history trees that encapsulates the logic to read from/write
45 * to a file.
46 *
47 * @author Alexandre Montplaisir
48 * @author Geneviève Bastien
49 * @param <E>
50 * The type of intervals that will be saved in the tree
51 * @param <N>
52 * The base type of the nodes of this tree
53 */
54 public abstract class AbstractHistoryTree<E extends IHTInterval, N extends HTNode<E>>
55 implements IHistoryTree<E> {
56
57 /**
58 * Interface for the history tree to create the various HTNodes
59 *
60 * @param <E>
61 * The type of intervals that will be saved in the node
62 * @param <N>
63 * The base type of the nodes of this tree
64 */
65 @FunctionalInterface
66 public interface IHTNodeFactory<E extends IHTInterval, N extends HTNode<E>> {
67
68 /**
69 * Creates a new node for the specific history tree
70 *
71 * @param type
72 * The type of node to create. See {@link IHTNode.NodeType}.
73 * @param blockSize
74 * The size (in bytes) of each node once serialized to disk
75 * @param maxChildren
76 * The maximum number of children a single core node can have
77 * @param seqNumber
78 * The (unique) sequence number assigned to this particular
79 * node
80 * @param parentSeqNumber
81 * The sequence number of this node's parent node
82 * @param start
83 * The earliest timestamp stored in this node
84 * @return The newly created node
85 */
86 N createNode(NodeType type, int blockSize, int maxChildren,
87 int seqNumber, int parentSeqNumber, long start);
88 }
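// Since IHTNodeFactory is a functional interface, a concrete tree can
// typically supply its factory as a lambda or constructor reference. A
// minimal sketch, assuming a hypothetical node class "MyNode" and interval
// type "MyInterval" (neither is part of this package) whose constructor
// takes the same parameters as createNode():
//
//     IHTNodeFactory<MyInterval, MyNode> factory =
//             (type, blockSize, maxChildren, seqNumber, parentSeqNumber, start) ->
//                     new MyNode(type, blockSize, maxChildren, seqNumber, parentSeqNumber, start);
//
// or simply "MyNode::new" if the constructor signature matches exactly.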
89
90 // ------------------------------------------------------------------------
91 // Tree-specific configuration
92 // ------------------------------------------------------------------------
93
94 /* Tree configuration constants */
95 private final File fHistoryFile;
96 private final int fBlockSize;
97 private final int fMaxChildren;
98 private final int fProviderVersion;
99 private final long fTreeStart;
100 private final IHTIntervalReader<E> fIntervalReader;
101
102 /** Reader/writer object */
103 private HtIo<E, N> fTreeIO;
104
105 // ------------------------------------------------------------------------
106 // Variable Fields (will change throughout the existence of the SHT)
107 // ------------------------------------------------------------------------
108
109 /** Latest timestamp found in the tree (at any given moment) */
110 private long fTreeEnd;
111
112 /** The total number of nodes that exist in this tree */
113 private int fNodeCount;
114
115 /** "Cache" to keep the active nodes in memory */
116 private final List<N> fLatestBranch;
117
118 /* Lock used to protect the accesses to the HT_IO object */
119 private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);
120
121 private transient @Nullable List<N> fLatestBranchSnapshot = null;
122
123 /**
124 * Create a new State History from scratch, specifying all configuration
125 * parameters.
126 *
127 * @param stateHistoryFile
128 * The name of the history file
129 * @param blockSize
130 * The size of each "block" on disk in bytes. One node will
131 * always fit in one block. It should be at least 4096.
132 * @param maxChildren
133 * The maximum number of children allowed per core (non-leaf)
134 * node.
135 * @param providerVersion
136 * The version of the state provider. If a file already exists,
137 * and its version matches, the history file will not be rebuilt
138 * uselessly.
139 * @param treeStart
140 * The start time of the history
141 * @param intervalReader
142 * The factory to create new tree intervals when reading from
143 * the disk
144 * @throws IOException
145 * If an error happens trying to open/write to the file
146 * specified in the config
147 */
148 public AbstractHistoryTree(File stateHistoryFile,
149 int blockSize,
150 int maxChildren,
151 int providerVersion,
152 long treeStart,
153 IHTIntervalReader<E> intervalReader) throws IOException {
154 /*
155 * Simple check to make sure we have enough space in the 0th block for
156 * the tree configuration
157 */
158 if (blockSize < TREE_HEADER_SIZE) {
159 throw new IllegalArgumentException();
160 }
161
162 fHistoryFile = stateHistoryFile;
163 fBlockSize = blockSize;
164 fMaxChildren = maxChildren;
165 fProviderVersion = providerVersion;
166 fTreeStart = treeStart;
167 fIntervalReader = intervalReader;
168
169 fTreeEnd = treeStart;
170 fNodeCount = 0;
171 fLatestBranch = NonNullUtils.checkNotNull(Collections.synchronizedList(new ArrayList<>()));
172
173 /* Prepare the IO object */
174 fTreeIO = new HtIo<>(stateHistoryFile,
175 blockSize,
176 maxChildren,
177 true,
178 intervalReader,
179 getNodeFactory());
180
181 /* Add the first node to the tree */
182 N firstNode = initNewLeafNode(-1, treeStart);
183 fLatestBranch.add(firstNode);
184 }
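// A minimal usage sketch for this constructor, assuming a hypothetical
// concrete subclass "MyHistoryTree" and interval type "MyInterval" (the
// numeric values are examples only; the block size must be at least
// TREE_HEADER_SIZE, and the interval reader is whatever
// IHTIntervalReader<MyInterval> the subclass defines):
//
//     MyHistoryTree tree = new MyHistoryTree(
//             new File("history.ht"), // target history file
//             64 * 1024,              // block size: one node per block
//             50,                     // max children per core node
//             1,                      // state provider version
//             0L,                     // start time of the history
//             MyInterval::read);      // IHTIntervalReader<MyInterval>
//     tree.insert(new MyInterval(0L, 100L));
//     tree.closeTree(100L);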
185
186 /**
187 * "Reader" constructor : instantiate a SHTree from an existing tree file on
188 * disk
189 *
190 * @param existingStateFile
191 * Path/filename of the history-file we are to open
192 * @param expectedProviderVersion
193 * The expected version of the state provider
194 * @param intervalReader
195 * The factory used to read segments from the history tree
196 * @throws IOException
197 * If an error happens reading the file
198 */
199 public AbstractHistoryTree(File existingStateFile,
200 int expectedProviderVersion,
201 IHTIntervalReader<E> intervalReader) throws IOException {
202 /*
203 * Open the file ourselves, get the tree header information we need,
204 * then pass on the descriptor to the TreeIO object.
205 */
206 int rootNodeSeqNb, res;
207 int bs, maxc;
208 long startTime;
209
210 /* Java I/O mumbo jumbo... */
211 if (!existingStateFile.exists()) {
212 throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
213 }
214 if (existingStateFile.length() <= 0) {
215 throw new IOException("Empty target file"); //$NON-NLS-1$
216 }
217
218 try (FileInputStream fis = new FileInputStream(existingStateFile);
219 FileChannel fc = fis.getChannel();) {
220
221 ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
222 buffer.order(ByteOrder.LITTLE_ENDIAN);
223 buffer.clear();
224
225 res = fc.read(buffer);
226 if (res != TREE_HEADER_SIZE) {
227 throw new IOException("Invalid header size"); //$NON-NLS-1$
228 }
229
230 buffer.flip();
231
232 /*
233 * Check the magic number to make sure we're opening the right type
234 * of file
235 */
236 res = buffer.getInt();
237 if (res != getMagicNumber()) {
238 throw new IOException("Wrong magic number"); //$NON-NLS-1$
239 }
240
241 res = buffer.getInt(); /* File format version number */
242 if (res != getFileVersion()) {
243 throw new IOException("Mismatching History Tree file format versions"); //$NON-NLS-1$
244 }
245
246 res = buffer.getInt(); /* Event handler's version number */
247 if (res != expectedProviderVersion) {
248 /*
249 * The existing history was built using an event handler that
250 * doesn't match the current one in the framework.
251 *
252 * Information could be all wrong. Instead of keeping an
253 * incorrect history file, a rebuild is done.
254 */
255 throw new IOException("Mismatching event handler versions"); //$NON-NLS-1$
256 }
257
258 bs = buffer.getInt(); /* Block Size */
259 maxc = buffer.getInt(); /* Max nb of children per node */
260
261 fNodeCount = buffer.getInt();
262 rootNodeSeqNb = buffer.getInt();
263 startTime = buffer.getLong();
264
265 /* Set all other permanent configuration options */
266 fHistoryFile = existingStateFile;
267 fBlockSize = bs;
268 fMaxChildren = maxc;
269 fProviderVersion = expectedProviderVersion;
270 fIntervalReader = intervalReader;
271 fTreeStart = startTime;
272 }
273
274 /*
275 * FIXME We close fis here and the TreeIO will then reopen the same
276 * file, not extremely elegant. But how to pass the information here to
277 * the SHT otherwise?
278 */
279 fTreeIO = new HtIo<>(fHistoryFile,
280 fBlockSize,
281 fMaxChildren,
282 false,
283 fIntervalReader,
284 getNodeFactory());
285
286 fLatestBranch = buildLatestBranch(rootNodeSeqNb);
287 fTreeEnd = getRootNode().getNodeEnd();
288
289 /*
290 * Make sure the history start time we read previously is consistent
291 * with what is actually in the root node.
292 */
293 if (startTime != getRootNode().getNodeStart()) {
294 throw new IOException("Inconsistent start times in the " + //$NON-NLS-1$
295 "history file, it might be corrupted."); //$NON-NLS-1$
296 }
297 }
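// For reference, the fields read from the header above are, in order
// (little-endian, mirroring what closeTree() writes back at the start of
// the file): the magic number, the file format version, the provider
// version, the block size, the maximum number of children, the node count,
// the root node's sequence number and the history's start time. Only these
// fields are read; the rest of the TREE_HEADER_SIZE block is not touched
// here.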
298
299 /**
300 * Rebuild the latestBranch "cache" object by reading the nodes from disk
301 * (When we are opening an existing file on disk and want to append to it,
302 * for example).
303 *
304 * @param rootNodeSeqNb
305 * The sequence number of the root node, so we know where to
306 * start
307 * @throws ClosedChannelException
308 */
309 private List<N> buildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
310 List<N> list = new ArrayList<>();
311
312 N nextChildNode = fTreeIO.readNode(rootNodeSeqNb);
313 list.add(nextChildNode);
314
315 // TODO: Do we need the full latest branch? The latest leaf may not be
316 // the one we'll query first... Won't it build itself later?
317
318 /* Follow the last branch up to the leaf */
319 while (nextChildNode.getNodeType() == HTNode.NodeType.CORE) {
320 nextChildNode = fTreeIO.readNode(nextChildNode.getLatestChild());
321 list.add(nextChildNode);
322 }
323 return Collections.synchronizedList(list);
324 }
325
326 // ------------------------------------------------------------------------
327 // Accessors
328 // ------------------------------------------------------------------------
329
330 @Override
331 public long getTreeStart() {
332 return fTreeStart;
333 }
334
335 @Override
336 public long getTreeEnd() {
337 return fTreeEnd;
338 }
339
340 /**
341 * Get the number of nodes in this tree.
342 *
343 * @return The number of nodes
344 */
345 public int getNodeCount() {
346 return fNodeCount;
347 }
348
349 /**
350 * Get the current root node of this tree
351 *
352 * @return The root node
353 */
354 public N getRootNode() {
355 return fLatestBranch.get(0);
356 }
357
358 @Override
359 public long getFileSize() {
360 return fHistoryFile.length();
361 }
362
363 /**
364 * Return the latest branch of the tree. The returned list is an immutable snapshot of that branch.
365 *
366 * @return The immutable latest branch
367 */
368 @VisibleForTesting
369 final List<N> getLatestBranch() {
370 List<N> latestBranchSnapshot = fLatestBranchSnapshot;
371 if (latestBranchSnapshot == null) {
372 synchronized (fLatestBranch) {
373 latestBranchSnapshot = ImmutableList.copyOf(fLatestBranch);
374 fLatestBranchSnapshot = latestBranchSnapshot;
375 }
376 }
377 return latestBranchSnapshot;
378 }
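// Note on the snapshot above: copying the whole branch on every call would
// be costly for queries, so the immutable copy is cached in
// fLatestBranchSnapshot and only rebuilt after the cache is invalidated
// (set back to null) when the branch changes, as done in addSiblingNode().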
379
380 /**
381 * Get the node in the latest branch at a depth. If the depth is too large,
382 * it will throw an IndexOutOfBoundsException
383 *
384 * @param depth
385 * The depth at which to get the node
386 * @return The node at depth
387 */
388 protected N getLatestNode(int depth) {
389 if (depth > fLatestBranch.size()) {
390 throw new IndexOutOfBoundsException("Trying to get latest node too deep"); //$NON-NLS-1$
391 }
392 return fLatestBranch.get(depth);
393 }
394
395 /**
396 * Get the magic number for the history file. This number should be specific
397 * for each implementation of the history tree.
398 *
399 * @return The magic number for the history file
400 */
401 protected abstract int getMagicNumber();
402
403 /**
404 * Get the file version for the history file. This file version should be
405 * modified for a history tree class whenever changing the format of the
406 * file. Different versions of the file may not be compatible.
407 *
408 * @return The file version for the history file.
409 */
410 protected abstract int getFileVersion();
411
412 /**
413 * Get the factory to use to create new nodes for this history tree.
414 *
415 * This method is called in the constructor of the abstract class, so
416 * assigning the factory to a final field may cause NullPointerException
417 * since that final field may not be initialized the first time this is
418 * called.
419 *
420 * @return The NodeFactory for the History Tree
421 */
422 protected abstract IHTNodeFactory<E, N> getNodeFactory();
423
424 /**
425 * Read a node with a given sequence number
426 *
427 * @param seqNum
428 * The sequence number of the node to read
429 * @return The HTNode object
430 * @throws ClosedChannelException
431 * Exception thrown when reading the node, if the file was
432 * closed
433 */
434 @VisibleForTesting
435 @NonNull N getNode(int seqNum) throws ClosedChannelException {
436 // First, check in the latest branch if the node is there
437 for (N node : fLatestBranch) {
438 if (node.getSequenceNumber() == seqNum) {
439 return node;
440 }
441 }
442 return fTreeIO.readNode(seqNum);
443 }
444
445 /**
446 * Retrieve the TreeIO object. Should only be used for testing.
447 *
448 * @return The TreeIO
449 */
450 @VisibleForTesting
451 HtIo<E, N> getTreeIO() {
452 return fTreeIO;
453 }
454
455 // ------------------------------------------------------------------------
456 // HT_IO interface
457 // ------------------------------------------------------------------------
458
459 // TODO Remove from here
460 @Override
461 public FileInputStream supplyATReader() {
462 fRwl.readLock().lock();
463 try {
464 return fTreeIO.supplyATReader(getNodeCount());
465 } finally {
466 fRwl.readLock().unlock();
467 }
468 }
469
470 // TODO Remove from here
471 @Override
472 public File supplyATWriterFile() {
473 return fHistoryFile;
474 }
475
476 // TODO Remove from here
477 @Override
478 public long supplyATWriterFilePos() {
479 return IHistoryTree.TREE_HEADER_SIZE
480 + ((long) getNodeCount() * fBlockSize);
481 }
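// Illustration (assuming TREE_HEADER_SIZE is 4096, as the constructor's
// Javadoc suggests): with a 4096-byte block size and 3 nodes written, the
// reported position is 4096 + 3 * 4096 = 16384, i.e. the first free byte
// right after the last node on disk.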
482
483 /**
484 * Read a node from the tree.
485 *
486 * @param seqNumber
487 * The sequence number of the node to read
488 * @return The node
489 * @throws ClosedChannelException
490 * If the tree IO is unavailable
491 */
492 public N readNode(int seqNumber) throws ClosedChannelException {
493 /* Try to read the node from memory */
494 synchronized (fLatestBranch) {
495 for (N node : fLatestBranch) {
496 if (node.getSequenceNumber() == seqNumber) {
497 return node;
498 }
499 }
500 }
501
502 fRwl.readLock().lock();
503 try {
504 /* Read the node from disk */
505 return fTreeIO.readNode(seqNumber);
506 } finally {
507 fRwl.readLock().unlock();
508 }
509 }
510
511 /**
512 * Write a node object to the history file.
513 *
514 * @param node
515 * The node to write to disk
516 */
517 public void writeNode(N node) {
518 fRwl.readLock().lock();
519 try {
520 fTreeIO.writeNode(node);
521 } finally {
522 fRwl.readLock().unlock();
523 }
524 }
525
526 /**
527 * Close the history file.
528 */
529 @Override
530 public void closeFile() {
531 fRwl.writeLock().lock();
532 try {
533 fTreeIO.closeFile();
534 clearContent();
535 } finally {
536 fRwl.writeLock().unlock();
537 }
538 }
539
540 /**
541 * Delete the history file.
542 */
543 @Override
544 public void deleteFile() {
545 fRwl.writeLock().lock();
546 try {
547 fTreeIO.deleteFile();
548 clearContent();
549 } finally {
550 fRwl.writeLock().unlock();
551 }
552 }
553
554 @Override
555 public void cleanFile() throws IOException {
556 fRwl.writeLock().lock();
557 try {
558 closeTree(fTreeEnd);
559 fTreeIO.deleteFile();
560
561 fTreeIO = new HtIo<>(fHistoryFile,
562 fBlockSize,
563 fMaxChildren,
564 true,
565 fIntervalReader,
566 getNodeFactory());
567
568 clearContent();
569 /* Add the first node to the tree */
570 N firstNode = initNewLeafNode(-1, fTreeStart);
571 fLatestBranch.add(firstNode);
572 } finally {
573 fRwl.writeLock().unlock();
574 }
575 }
576
577 private void clearContent() {
578 // Re-initialize the content of the tree after the file is deleted or
579 // closed
580 fNodeCount = 0;
581 fLatestBranch.clear();
582 }
583
584 // ------------------------------------------------------------------------
585 // Operations
586 // ------------------------------------------------------------------------
587
588 /**
589 * Insert an interval in the tree.
590 *
591 * @param interval
592 * The interval to be inserted
593 * @throws RangeException
594 * If the start or end time of the interval is invalid
595 */
596 @Override
597 public synchronized void insert(E interval) throws RangeException {
598 if (interval.getStart() < fTreeStart) {
599 throw new RangeException("Interval Start:" + interval.getStart() + ", Config Start:" + fTreeStart); //$NON-NLS-1$ //$NON-NLS-2$
600 }
601 tryInsertAtNode(interval, fLatestBranch.size() - 1);
602 }
603
604 /**
605 * Add a new empty core node to the tree.
606 *
607 * @param parentSeqNumber
608 * Sequence number of this node's parent
609 * @param startTime
610 * Start time of the new node
611 * @return The newly created node
612 */
613 protected final N initNewCoreNode(int parentSeqNumber, long startTime) {
614 N newNode = getNodeFactory().createNode(NodeType.CORE, fBlockSize, fMaxChildren,
615 fNodeCount, parentSeqNumber, startTime);
616 fNodeCount++;
617 return newNode;
618 }
619
620 /**
621 * Add a new empty leaf node to the tree.
622 *
623 * @param parentSeqNumber
624 * Sequence number of this node's parent
625 * @param startTime
626 * Start time of the new node
627 * @return The newly created node
628 */
629 protected final N initNewLeafNode(int parentSeqNumber, long startTime) {
630 N newNode = getNodeFactory().createNode(NodeType.LEAF, fBlockSize, fMaxChildren,
631 fNodeCount, parentSeqNumber, startTime);
632 fNodeCount++;
633 return newNode;
634 }
635
636 /**
637 * Inner method to find in which node we should add the interval.
638 *
639 * @param interval
640 * The interval to add to the tree
641 * @param depth
642 * The index *in the latestBranch* where we are trying the
643 * insertion
644 */
645 protected final void tryInsertAtNode(E interval, int depth) {
646 N targetNode = fLatestBranch.get(depth);
647 informInsertingAtDepth(depth);
648
649 /* Verify if there is enough room in this node to store this interval */
650 if (interval.getSizeOnDisk() > targetNode.getNodeFreeSpace()) {
651 /* Nope, not enough room. Insert in a new sibling instead. */
652 addSiblingNode(depth, getNewBranchStart(depth, interval));
653 tryInsertAtNode(interval, getLatestBranch().size() - 1);
654 return;
655 }
656
657 /* Make sure the interval time range fits this node */
658 if (interval.getStart() < targetNode.getNodeStart()) {
659 /*
660 * No, this interval starts before the startTime of this node. We
661 * need to check recursively in parents if it can fit.
662 */
663 tryInsertAtNode(interval, depth - 1);
664 return;
665 }
666
667 /*
668 * Ok, there is room, and the interval fits in this time slot. Let's add
669 * it.
670 */
671 targetNode.add(interval);
672
673 updateEndTime(interval);
674 }
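// Illustration of the recursion above, with hypothetical values: if the
// current leaf (depth = latestBranch.size() - 1) has no free space left for
// the interval, addSiblingNode() closes that part of the branch and opens a
// fresh one, and the insertion is retried on the new leaf; if the interval
// instead starts before this node's start time, the call climbs one level
// towards the root (depth - 1) until it reaches a node that covers the
// interval's start time.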
675
676 /**
677 * Informs the tree that the insertion is requested at a given depth. When
678 * this is called, the element is not yet inserted, but the last call to
679 * this for an element will represent the depth at which it was really
680 * inserted. By default, this method does nothing and should not be
681 * necessary for concrete implementations, but it can be used by unit tests
682 * to check to position of insertion of elements.
683 *
684 * @param depth
685 * The depth at which the last insertion was done
686 */
687 @VisibleForTesting
688 protected void informInsertingAtDepth(int depth) {
689
690 }
691
692 /**
693 * Get the start time of the new node of the branch that will be added
694 * starting at depth.
695 *
696 * Note that the depth is the depth of the last node that was filled and to
697 * which a sibling should be added. But depending on the returned start
698 * time, the actual new branch may start at a lower depth (closer to the
699 * root) if the start time happens to be earlier than the parent's start time.
700 *
701 * @param depth
702 * The depth of the last node that was filled and at which the
703 * new branch should start.
704 * @param interval
705 * The interval that is about to be inserted
706 * @return The value that should be the start time of the sibling node
707 */
708 protected abstract long getNewBranchStart(int depth, E interval);
709
710 /**
711 * Method to add a sibling to any node in the latest branch. This will add
712 * children back down to the leaf level, if needed.
713 *
714 * @param depth
715 * The depth in latestBranch where we start adding
716 * @param newNodeStartTime
717 * The start time of the new node
718 */
719 private final void addSiblingNode(int depth, long newNodeStartTime) {
720 synchronized (fLatestBranch) {
721 final long splitTime = fTreeEnd;
722 fLatestBranchSnapshot = null;
723
724 if (depth >= fLatestBranch.size()) {
725 /*
726 * A sibling can only be added at a depth that already exists in the
727 * latest branch.
728 */
729 throw new IllegalStateException();
730 }
731
732 /* Check if we need to add a new root node */
733 if (depth == 0) {
734 addNewRootNode(newNodeStartTime);
735 return;
736 }
737
738 /*
739 * Check if we can indeed add a child to the target parent and if
740 * the new start time is not before the target parent.
741 */
742 if (fLatestBranch.get(depth - 1).getNbChildren() == fMaxChildren ||
743 newNodeStartTime < fLatestBranch.get(depth - 1).getNodeStart()) {
744 /* If not, add a branch starting one level higher instead */
745 addSiblingNode(depth - 1, newNodeStartTime);
746 return;
747 }
748
749 /*
750 * Close nodes from the leaf up because some parent nodes may need
751 * to get updated when their children are closed
752 */
753 for (int i = fLatestBranch.size() - 1; i >= depth; i--) {
754 fLatestBranch.get(i).closeThisNode(splitTime);
755 fTreeIO.writeNode(fLatestBranch.get(i));
756 }
757
758 /* Split off the new branch from the old one */
759 for (int i = depth; i < fLatestBranch.size(); i++) {
760 N prevNode = fLatestBranch.get(i - 1);
761 N newNode;
762
763 switch (fLatestBranch.get(i).getNodeType()) {
764 case CORE:
765 newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
766 break;
767 case LEAF:
768 newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
769 break;
770 default:
771 throw new IllegalStateException();
772 }
773
774 prevNode.linkNewChild(newNode);
775 fLatestBranch.set(i, newNode);
776 }
777 }
778 }
779
780 /**
781 * Similar to the previous method, except here we rebuild a completely new
782 * latestBranch
783 */
784 private void addNewRootNode(long newNodeStartTime) {
785 final long nodeEnd = fTreeEnd;
786
787 N oldRootNode = fLatestBranch.get(0);
788 N newRootNode = initNewCoreNode(-1, fTreeStart);
789
790 /* Tell the old root node that it isn't root anymore */
791 oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());
792
793 /* Close off the whole current latestBranch */
794 for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
795 fLatestBranch.get(i).closeThisNode(nodeEnd);
796 fTreeIO.writeNode(fLatestBranch.get(i));
797 }
798
799 /* Link the new root to its first child (the previous root node) */
800 newRootNode.linkNewChild(oldRootNode);
801
802 /* Rebuild a new latestBranch */
803 int depth = fLatestBranch.size();
804 fLatestBranch.clear();
805 fLatestBranch.add(newRootNode);
806
807 // Create new coreNode
808 for (int i = 1; i < depth; i++) {
809 N prevNode = fLatestBranch.get(i - 1);
810 N newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
811 prevNode.linkNewChild(newNode);
812 fLatestBranch.add(newNode);
813 }
814
815 // Create the new leafNode
816 N prevNode = fLatestBranch.get(depth - 1);
817 N newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
818 prevNode.linkNewChild(newNode);
819 fLatestBranch.add(newNode);
820 }
821
822 /**
823 * Update the tree's end time with this interval data
824 *
825 * @param interval
826 * The interval that was just added to the tree
827 */
828 protected void updateEndTime(E interval) {
829 fTreeEnd = Math.max(fTreeEnd, interval.getEnd());
830 }
831
832 @Override
833 public void closeTree(long requestedEndTime) {
834 /* This is an important operation, queries can wait */
835 synchronized (fLatestBranch) {
836 /*
837 * Work-around the "empty branches" that get created when the root
838 * node becomes full. Overwrite the tree's end time with the
839 * original wanted end-time, to ensure no queries are sent into
840 * those empty nodes.
841 */
842 fTreeEnd = requestedEndTime;
843
844 /* Close off the latest branch of the tree */
845 for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
846 fLatestBranch.get(i).closeThisNode(fTreeEnd);
847 fTreeIO.writeNode(fLatestBranch.get(i));
848 }
849
850 try (FileOutputStream fc = fTreeIO.getFileWriter(-1);) {
851 ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
852 buffer.order(ByteOrder.LITTLE_ENDIAN);
853 buffer.clear();
854
855 buffer.putInt(getMagicNumber());
856
857 buffer.putInt(getFileVersion());
858 buffer.putInt(fProviderVersion);
859
860 buffer.putInt(fBlockSize);
861 buffer.putInt(fMaxChildren);
862
863 buffer.putInt(fNodeCount);
864
865 /* root node seq. nb */
866 buffer.putInt(fLatestBranch.get(0).getSequenceNumber());
867
868 /* start time of this history */
869 buffer.putLong(fLatestBranch.get(0).getNodeStart());
870
871 buffer.flip();
872 fc.write(buffer.array());
873 /* done writing the file header */
874
875 } catch (IOException e) {
876 /*
877 * If we were able to write so far, there should not be any
878 * problem at this point...
879 */
880 throw new RuntimeException("State system write error"); //$NON-NLS-1$
881 }
882 }
883 }
884
885 @Override
886 public Iterable<E> getMatchingIntervals(TimeRangeCondition timeCondition,
887 Predicate<E> extraPredicate) {
888
889 // TODO Change this to evaluate the nodes lazily
890
891 List<Iterable<E>> intervalsOfNodes = new LinkedList<>();
892
893 /* The queue contains the sequence numbers of nodes intersecting the time condition */
894 Deque<Integer> queue = new LinkedList<>();
895 /* We start by reading the information in the root node */
896 queue.add(getRootNode().getSequenceNumber());
897
898 /* Then we follow down into the relevant children */
899 try {
900 while (!queue.isEmpty()) {
901 int sequenceNumber = queue.pop();
902 HTNode<E> currentNode = readNode(sequenceNumber);
903 TimeRangeCondition nodeCondition = timeCondition.subCondition(
904 currentNode.getNodeStart(), currentNode.getNodeEnd());
905
906 if (nodeCondition == null) {
907 continue;
908 }
909
910 if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
911 /* Here we add the relevant children nodes for BFS */
912 queue.addAll(currentNode.selectNextChildren(nodeCondition));
913 }
914 Iterable<E> nodeIntervals = currentNode.getMatchingIntervals(nodeCondition, extraPredicate);
915 intervalsOfNodes.add(nodeIntervals);
916 }
917 } catch (ClosedChannelException e) { /* The file was closed while reading, return the intervals gathered so far */
918 }
919 return Iterables.concat(intervalsOfNodes);
920 }
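// A minimal query sketch (hypothetical caller code, reusing the "MyInterval"
// type from earlier examples): fetch every interval crossing the range
// [t1, t2] without any extra filtering. This assumes TimeRangeCondition
// offers a factory for a continuous [low, high] range, here called
// forContinuousRange().
//
//     Iterable<MyInterval> hits = tree.getMatchingIntervals(
//             TimeRangeCondition.forContinuousRange(t1, t2),
//             interval -> true);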
921
922 @Override
923 public @Nullable E getMatchingInterval(TimeRangeCondition timeCondition,
924 Predicate<E> extraPredicate) {
925
926 /* The queue contains the sequence numbers of nodes intersecting the time condition */
927 Deque<Integer> queue = new LinkedList<>();
928 /* We start by reading the information in the root node */
929 queue.add(getRootNode().getSequenceNumber());
930
931 /* Then we follow down into the relevant children until we find the interval */
932 try {
933 while (!queue.isEmpty()) {
934 int sequenceNumber = queue.pop();
935 HTNode<E> currentNode = readNode(sequenceNumber);
936
937 @Nullable E interval = currentNode.getMatchingInterval(timeCondition, extraPredicate);
938 if (interval != null) {
939 return interval;
940 }
941
942 if (currentNode.getNodeType() == HTNode.NodeType.CORE) {
943 /* Here we add the relevant children nodes for BFS */
944 queue.addAll(currentNode.selectNextChildren(timeCondition));
945 }
946 }
947 } catch (ClosedChannelException e) { /* The file was closed while reading, nothing more can be found */
948 }
949 return null;
950 }
951
952 @Override
953 public String toString() {
954 return "Information on the current tree:\n\n" + "Blocksize: " //$NON-NLS-1$ //$NON-NLS-2$
955 + fBlockSize + "\n" + "Max nb. of children per node: " //$NON-NLS-1$//$NON-NLS-2$
956 + fMaxChildren + "\n" + "Number of nodes: " + fNodeCount //$NON-NLS-1$//$NON-NLS-2$
957 + "\n" + "Depth of the tree: " + fLatestBranch.size() + "\n" //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
958 + "Size of the treefile: " + getFileSize() + "\n" //$NON-NLS-1$//$NON-NLS-2$
959 + "Root node has sequence number: " //$NON-NLS-1$
960 + fLatestBranch.get(0).getSequenceNumber() + "\n" //$NON-NLS-1$
961 + "'Latest leaf' has sequence number: " //$NON-NLS-1$
962 + fLatestBranch.get(fLatestBranch.size() - 1).getSequenceNumber();
963 }
964
965
966 // ------------------------------------------------------------------------
967 // Test-specific methods
968 // ------------------------------------------------------------------------
969
970 /**
971 * Get the current depth of the tree.
972 *
973 * @return The current depth
974 */
975 @VisibleForTesting
976 protected int getDepth() {
977 return getLatestBranch().size();
978 }
979
980 /**
981 * Get the leaf (bottom-most) node of the latest branch.
982 *
983 * @return The latest leaf
984 */
985 @VisibleForTesting
986 protected N getLatestLeaf() {
987 List<N> latestBranch = getLatestBranch();
988 return latestBranch.get(latestBranch.size() - 1);
989 }
990
991 /**
992 * Verify a node's specific information about a child.
993 *
994 * @param parent
995 * The parent node
996 * @param index
997 * The index of the child in the parent's extra data
998 * @param child
999 * The child node to verify
1000 * @return False if a problem was found, true otherwise
1001 */
1002 @VisibleForTesting
1003 protected boolean verifyChildrenSpecific(N parent,
1004 int index,
1005 N child) {
1006 // Nothing to do for the default implementation
1007 return true;
1008 }
1009
1010 /**
1011 * This method should verify, over the whole time range of the parent node,
1012 * whether the child node is returned among the next children for each timestamp.
1013 *
1014 * @param parent
1015 * The parent node
1016 * @param child
1017 * The child node
1018 * @return False if a problem was found, true otherwise
1019 */
1020 @VisibleForTesting
1021 protected boolean verifyIntersectingChildren(N parent, N child) {
1022 int childSequence = child.getSequenceNumber();
1023 boolean shouldBeInCollection;
1024 Collection<Integer> nextChildren;
1025 for (long t = parent.getNodeStart(); t < parent.getNodeEnd(); t++) {
1026 shouldBeInCollection = true;
1027 nextChildren = parent.selectNextChildren(TimeRangeCondition.singleton(t));
1028 if (shouldBeInCollection != nextChildren.contains(childSequence)) {
1029 return false;
1030 }
1031 }
1032 return true;
1033 }
1034 }