Commit | Line | Data |
---|---|---|
5e7913a4 GB |
1 | /******************************************************************************* |
2 | * Copyright (c) 2017 École Polytechnique de Montréal | |
3 | * | |
4 | * All rights reserved. This program and the accompanying materials are | |
5 | * made available under the terms of the Eclipse Public License v1.0 which | |
6 | * accompanies this distribution, and is available at | |
7 | * http://www.eclipse.org/legal/epl-v10.html | |
8 | *******************************************************************************/ | |
9 | ||
10 | package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree; | |
11 | ||
12 | import java.io.File; | |
13 | import java.io.FileInputStream; | |
14 | import java.io.FileOutputStream; | |
15 | import java.io.IOException; | |
16 | import java.nio.ByteBuffer; | |
17 | import java.nio.ByteOrder; | |
18 | import java.nio.channels.ClosedChannelException; | |
19 | import java.nio.channels.FileChannel; | |
20 | import java.util.ArrayList; | |
8b03451b | 21 | import java.util.Collection; |
5e7913a4 GB |
22 | import java.util.Collections; |
23 | import java.util.Deque; | |
24 | import java.util.LinkedList; | |
25 | import java.util.List; | |
26 | import java.util.concurrent.locks.ReentrantReadWriteLock; | |
27 | import java.util.function.Predicate; | |
28 | ||
29 | import org.eclipse.jdt.annotation.NonNull; | |
54d250a3 | 30 | import org.eclipse.jdt.annotation.Nullable; |
5e7913a4 | 31 | import org.eclipse.tracecompass.common.core.NonNullUtils; |
dad84716 GB |
32 | import org.eclipse.tracecompass.datastore.core.interval.IHTInterval; |
33 | import org.eclipse.tracecompass.datastore.core.interval.IHTIntervalReader; | |
5e7913a4 | 34 | import org.eclipse.tracecompass.internal.datastore.core.historytree.HtIo; |
c2e3f4dd | 35 | import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.TimeRangeCondition; |
5e7913a4 GB |
36 | import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException; |
37 | import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHTNode.NodeType; | |
5e7913a4 GB |
38 | |
39 | import com.google.common.annotations.VisibleForTesting; | |
40 | import com.google.common.collect.ImmutableList; | |
5e7913a4 GB |
41 | |
42 | /** | |
43 | * Base class for history trees that encapsulates the logic to read from/write | |
44 | * to a file. | |
45 | * | |
46 | * @author Alexandre Montplaisir | |
47 | * @author Geneviève Bastien | |
48 | * @param <E> | |
49 | * The type of intervals that will be saved in the tree | |
50 | * @param <N> | |
51 | * The base type of the nodes of this tree | |
52 | */ | |
53 | public abstract class AbstractHistoryTree<E extends IHTInterval, N extends HTNode<E>> | |
54 | implements IHistoryTree<E> { | |
55 | ||
    /**
     * Factory interface used by the history tree to instantiate its nodes.
     * Concrete tree implementations supply an instance through
     * {@code getNodeFactory()}.
     *
     * @param <E>
     *            The type of intervals that will be saved in the node
     * @param <N>
     *            The base type of the nodes of this tree
     */
    @FunctionalInterface
    public interface IHTNodeFactory<E extends IHTInterval, N extends HTNode<E>> {

        /**
         * Creates a new node for the specific history tree
         *
         * @param type
         *            The type of node to create. See {@link IHTNode.NodeType}.
         * @param blockSize
         *            The size (in bytes) of each node once serialized to disk
         * @param maxChildren
         *            The maximum number of children a single core node can
         *            have
         * @param seqNumber
         *            The (unique) sequence number assigned to this particular
         *            node
         * @param parentSeqNumber
         *            The sequence number of this node's parent node
         * @param start
         *            The earliest timestamp stored in this node
         * @return The new core node
         */
        N createNode(NodeType type, int blockSize, int maxChildren,
                int seqNumber, int parentSeqNumber, long start);
    }
88 | ||
    // ------------------------------------------------------------------------
    // Tree-specific configuration
    // ------------------------------------------------------------------------

    /* Tree configuration constants, all fixed at construction time */
    private final File fHistoryFile;
    private final int fBlockSize;
    private final int fMaxChildren;
    private final int fProviderVersion;
    private final long fTreeStart;
    private final IHTIntervalReader<E> fIntervalReader;

    /** Reader/writer object (re-created when the tree file is cleaned) */
    private HtIo<E, N> fTreeIO;

    // ------------------------------------------------------------------------
    // Variable Fields (will change throughout the existence of the SHT)
    // ------------------------------------------------------------------------

    /** Latest timestamp found in the tree (at any given moment) */
    private long fTreeEnd;

    /** The total number of nodes that exists in this tree */
    private int fNodeCount;

    /** "Cache" to keep the active nodes in memory */
    private final List<N> fLatestBranch;

    /* Lock used to protect the accesses to the HT_IO object */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /* Lazily-built immutable copy of fLatestBranch; nulled whenever the branch changes */
    private transient @Nullable List<N> fLatestBranchSnapshot = null;
121 | ||
5e7913a4 GB |
    /**
     * Create a new State History from scratch, specifying all configuration
     * parameters.
     *
     * @param stateHistoryFile
     *            The name of the history file
     * @param blockSize
     *            The size of each "block" on disk in bytes. One node will
     *            always fit in one block. It should be at least 4096.
     * @param maxChildren
     *            The maximum number of children allowed per core (non-leaf)
     *            node.
     * @param providerVersion
     *            The version of the state provider. If a file already exists,
     *            and their versions match, the history file will not be rebuilt
     *            uselessly.
     * @param treeStart
     *            The start time of the history
     * @param intervalReader
     *            The factory to create new tree intervals when reading from
     *            the disk
     * @throws IOException
     *             If an error happens trying to open/write to the file
     *             specified in the config
     * @throws IllegalArgumentException
     *             If the block size is too small to hold the tree header
     */
    public AbstractHistoryTree(File stateHistoryFile,
            int blockSize,
            int maxChildren,
            int providerVersion,
            long treeStart,
            IHTIntervalReader<E> intervalReader) throws IOException {
        /*
         * Simple check to make sure we have enough place in the 0th block for
         * the tree configuration
         */
        if (blockSize < TREE_HEADER_SIZE) {
            throw new IllegalArgumentException();
        }

        fHistoryFile = stateHistoryFile;
        fBlockSize = blockSize;
        fMaxChildren = maxChildren;
        fProviderVersion = providerVersion;
        fTreeStart = treeStart;
        fIntervalReader = intervalReader;

        fTreeEnd = treeStart;
        fNodeCount = 0;
        fLatestBranch = NonNullUtils.checkNotNull(Collections.synchronizedList(new ArrayList<>()));

        /* Prepare the IO object ('true' = start a brand new file) */
        fTreeIO = new HtIo<>(stateHistoryFile,
                blockSize,
                maxChildren,
                true,
                intervalReader,
                getNodeFactory());

        /* Add the first node to the tree */
        N firstNode = initNewLeafNode(-1, treeStart);
        fLatestBranch.add(firstNode);
    }
184 | ||
    /**
     * "Reader" constructor : instantiate a SHTree from an existing tree file on
     * disk
     *
     * @param existingStateFile
     *            Path/filename of the history-file we are to open
     * @param expectedProviderVersion
     *            The expected version of the state provider
     * @param intervalReader
     *            The factory used to read segments from the history tree
     * @throws IOException
     *             If an error happens reading the file, or if the file's
     *             header (magic number, format version, provider version,
     *             start time) does not match what is expected
     */
    public AbstractHistoryTree(File existingStateFile,
            int expectedProviderVersion,
            IHTIntervalReader<E> intervalReader) throws IOException {
        /*
         * Open the file ourselves, get the tree header information we need,
         * then pass on the descriptor to the TreeIO object.
         */
        int rootNodeSeqNb, res;
        int bs, maxc;
        long startTime;

        /* Java I/O mumbo jumbo... */
        if (!existingStateFile.exists()) {
            throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
        }
        if (existingStateFile.length() <= 0) {
            throw new IOException("Empty target file"); //$NON-NLS-1$
        }

        try (FileInputStream fis = new FileInputStream(existingStateFile);
                FileChannel fc = fis.getChannel();) {

            ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            res = fc.read(buffer);
            if (res != TREE_HEADER_SIZE) {
                throw new IOException("Invalid header size"); //$NON-NLS-1$
            }

            buffer.flip();

            /*
             * Check the magic number to make sure we're opening the right type
             * of file
             */
            res = buffer.getInt();
            if (res != getMagicNumber()) {
                throw new IOException("Wrong magic number"); //$NON-NLS-1$
            }

            res = buffer.getInt(); /* File format version number */
            if (res != getFileVersion()) {
                throw new IOException("Mismatching History Tree file format versions"); //$NON-NLS-1$
            }

            res = buffer.getInt(); /* Event handler's version number */
            if (res != expectedProviderVersion) {
                /*
                 * The existing history was built using an event handler that
                 * doesn't match the current one in the framework.
                 *
                 * Information could be all wrong. Instead of keeping an
                 * incorrect history file, a rebuild is done.
                 */
                throw new IOException("Mismatching event handler versions"); //$NON-NLS-1$
            }

            bs = buffer.getInt(); /* Block Size */
            maxc = buffer.getInt(); /* Max nb of children per node */

            fNodeCount = buffer.getInt();
            rootNodeSeqNb = buffer.getInt();
            startTime = buffer.getLong();

            /* Set all other permanent configuration options */
            fHistoryFile = existingStateFile;
            fBlockSize = bs;
            fMaxChildren = maxc;
            fProviderVersion = expectedProviderVersion;
            fIntervalReader = intervalReader;
            fTreeStart = startTime;
        }

        /*
         * FIXME We close fis here and the TreeIO will then reopen the same
         * file, not extremely elegant. But how to pass the information here to
         * the SHT otherwise?
         */
        fTreeIO = new HtIo<>(fHistoryFile,
                fBlockSize,
                fMaxChildren,
                false,
                fIntervalReader,
                getNodeFactory());

        fLatestBranch = buildLatestBranch(rootNodeSeqNb);
        fTreeEnd = getRootNode().getNodeEnd();

        /*
         * Make sure the history start time we read previously is consistent
         * with what is actually in the root node.
         */
        if (startTime != getRootNode().getNodeStart()) {
            throw new IOException("Inconsistent start times in the " + //$NON-NLS-1$
                    "history file, it might be corrupted."); //$NON-NLS-1$
        }
    }
297 | ||
    /**
     * Rebuild the latestBranch "cache" object by reading the nodes from disk
     * (When we are opening an existing file on disk and want to append to it,
     * for example).
     *
     * @param rootNodeSeqNb
     *            The sequence number of the root node, so we know where to
     *            start
     * @return A synchronized list of the nodes from the root down to the
     *         latest (right-most) leaf
     * @throws ClosedChannelException
     *             If the tree I/O channel was closed while reading the nodes
     */
    private List<N> buildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
        List<N> list = new ArrayList<>();

        N nextChildNode = fTreeIO.readNode(rootNodeSeqNb);
        list.add(nextChildNode);

        // TODO: Do we need the full latest branch? The latest leaf may not be
        // the one we'll query first... Won't it build itself later?

        /* Follow the last branch up to the leaf */
        while (nextChildNode.getNodeType() == HTNode.NodeType.CORE) {
            nextChildNode = fTreeIO.readNode(nextChildNode.getLatestChild());
            list.add(nextChildNode);
        }
        return Collections.synchronizedList(list);
    }
324 | ||
    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    @Override
    public long getTreeStart() {
        return fTreeStart;
    }

    @Override
    public long getTreeEnd() {
        return fTreeEnd;
    }

    /**
     * Get the number of nodes in this tree.
     *
     * @return The number of nodes
     */
    public int getNodeCount() {
        return fNodeCount;
    }

    /**
     * Get the current root node of this tree
     *
     * @return The root node (first entry of the latest branch)
     */
    public N getRootNode() {
        return fLatestBranch.get(0);
    }

    @Override
    public long getFileSize() {
        return fHistoryFile.length();
    }
361 | ||
    /**
     * Return the latest branch of the tree. That branch is immutable.
     *
     * The immutable copy is built lazily and cached in fLatestBranchSnapshot;
     * mutators null the cache to force a rebuild on the next call.
     *
     * @return The immutable latest branch
     */
    @VisibleForTesting
    protected final List<N> getLatestBranch() {
        List<N> latestBranchSnapshot = fLatestBranchSnapshot;
        if (latestBranchSnapshot == null) {
            /* Hold the branch's monitor while copying, consistent with mutators */
            synchronized (fLatestBranch) {
                latestBranchSnapshot = ImmutableList.copyOf(fLatestBranch);
                fLatestBranchSnapshot = latestBranchSnapshot;
            }
        }
        return latestBranchSnapshot;
    }
378 | ||
379 | /** | |
380 | * Get the node in the latest branch at a depth. If the depth is too large, | |
381 | * it will throw an IndexOutOfBoundsException | |
382 | * | |
383 | * @param depth | |
384 | * The depth at which to get the node | |
385 | * @return The node at depth | |
386 | */ | |
fb7125d6 | 387 | protected N getLatestNode(int depth) { |
5e7913a4 GB |
388 | if (depth > fLatestBranch.size()) { |
389 | throw new IndexOutOfBoundsException("Trying to get latest node too deep"); //$NON-NLS-1$ | |
390 | } | |
391 | return fLatestBranch.get(depth); | |
392 | } | |
393 | ||
    /**
     * Get the magic number for the history file. This number should be specific
     * for each implementation of the history tree.
     *
     * @return The magic number for the history file
     */
    protected abstract int getMagicNumber();

    /**
     * Get the file version for the history file. This file version should be
     * modified for a history tree class whenever changing the format of the
     * file. Different versions of the file may not be compatible.
     *
     * @return The file version for the history file.
     */
    protected abstract int getFileVersion();

    /**
     * Get the factory to use to create new nodes for this history tree.
     *
     * This method is called in the constructor of the abstract class, so
     * assigning the factory to a final field may cause NullPointerException
     * since that final field may not be initialized the first time this is
     * called.
     *
     * @return The NodeFactory for the History Tree
     */
    protected abstract IHTNodeFactory<E, N> getNodeFactory();
422 | ||
423 | /** | |
424 | * Read a node with a given sequence number | |
425 | * | |
426 | * @param seqNum | |
427 | * The sequence number of the node to read | |
428 | * @return The HTNode object | |
429 | * @throws ClosedChannelException | |
430 | * Exception thrown when reading the node, if the file was | |
431 | * closed | |
432 | */ | |
433 | @VisibleForTesting | |
7182d50c | 434 | protected @NonNull N getNode(int seqNum) throws ClosedChannelException { |
5e7913a4 GB |
435 | // First, check in the latest branch if the node is there |
436 | for (N node : fLatestBranch) { | |
437 | if (node.getSequenceNumber() == seqNum) { | |
438 | return node; | |
439 | } | |
440 | } | |
441 | return fTreeIO.readNode(seqNum); | |
442 | } | |
443 | ||
    /**
     * Retrieve the TreeIO object. Should only be used for testing.
     *
     * @return The TreeIO
     */
    @VisibleForTesting
    HtIo<E, N> getTreeIO() {
        return fTreeIO;
    }
453 | ||
    // ------------------------------------------------------------------------
    // HT_IO interface
    // ------------------------------------------------------------------------

    // TODO Remove from here
    @Override
    public FileInputStream supplyATReader() {
        /* Read lock: may run concurrently with other reads/writes of nodes */
        fRwl.readLock().lock();
        try {
            return fTreeIO.supplyATReader(getNodeCount());
        } finally {
            fRwl.readLock().unlock();
        }
    }

    // TODO Remove from here
    @Override
    public File supplyATWriterFile() {
        return fHistoryFile;
    }

    // TODO Remove from here
    @Override
    public long supplyATWriterFilePos() {
        /* Attribute-tree data goes right after the header and all node blocks */
        return IHistoryTree.TREE_HEADER_SIZE
                + ((long) getNodeCount() * fBlockSize);
    }
481 | ||
482 | /** | |
483 | * Read a node from the tree. | |
484 | * | |
485 | * @param seqNumber | |
486 | * The sequence number of the node to read | |
487 | * @return The node | |
488 | * @throws ClosedChannelException | |
489 | * If the tree IO is unavailable | |
490 | */ | |
491 | public N readNode(int seqNumber) throws ClosedChannelException { | |
492 | /* Try to read the node from memory */ | |
493 | synchronized (fLatestBranch) { | |
494 | for (N node : fLatestBranch) { | |
495 | if (node.getSequenceNumber() == seqNumber) { | |
496 | return node; | |
497 | } | |
498 | } | |
499 | } | |
500 | ||
501 | fRwl.readLock().lock(); | |
502 | try { | |
503 | /* Read the node from disk */ | |
504 | return fTreeIO.readNode(seqNumber); | |
505 | } finally { | |
506 | fRwl.readLock().unlock(); | |
507 | } | |
508 | } | |
509 | ||
    /**
     * Write a node object to the history file.
     *
     * @param node
     *            The node to write to disk
     */
    public void writeNode(N node) {
        /* Read lock suffices: writes of distinct nodes may proceed in parallel */
        fRwl.readLock().lock();
        try {
            fTreeIO.writeNode(node);
        } finally {
            fRwl.readLock().unlock();
        }
    }

    /**
     * Close the history file.
     */
    @Override
    public void closeFile() {
        fRwl.writeLock().lock();
        try {
            fTreeIO.closeFile();
            clearContent();
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * Delete the history file.
     */
    @Override
    public void deleteFile() {
        fRwl.writeLock().lock();
        try {
            fTreeIO.deleteFile();
            clearContent();
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    /**
     * Reset the tree: close it at its current end time, delete the backing
     * file, then re-create an empty tree (fresh IO object and a new root leaf
     * node) over the same file path.
     */
    @Override
    public void cleanFile() throws IOException {
        fRwl.writeLock().lock();
        try {
            closeTree(fTreeEnd);
            fTreeIO.deleteFile();

            /* 'true' = start a brand new file */
            fTreeIO = new HtIo<>(fHistoryFile,
                    fBlockSize,
                    fMaxChildren,
                    true,
                    fIntervalReader,
                    getNodeFactory());

            clearContent();
            /* Add the first node to the tree */
            N firstNode = initNewLeafNode(-1, fTreeStart);
            fLatestBranch.add(firstNode);
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    private void clearContent() {
        // Re-initialize the content of the tree after the file is deleted or
        // closed
        fNodeCount = 0;
        fLatestBranch.clear();
    }
582 | ||
    // ------------------------------------------------------------------------
    // Operations
    // ------------------------------------------------------------------------

    /**
     * Insert an interval in the tree.
     *
     * @param interval
     *            The interval to be inserted
     * @throws RangeException
     *             If the start or end time of the interval are invalid
     */
    @Override
    public synchronized void insert(E interval) throws RangeException {
        if (interval.getStart() < fTreeStart) {
            throw new RangeException("Interval Start:" + interval.getStart() + ", Config Start:" + fTreeStart); //$NON-NLS-1$ //$NON-NLS-2$
        }
        /* Start the insertion attempt at the deepest node of the branch */
        tryInsertAtNode(interval, fLatestBranch.size() - 1);
    }
602 | ||
603 | /** | |
604 | * Add a new empty core node to the tree. | |
605 | * | |
606 | * @param parentSeqNumber | |
607 | * Sequence number of this node's parent | |
608 | * @param startTime | |
609 | * Start time of the new node | |
610 | * @return The newly created node | |
611 | */ | |
612 | protected final N initNewCoreNode(int parentSeqNumber, long startTime) { | |
613 | N newNode = getNodeFactory().createNode(NodeType.CORE, fBlockSize, fMaxChildren, | |
614 | fNodeCount, parentSeqNumber, startTime); | |
615 | fNodeCount++; | |
616 | return newNode; | |
617 | } | |
618 | ||
619 | /** | |
620 | * Add a new empty leaf node to the tree. | |
621 | * | |
622 | * @param parentSeqNumber | |
623 | * Sequence number of this node's parent | |
624 | * @param startTime | |
625 | * Start time of the new node | |
626 | * @return The newly created node | |
627 | */ | |
628 | protected final N initNewLeafNode(int parentSeqNumber, long startTime) { | |
629 | N newNode = getNodeFactory().createNode(NodeType.LEAF, fBlockSize, fMaxChildren, | |
630 | fNodeCount, parentSeqNumber, startTime); | |
631 | fNodeCount++; | |
632 | return newNode; | |
633 | } | |
634 | ||
    /**
     * Inner method to find in which node we should add the interval.
     *
     * @param interval
     *            The interval to add to the tree
     * @param depth
     *            The index *in the latestBranch* where we are trying the
     *            insertion
     */
    protected final void tryInsertAtNode(E interval, int depth) {
        N targetNode = fLatestBranch.get(depth);
        informInsertingAtDepth(depth);

        /* Verify if there is enough room in this node to store this interval */
        if (interval.getSizeOnDisk() > targetNode.getNodeFreeSpace()) {
            /* Nope, not enough room. Insert in a new sibling instead. */
            addSiblingNode(depth, getNewBranchStart(depth, interval));
            /* The branch changed: retry from its new deepest node */
            tryInsertAtNode(interval, getLatestBranch().size() - 1);
            return;
        }

        /* Make sure the interval time range fits this node */
        if (interval.getStart() < targetNode.getNodeStart()) {
            /*
             * No, this interval starts before the startTime of this node. We
             * need to check recursively in parents if it can fit.
             */
            tryInsertAtNode(interval, depth - 1);
            return;
        }

        /*
         * Ok, there is room, and the interval fits in this time slot. Let's add
         * it.
         */
        targetNode.add(interval);

        updateEndTime(interval);
    }
674 | ||
    /**
     * Informs the tree that the insertion is requested at a given depth. When
     * this is called, the element is not yet inserted, but the last call to
     * this for an element will represent the depth at which it was really
     * inserted. By default, this method does nothing and should not be
     * necessary for concrete implementations, but it can be used by unit tests
     * to check the position of insertion of elements.
     *
     * @param depth
     *            The depth at which the last insertion was done
     */
    @VisibleForTesting
    protected void informInsertingAtDepth(int depth) {
        /* Intentional no-op; test subclasses may override to record depths */
    }
690 | ||
    /**
     * Get the start time of the new node of the branch that will be added
     * starting at depth.
     *
     * Note that the depth is the depth of the last node that was filled and to
     * which a sibling should be added. But depending on the returned start
     * time, the actual new branch may start at a lower depth if the start time
     * happens to be lesser than the parent's start time.
     *
     * @param depth
     *            The depth of the last node that was filled and at which the
     *            new branch should start.
     * @param interval
     *            The interval that is about to be inserted
     * @return The value that should be the start time of the sibling node
     */
    protected abstract long getNewBranchStart(int depth, E interval);
708 | ||
    /**
     * Method to add a sibling to any node in the latest branch. This will add
     * children back down to the leaf level, if needed.
     *
     * @param depth
     *            The depth in latestBranch where we start adding
     * @param newNodeStartTime
     *            The start time of the new node
     */
    private final void addSiblingNode(int depth, long newNodeStartTime) {
        synchronized (fLatestBranch) {
            final long splitTime = fTreeEnd;
            /* The branch is about to change: invalidate the cached snapshot */
            fLatestBranchSnapshot = null;

            if (depth >= fLatestBranch.size()) {
                /*
                 * We need to make sure (indexOfNode - 1) doesn't get the last
                 * node in the branch, because that one is a Leaf Node.
                 */
                throw new IllegalStateException();
            }

            /* Check if we need to add a new root node */
            if (depth == 0) {
                addNewRootNode(newNodeStartTime);
                return;
            }

            /*
             * Check if we can indeed add a child to the target parent and if
             * the new start time is not before the target parent.
             */
            if (fLatestBranch.get(depth - 1).getNbChildren() == fMaxChildren ||
                    newNodeStartTime < fLatestBranch.get(depth - 1).getNodeStart()) {
                /* If not, add a branch starting one level higher instead */
                addSiblingNode(depth - 1, newNodeStartTime);
                return;
            }

            /*
             * Close nodes from the leaf up because some parent nodes may need
             * to get updated when their children are closed
             */
            for (int i = fLatestBranch.size() - 1; i >= depth; i--) {
                fLatestBranch.get(i).closeThisNode(splitTime);
                fTreeIO.writeNode(fLatestBranch.get(i));
            }

            /* Split off the new branch from the old one */
            for (int i = depth; i < fLatestBranch.size(); i++) {
                N prevNode = fLatestBranch.get(i - 1);
                N newNode;

                switch (fLatestBranch.get(i).getNodeType()) {
                case CORE:
                    newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
                    break;
                case LEAF:
                    newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
                    break;
                default:
                    throw new IllegalStateException();
                }

                prevNode.linkNewChild(newNode);
                fLatestBranch.set(i, newNode);
            }
        }
    }
778 | ||
    /**
     * Similar to the previous method, except here we rebuild a completely new
     * latestBranch, rooted at a brand new core node.
     *
     * @param newNodeStartTime
     *            The start time of the new nodes (below the new root)
     */
    private void addNewRootNode(long newNodeStartTime) {
        final long nodeEnd = fTreeEnd;

        N oldRootNode = fLatestBranch.get(0);
        /* The new root covers the whole history, so it starts at fTreeStart */
        N newRootNode = initNewCoreNode(-1, fTreeStart);

        /* Tell the old root node that it isn't root anymore */
        oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());

        /* Close off the whole current latestBranch */
        for (int i = fLatestBranch.size() - 1; i >= 0; i--) {
            fLatestBranch.get(i).closeThisNode(nodeEnd);
            fTreeIO.writeNode(fLatestBranch.get(i));
        }

        /* Link the new root to its first child (the previous root node) */
        newRootNode.linkNewChild(oldRootNode);

        /* Rebuild a new latestBranch, one level deeper than before */
        int depth = fLatestBranch.size();
        fLatestBranch.clear();
        fLatestBranch.add(newRootNode);

        // Create new coreNode
        for (int i = 1; i < depth; i++) {
            N prevNode = fLatestBranch.get(i - 1);
            N newNode = initNewCoreNode(prevNode.getSequenceNumber(), newNodeStartTime);
            prevNode.linkNewChild(newNode);
            fLatestBranch.add(newNode);
        }

        // Create the new leafNode
        N prevNode = fLatestBranch.get(depth - 1);
        N newNode = initNewLeafNode(prevNode.getSequenceNumber(), newNodeStartTime);
        prevNode.linkNewChild(newNode);
        fLatestBranch.add(newNode);
    }
820 | ||
821 | /** | |
822 | * Update the tree's end time with this interval data | |
823 | * | |
824 | * @param interval | |
825 | * The interval that was just added to the tree | |
826 | */ | |
827 | protected void updateEndTime(E interval) { | |
828 | fTreeEnd = Math.max(fTreeEnd, interval.getEnd()); | |
829 | } | |
830 | ||
831 | @Override | |
832 | public void closeTree(long requestedEndTime) { | |
833 | /* This is an important operation, queries can wait */ | |
834 | synchronized (fLatestBranch) { | |
835 | /* | |
836 | * Work-around the "empty branches" that get created when the root | |
837 | * node becomes full. Overwrite the tree's end time with the | |
838 | * original wanted end-time, to ensure no queries are sent into | |
839 | * those empty nodes. | |
840 | */ | |
841 | fTreeEnd = requestedEndTime; | |
842 | ||
843 | /* Close off the latest branch of the tree */ | |
844 | for (int i = fLatestBranch.size() - 1; i >= 0; i--) { | |
845 | fLatestBranch.get(i).closeThisNode(fTreeEnd); | |
846 | fTreeIO.writeNode(fLatestBranch.get(i)); | |
847 | } | |
848 | ||
849 | try (FileOutputStream fc = fTreeIO.getFileWriter(-1);) { | |
850 | ByteBuffer buffer = ByteBuffer.allocate(TREE_HEADER_SIZE); | |
851 | buffer.order(ByteOrder.LITTLE_ENDIAN); | |
852 | buffer.clear(); | |
853 | ||
854 | buffer.putInt(getMagicNumber()); | |
855 | ||
856 | buffer.putInt(getFileVersion()); | |
857 | buffer.putInt(fProviderVersion); | |
858 | ||
859 | buffer.putInt(fBlockSize); | |
860 | buffer.putInt(fMaxChildren); | |
861 | ||
862 | buffer.putInt(fNodeCount); | |
863 | ||
864 | /* root node seq. nb */ | |
865 | buffer.putInt(fLatestBranch.get(0).getSequenceNumber()); | |
866 | ||
867 | /* start time of this history */ | |
868 | buffer.putLong(fLatestBranch.get(0).getNodeStart()); | |
869 | ||
870 | buffer.flip(); | |
871 | fc.write(buffer.array()); | |
872 | /* done writing the file header */ | |
873 | ||
874 | } catch (IOException e) { | |
875 | /* | |
876 | * If we were able to write so far, there should not be any | |
877 | * problem at this point... | |
878 | */ | |
879 | throw new RuntimeException("State system write error"); //$NON-NLS-1$ | |
880 | } | |
881 | } | |
882 | } | |
883 | ||
884 | @Override | |
c2e3f4dd | 885 | public Iterable<E> getMatchingIntervals(TimeRangeCondition timeCondition, |
5e7913a4 GB |
886 | Predicate<E> extraPredicate) { |
887 | ||
888 | // TODO Change this to evaluate the nodes lazily | |
889 | ||
90846f6e | 890 | List<E> intervalsOfNodes = new ArrayList<>(); |
5e7913a4 GB |
891 | |
892 | /* Queue is a stack of nodes containing nodes intersecting t */ | |
893 | Deque<Integer> queue = new LinkedList<>(); | |
894 | /* We start by reading the information in the root node */ | |
895 | queue.add(getRootNode().getSequenceNumber()); | |
896 | ||
897 | /* Then we follow the down in the relevant children */ | |
898 | try { | |
899 | while (!queue.isEmpty()) { | |
900 | int sequenceNumber = queue.pop(); | |
901 | HTNode<E> currentNode = readNode(sequenceNumber); | |
c2e3f4dd | 902 | TimeRangeCondition nodeCondition = timeCondition.subCondition( |
5e7913a4 GB |
903 | currentNode.getNodeStart(), currentNode.getNodeEnd()); |
904 | ||
905 | if (nodeCondition == null) { | |
906 | continue; | |
907 | } | |
908 | ||
909 | if (currentNode.getNodeType() == HTNode.NodeType.CORE) { | |
910 | /* Here we add the relevant children nodes for BFS */ | |
d0b4e0dd | 911 | queue.addAll(currentNode.selectNextChildren(nodeCondition, currentNode.getCoreDataPredicate(extraPredicate))); |
5e7913a4 | 912 | } |
90846f6e GB |
913 | Collection<E> nodeIntervals = currentNode.getMatchingIntervals(nodeCondition, extraPredicate); |
914 | intervalsOfNodes.addAll(nodeIntervals); | |
5e7913a4 GB |
915 | } |
916 | } catch (ClosedChannelException e) { | |
917 | } | |
90846f6e | 918 | return intervalsOfNodes; |
5e7913a4 GB |
919 | } |
920 | ||
54d250a3 | 921 | @Override |
c2e3f4dd | 922 | public @Nullable E getMatchingInterval(TimeRangeCondition timeCondition, |
54d250a3 GB |
923 | Predicate<E> extraPredicate) { |
924 | ||
925 | /* Queue a stack of nodes containing nodes intersecting t */ | |
926 | Deque<Integer> queue = new LinkedList<>(); | |
927 | /* We start by reading the information in the root node */ | |
928 | queue.add(getRootNode().getSequenceNumber()); | |
929 | ||
930 | /* Then we follow the down in the relevant children until we find the interval */ | |
931 | try { | |
932 | while (!queue.isEmpty()) { | |
933 | int sequenceNumber = queue.pop(); | |
934 | HTNode<E> currentNode = readNode(sequenceNumber); | |
935 | ||
936 | @Nullable E interval = currentNode.getMatchingInterval(timeCondition, extraPredicate); | |
937 | if (interval != null) { | |
938 | return interval; | |
939 | } | |
940 | ||
941 | if (currentNode.getNodeType() == HTNode.NodeType.CORE) { | |
942 | /* Here we add the relevant children nodes for BFS */ | |
943 | queue.addAll(currentNode.selectNextChildren(timeCondition)); | |
944 | } | |
945 | } | |
946 | } catch (ClosedChannelException e) { | |
947 | } | |
948 | return null; | |
949 | } | |
950 | ||
5e7913a4 GB |
951 | @Override |
952 | public String toString() { | |
953 | return "Information on the current tree:\n\n" + "Blocksize: " //$NON-NLS-1$ //$NON-NLS-2$ | |
954 | + fBlockSize + "\n" + "Max nb. of children per node: " //$NON-NLS-1$//$NON-NLS-2$ | |
955 | + fMaxChildren + "\n" + "Number of nodes: " + fNodeCount //$NON-NLS-1$//$NON-NLS-2$ | |
956 | + "\n" + "Depth of the tree: " + fLatestBranch.size() + "\n" //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ | |
957 | + "Size of the treefile: " + getFileSize() + "\n" //$NON-NLS-1$//$NON-NLS-2$ | |
958 | + "Root node has sequence number: " //$NON-NLS-1$ | |
959 | + fLatestBranch.get(0).getSequenceNumber() + "\n" //$NON-NLS-1$ | |
960 | + "'Latest leaf' has sequence number: " //$NON-NLS-1$ | |
961 | + fLatestBranch.get(fLatestBranch.size() - 1).getSequenceNumber(); | |
962 | } | |
963 | ||
8b03451b GB |
964 | |
965 | // ------------------------------------------------------------------------ | |
966 | // Test-specific methods | |
967 | // ------------------------------------------------------------------------ | |
968 | ||
969 | /** | |
970 | * Get the current depth of the tree. | |
971 | * | |
972 | * @return The current depth | |
973 | */ | |
974 | @VisibleForTesting | |
975 | protected int getDepth() { | |
976 | return getLatestBranch().size(); | |
977 | } | |
978 | ||
979 | /** | |
980 | * Get the leaf (bottom-most) node of the latest branch. | |
981 | * | |
982 | * @return The latest leaf | |
983 | */ | |
984 | @VisibleForTesting | |
985 | protected N getLatestLeaf() { | |
986 | List<N> latestBranch = getLatestBranch(); | |
987 | return latestBranch.get(latestBranch.size() - 1); | |
988 | } | |
989 | ||
    /**
     * Verify a node's specific information about a child.
     *
     * @param parent
     *            The parent node
     * @param index
     *            The index of the child in the parent's extra data
     * @param child
     *            The child node to verify
     * @return False if a problem was found, true otherwise
     */
    @VisibleForTesting
    protected boolean verifyChildrenSpecific(N parent,
            int index,
            N child) {
        // Nothing to do for the default implementation; subclasses that keep
        // extra per-child data should override this hook.
        return true;
    }
1008 | ||
1009 | /** | |
1010 | * This method should verify in the whole time range of the parent node that | |
1011 | * the child node appears or not as a next children for a given timestamp. | |
1012 | * | |
1013 | * @param parent | |
1014 | * The parent node | |
1015 | * @param child | |
1016 | * The child node | |
1017 | * @return False if a problem was found, true otherwise | |
1018 | */ | |
1019 | @VisibleForTesting | |
1020 | protected boolean verifyIntersectingChildren(N parent, N child) { | |
1021 | int childSequence = child.getSequenceNumber(); | |
1022 | boolean shouldBeInCollection; | |
1023 | Collection<Integer> nextChildren; | |
1024 | for (long t = parent.getNodeStart(); t < parent.getNodeEnd(); t++) { | |
1025 | shouldBeInCollection = true; | |
c2e3f4dd | 1026 | nextChildren = parent.selectNextChildren(TimeRangeCondition.singleton(t)); |
8b03451b GB |
1027 | if (shouldBeInCollection != nextChildren.contains(childSequence)) { |
1028 | return false; | |
1029 | } | |
1030 | } | |
1031 | return true; | |
1032 | } | |
5e7913a4 | 1033 | } |