Commit | Line | Data |
---|---|---|
a52fde77 | 1 | /******************************************************************************* |
b2ca67ca | 2 | * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others |
6f4e8ec0 | 3 | * |
a52fde77 AM |
4 | * All rights reserved. This program and the accompanying materials are |
5 | * made available under the terms of the Eclipse Public License v1.0 which | |
6 | * accompanies this distribution, and is available at | |
7 | * http://www.eclipse.org/legal/epl-v10.html | |
6f4e8ec0 | 8 | * |
bb7f92ce FW |
9 | * Contributors: |
10 | * Alexandre Montplaisir - Initial API and implementation | |
11 | * Florian Wininger - Add Extension and Leaf Node | |
2a966f6f | 12 | * Patrick Tasse - Keep interval list sorted on insert |
a52fde77 AM |
13 | *******************************************************************************/ |
14 | ||
e894a508 | 15 | package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree; |
a52fde77 AM |
16 | |
17 | import java.io.IOException; | |
18 | import java.io.PrintWriter; | |
19 | import java.nio.ByteBuffer; | |
20 | import java.nio.ByteOrder; | |
21 | import java.nio.channels.FileChannel; | |
22 | import java.util.ArrayList; | |
23 | import java.util.Collections; | |
24 | import java.util.List; | |
62197b87 | 25 | import java.util.concurrent.locks.ReentrantReadWriteLock; |
a52fde77 | 26 | |
aa353506 | 27 | import org.eclipse.jdt.annotation.NonNull; |
e894a508 AM |
28 | import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException; |
29 | import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval; | |
30 | import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue; | |
a52fde77 | 31 | |
822798a3 GB |
32 | import com.google.common.collect.Iterables; |
33 | ||
/**
 * The base class for all the types of nodes that go in the History Tree.
 * <p>
 * Accesses to the interval list and to the node's end time are guarded by an
 * internal read-write lock, so a node may be queried while it is still being
 * filled.
 *
 * @author Alexandre Montplaisir
 */
public abstract class HTNode {

bb7f92ce FW |
41 | // ------------------------------------------------------------------------ |
42 | // Class fields | |
43 | // ------------------------------------------------------------------------ | |
44 | ||
45 | /** | |
46 | * The type of node | |
47 | */ | |
48 | public static enum NodeType { | |
49 | /** | |
50 | * Core node, which is a "front" node, at any level of the tree except | |
51 | * the bottom-most one. It has children, and may have extensions. | |
52 | */ | |
53 | CORE, | |
54 | /** | |
55 | * Leaf node, which is a node at the last bottom level of the tree. It | |
56 | * cannot have any children or extensions. | |
57 | */ | |
58 | LEAF; | |
59 | ||
60 | /** | |
61 | * Determine a node type by reading a serialized byte. | |
62 | * | |
63 | * @param rep | |
64 | * The byte representation of the node type | |
65 | * @return The corresponding NodeType | |
66 | * @throws IOException | |
67 | * If the NodeType is unrecognized | |
68 | */ | |
69 | public static NodeType fromByte(byte rep) throws IOException { | |
70 | switch (rep) { | |
71 | case 1: | |
72 | return CORE; | |
73 | case 2: | |
74 | return LEAF; | |
75 | default: | |
76 | throw new IOException(); | |
77 | } | |
78 | } | |
79 | ||
80 | /** | |
81 | * Get the byte representation of this node type. It can then be read | |
82 | * with {@link #fromByte}. | |
83 | * | |
84 | * @return The byte matching this node type | |
85 | */ | |
86 | public byte toByte() { | |
87 | switch (this) { | |
88 | case CORE: | |
89 | return 1; | |
90 | case LEAF: | |
91 | return 2; | |
92 | default: | |
93 | throw new IllegalStateException(); | |
94 | } | |
95 | } | |
96 | } | |
97 | ||
    /**
     * Size in bytes of the header part common to all node types:
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 12 - 3x int (seq number, parent seq number, intervalcount)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 3 * Integer.BYTES
            + Byte.BYTES;

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which belongs this node */
    private final HTConfig fConfig;

    /* Time range of this node. fNodeEnd stays 0 until the node is closed. */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    private volatile boolean fIsOnDisk;

    /* Vector containing all the intervals contained in this node, sorted */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

8d47cc34 AM |
138 | /** |
139 | * Constructor | |
140 | * | |
141 | * @param config | |
142 | * Configuration of the History Tree | |
143 | * @param seqNumber | |
144 | * The (unique) sequence number assigned to this particular node | |
145 | * @param parentSeqNumber | |
146 | * The sequence number of this node's parent node | |
147 | * @param start | |
148 | * The earliest timestamp stored in this node | |
149 | */ | |
150 | protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) { | |
0e9b2f07 GB |
151 | fConfig = config; |
152 | fNodeStart = start; | |
153 | fSequenceNumber = seqNumber; | |
154 | fParentSequenceNumber = parentSeqNumber; | |
155 | ||
0e9b2f07 GB |
156 | fSizeOfIntervalSection = 0; |
157 | fIsOnDisk = false; | |
158 | fIntervals = new ArrayList<>(); | |
a52fde77 AM |
159 | } |
160 | ||
161 | /** | |
8d47cc34 AM |
162 | * Reader factory method. Build a Node object (of the right type) by reading |
163 | * a block in the file. | |
6f4e8ec0 | 164 | * |
ffd0aa67 EB |
165 | * @param config |
166 | * Configuration of the History Tree | |
a52fde77 AM |
167 | * @param fc |
168 | * FileChannel to the history file, ALREADY SEEKED at the start | |
169 | * of the node. | |
f4baf640 GB |
170 | * @param nodeFactory |
171 | * The factory to create the nodes for this tree | |
8d47cc34 | 172 | * @return The node object |
a52fde77 | 173 | * @throws IOException |
8d47cc34 | 174 | * If there was an error reading from the file channel |
a52fde77 | 175 | */ |
f4baf640 | 176 | public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc, IHistoryTree.IHTNodeFactory nodeFactory) |
a52fde77 AM |
177 | throws IOException { |
178 | HTNode newNode = null; | |
179 | int res, i; | |
180 | ||
ffd0aa67 | 181 | ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize()); |
a52fde77 AM |
182 | buffer.order(ByteOrder.LITTLE_ENDIAN); |
183 | buffer.clear(); | |
184 | res = fc.read(buffer); | |
ffd0aa67 | 185 | assert (res == config.getBlockSize()); |
a52fde77 AM |
186 | buffer.flip(); |
187 | ||
188 | /* Read the common header part */ | |
bb7f92ce FW |
189 | byte typeByte = buffer.get(); |
190 | NodeType type = NodeType.fromByte(typeByte); | |
a52fde77 AM |
191 | long start = buffer.getLong(); |
192 | long end = buffer.getLong(); | |
193 | int seqNb = buffer.getInt(); | |
194 | int parentSeqNb = buffer.getInt(); | |
195 | int intervalCount = buffer.getInt(); | |
045badfe | 196 | buffer.get(); // TODO Used to be "isDone", to be removed from the header |
a52fde77 AM |
197 | |
198 | /* Now the rest of the header depends on the node type */ | |
199 | switch (type) { | |
bb7f92ce | 200 | case CORE: |
a52fde77 | 201 | /* Core nodes */ |
f4baf640 | 202 | newNode = nodeFactory.createCoreNode(config, seqNb, parentSeqNb, start); |
a52fde77 AM |
203 | newNode.readSpecificHeader(buffer); |
204 | break; | |
205 | ||
bb7f92ce FW |
206 | case LEAF: |
207 | /* Leaf nodes */ | |
f4baf640 | 208 | newNode = nodeFactory.createLeafNode(config, seqNb, parentSeqNb, start); |
bb7f92ce FW |
209 | newNode.readSpecificHeader(buffer); |
210 | break; | |
a52fde77 AM |
211 | |
212 | default: | |
213 | /* Unrecognized node type */ | |
214 | throw new IOException(); | |
215 | } | |
216 | ||
217 | /* | |
218 | * At this point, we should be done reading the header and 'buffer' | |
219 | * should only have the intervals left | |
220 | */ | |
221 | for (i = 0; i < intervalCount; i++) { | |
0ce45cd4 | 222 | HTInterval interval = HTInterval.readFrom(buffer); |
0e9b2f07 | 223 | newNode.fIntervals.add(interval); |
59d30d83 | 224 | newNode.fSizeOfIntervalSection += interval.getSizeOnDisk(); |
a52fde77 AM |
225 | } |
226 | ||
227 | /* Assign the node's other information we have read previously */ | |
0e9b2f07 | 228 | newNode.fNodeEnd = end; |
0e9b2f07 | 229 | newNode.fIsOnDisk = true; |
a52fde77 AM |
230 | |
231 | return newNode; | |
232 | } | |
233 | ||
8d47cc34 AM |
234 | /** |
235 | * Write this node to the given file channel. | |
236 | * | |
237 | * @param fc | |
238 | * The file channel to write to (should be sought to be correct | |
239 | * position) | |
240 | * @throws IOException | |
241 | * If there was an error writing | |
242 | */ | |
243 | public final void writeSelf(FileChannel fc) throws IOException { | |
a52fde77 | 244 | /* |
62197b87 AM |
245 | * Yes, we are taking the *read* lock here, because we are reading the |
246 | * information in the node to write it to disk. | |
a52fde77 | 247 | */ |
0e9b2f07 | 248 | fRwl.readLock().lock(); |
62197b87 | 249 | try { |
0e9b2f07 | 250 | final int blockSize = fConfig.getBlockSize(); |
62197b87 AM |
251 | |
252 | ByteBuffer buffer = ByteBuffer.allocate(blockSize); | |
253 | buffer.order(ByteOrder.LITTLE_ENDIAN); | |
254 | buffer.clear(); | |
255 | ||
256 | /* Write the common header part */ | |
0e9b2f07 GB |
257 | buffer.put(getNodeType().toByte()); |
258 | buffer.putLong(fNodeStart); | |
259 | buffer.putLong(fNodeEnd); | |
260 | buffer.putInt(fSequenceNumber); | |
261 | buffer.putInt(fParentSequenceNumber); | |
262 | buffer.putInt(fIntervals.size()); | |
62197b87 AM |
263 | buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header |
264 | ||
265 | /* Now call the inner method to write the specific header part */ | |
0e9b2f07 | 266 | writeSpecificHeader(buffer); |
62197b87 AM |
267 | |
268 | /* Back to us, we write the intervals */ | |
59d30d83 | 269 | fIntervals.forEach(i -> i.writeInterval(buffer)); |
2d1f2bee LPD |
270 | if (blockSize - buffer.position() != getNodeFreeSpace()) { |
271 | throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$ | |
272 | } | |
62197b87 | 273 | /* |
59d30d83 | 274 | * Fill the rest with zeros |
62197b87 | 275 | */ |
59d30d83 | 276 | while (buffer.position() < blockSize) { |
62197b87 AM |
277 | buffer.put((byte) 0); |
278 | } | |
a52fde77 | 279 | |
62197b87 | 280 | /* Finally, write everything in the Buffer to disk */ |
62197b87 AM |
281 | buffer.flip(); |
282 | int res = fc.write(buffer); | |
822798a3 GB |
283 | if (res != blockSize) { |
284 | throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$ | |
285 | } | |
62197b87 AM |
286 | |
287 | } finally { | |
0e9b2f07 | 288 | fRwl.readLock().unlock(); |
62197b87 | 289 | } |
0e9b2f07 | 290 | fIsOnDisk = true; |
cb42195c AM |
291 | } |
292 | ||
293 | // ------------------------------------------------------------------------ | |
294 | // Accessors | |
295 | // ------------------------------------------------------------------------ | |
296 | ||
8d47cc34 AM |
297 | /** |
298 | * Retrieve the history tree configuration used for this node. | |
299 | * | |
300 | * @return The history tree config | |
301 | */ | |
302 | protected HTConfig getConfig() { | |
0e9b2f07 | 303 | return fConfig; |
a52fde77 AM |
304 | } |
305 | ||
8d47cc34 AM |
306 | /** |
307 | * Get the start time of this node. | |
308 | * | |
309 | * @return The start time of this node | |
310 | */ | |
311 | public long getNodeStart() { | |
0e9b2f07 | 312 | return fNodeStart; |
a52fde77 AM |
313 | } |
314 | ||
8d47cc34 AM |
315 | /** |
316 | * Get the end time of this node. | |
317 | * | |
bb7f92ce | 318 | * @return The end time of this node |
8d47cc34 AM |
319 | */ |
320 | public long getNodeEnd() { | |
0e9b2f07 GB |
321 | if (fIsOnDisk) { |
322 | return fNodeEnd; | |
a52fde77 AM |
323 | } |
324 | return 0; | |
325 | } | |
326 | ||
8d47cc34 AM |
327 | /** |
328 | * Get the sequence number of this node. | |
329 | * | |
330 | * @return The sequence number of this node | |
331 | */ | |
332 | public int getSequenceNumber() { | |
0e9b2f07 | 333 | return fSequenceNumber; |
a52fde77 AM |
334 | } |
335 | ||
8d47cc34 AM |
336 | /** |
337 | * Get the sequence number of this node's parent. | |
338 | * | |
339 | * @return The parent sequence number | |
340 | */ | |
341 | public int getParentSequenceNumber() { | |
0e9b2f07 | 342 | return fParentSequenceNumber; |
a52fde77 AM |
343 | } |
344 | ||
345 | /** | |
346 | * Change this node's parent. Used when we create a new root node for | |
347 | * example. | |
8d47cc34 AM |
348 | * |
349 | * @param newParent | |
350 | * The sequence number of the node that is the new parent | |
a52fde77 | 351 | */ |
8d47cc34 | 352 | public void setParentSequenceNumber(int newParent) { |
0e9b2f07 | 353 | fParentSequenceNumber = newParent; |
a52fde77 AM |
354 | } |
355 | ||
8d47cc34 AM |
356 | /** |
357 | * Return if this node is "done" (full and written to disk). | |
358 | * | |
359 | * @return If this node is done or not | |
360 | */ | |
045badfe | 361 | public boolean isOnDisk() { |
0e9b2f07 | 362 | return fIsOnDisk; |
a52fde77 AM |
363 | } |
364 | ||
365 | /** | |
366 | * Add an interval to this node | |
6f4e8ec0 | 367 | * |
a52fde77 | 368 | * @param newInterval |
8d47cc34 | 369 | * Interval to add to this node |
a52fde77 | 370 | */ |
8d47cc34 | 371 | public void addInterval(HTInterval newInterval) { |
0e9b2f07 | 372 | fRwl.writeLock().lock(); |
62197b87 AM |
373 | try { |
374 | /* Just in case, should be checked before even calling this function */ | |
59d30d83 | 375 | assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace()); |
a52fde77 | 376 | |
2a966f6f | 377 | /* Find the insert position to keep the list sorted */ |
0e9b2f07 GB |
378 | int index = fIntervals.size(); |
379 | while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) { | |
2a966f6f PT |
380 | index--; |
381 | } | |
382 | ||
0e9b2f07 | 383 | fIntervals.add(index, newInterval); |
59d30d83 | 384 | fSizeOfIntervalSection += newInterval.getSizeOnDisk(); |
a52fde77 | 385 | |
62197b87 | 386 | } finally { |
0e9b2f07 | 387 | fRwl.writeLock().unlock(); |
62197b87 | 388 | } |
a52fde77 AM |
389 | } |
390 | ||
391 | /** | |
392 | * We've received word from the containerTree that newest nodes now exist to | |
393 | * our right. (Puts isDone = true and sets the endtime) | |
6f4e8ec0 | 394 | * |
a52fde77 AM |
395 | * @param endtime |
396 | * The nodeEnd time that the node will have | |
a52fde77 | 397 | */ |
8d47cc34 | 398 | public void closeThisNode(long endtime) { |
0e9b2f07 | 399 | fRwl.writeLock().lock(); |
62197b87 | 400 | try { |
822798a3 GB |
401 | /** |
402 | * FIXME: was assert (endtime >= fNodeStart); but that exception | |
403 | * is reached with an empty node that has start time endtime + 1 | |
404 | */ | |
405 | // if (endtime < fNodeStart) { | |
406 | // throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart); | |
407 | // } | |
62197b87 | 408 | |
0e9b2f07 | 409 | if (!fIntervals.isEmpty()) { |
62197b87 AM |
410 | /* |
411 | * Make sure there are no intervals in this node with their | |
412 | * EndTime > the one requested. Only need to check the last one | |
2a966f6f | 413 | * since they are sorted |
62197b87 | 414 | */ |
822798a3 GB |
415 | if (endtime < Iterables.getLast(fIntervals).getEndTime()) { |
416 | throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$ | |
417 | } | |
62197b87 | 418 | } |
a52fde77 | 419 | |
0e9b2f07 | 420 | fNodeEnd = endtime; |
62197b87 | 421 | } finally { |
0e9b2f07 | 422 | fRwl.writeLock().unlock(); |
a52fde77 | 423 | } |
a52fde77 AM |
424 | } |
425 | ||
426 | /** | |
427 | * The method to fill up the stateInfo (passed on from the Current State | |
428 | * Tree when it does a query on the SHT). We'll replace the data in that | |
429 | * vector with whatever relevant we can find from this node | |
6f4e8ec0 | 430 | * |
a52fde77 AM |
431 | * @param stateInfo |
432 | * The same stateInfo that comes from SHT's doQuery() | |
433 | * @param t | |
434 | * The timestamp for which the query is for. Only return | |
435 | * intervals that intersect t. | |
436 | * @throws TimeRangeException | |
8d47cc34 | 437 | * If 't' is invalid |
a52fde77 | 438 | */ |
8d47cc34 | 439 | public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t) |
a52fde77 | 440 | throws TimeRangeException { |
62197b87 | 441 | /* This is from a state system query, we are "reading" this node */ |
0e9b2f07 | 442 | fRwl.readLock().lock(); |
62197b87 | 443 | try { |
0e9b2f07 | 444 | for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) { |
62197b87 AM |
445 | /* |
446 | * Now we only have to compare the Start times, since we now the | |
1d8028cd AM |
447 | * End times necessarily fit. |
448 | * | |
449 | * Second condition is to ignore new attributes that might have | |
450 | * been created after stateInfo was instantiated (they would be | |
451 | * null anyway). | |
62197b87 | 452 | */ |
0e9b2f07 | 453 | ITmfStateInterval interval = fIntervals.get(i); |
d0ed4962 | 454 | if (t >= interval.getStartTime() && |
1d8028cd AM |
455 | interval.getAttribute() < stateInfo.size()) { |
456 | stateInfo.set(interval.getAttribute(), interval); | |
62197b87 | 457 | } |
a52fde77 | 458 | } |
62197b87 | 459 | } finally { |
0e9b2f07 | 460 | fRwl.readLock().unlock(); |
a52fde77 | 461 | } |
a52fde77 AM |
462 | } |
463 | ||
464 | /** | |
465 | * Get a single Interval from the information in this node If the | |
466 | * key/timestamp pair cannot be found, we return null. | |
6f4e8ec0 | 467 | * |
a52fde77 | 468 | * @param key |
8d47cc34 | 469 | * The attribute quark to look for |
a52fde77 | 470 | * @param t |
8d47cc34 | 471 | * The timestamp |
a52fde77 AM |
472 | * @return The Interval containing the information we want, or null if it |
473 | * wasn't found | |
bb7f92ce FW |
474 | * @throws TimeRangeException |
475 | * If 't' is invalid | |
a52fde77 | 476 | */ |
8d47cc34 | 477 | public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException { |
0e9b2f07 | 478 | fRwl.readLock().lock(); |
62197b87 | 479 | try { |
0e9b2f07 GB |
480 | for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) { |
481 | HTInterval curInterval = fIntervals.get(i); | |
62197b87 AM |
482 | if (curInterval.getAttribute() == key |
483 | && curInterval.getStartTime() <= t | |
484 | && curInterval.getEndTime() >= t) { | |
485 | return curInterval; | |
486 | } | |
a52fde77 | 487 | } |
6642afb4 | 488 | |
62197b87 AM |
489 | /* We didn't find the relevant information in this node */ |
490 | return null; | |
491 | ||
492 | } finally { | |
0e9b2f07 | 493 | fRwl.readLock().unlock(); |
a52fde77 | 494 | } |
a52fde77 AM |
495 | } |
496 | ||
    /**
     * Find the index of the first interval from which a scan for timestamp
     * 't' must start, i.e. the first interval whose end time may be >= 't'.
     * <p>
     * Should only be called by methods that already hold the read lock.
     *
     * @param t
     *            The query timestamp
     * @return The index in fIntervals at which to start scanning
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provides a .binarySearch method, but its API is quite weird...
         * We search with a dummy interval whose end time is 't'.
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching, we
             * don't care if the value is exact or not.
             */
            index = -index - 1;

        } else {
            /*
             * Another API quirkiness, the returned index is the one of the *last*
             * element of a series of equal endtimes, which happens sometimes. We
             * want the *first* element of such a series, to read through them
             * again.
             */
            while (index > 0
                    && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
                index--;
            }
        }

        return index;
    }

    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size, in bytes
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * Offset, within the node's block, at which the interval (Data) section
     * ends, i.e. header size plus the bytes already used by intervals.
     *
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }

551 | /** | |
552 | * Returns the free space in the node, which is simply put, the | |
553 | * stringSectionOffset - dataSectionOffset | |
8d47cc34 AM |
554 | * |
555 | * @return The amount of free space in the node (in bytes) | |
a52fde77 | 556 | */ |
8d47cc34 | 557 | public int getNodeFreeSpace() { |
0e9b2f07 | 558 | fRwl.readLock().lock(); |
59d30d83 | 559 | int ret = fConfig.getBlockSize() - getDataSectionEndOffset(); |
0e9b2f07 | 560 | fRwl.readLock().unlock(); |
62197b87 AM |
561 | |
562 | return ret; | |
a52fde77 AM |
563 | } |
564 | ||
565 | /** | |
8d47cc34 | 566 | * Returns the current space utilization of this node, as a percentage. |
a52fde77 | 567 | * (used space / total usable space, which excludes the header) |
8d47cc34 AM |
568 | * |
569 | * @return The percentage (value between 0 and 100) of space utilization in | |
570 | * in this node. | |
a52fde77 | 571 | */ |
8d47cc34 | 572 | public long getNodeUsagePercent() { |
0e9b2f07 | 573 | fRwl.readLock().lock(); |
62197b87 | 574 | try { |
0e9b2f07 GB |
575 | final int blockSize = fConfig.getBlockSize(); |
576 | float freePercent = (float) getNodeFreeSpace() | |
577 | / (float) (blockSize - getTotalHeaderSize()) | |
62197b87 AM |
578 | * 100F; |
579 | return (long) (100L - freePercent); | |
580 | ||
581 | } finally { | |
0e9b2f07 | 582 | fRwl.readLock().unlock(); |
62197b87 | 583 | } |
a52fde77 AM |
584 | } |
585 | ||
a52fde77 AM |
586 | /** |
587 | * @name Debugging functions | |
588 | */ | |
589 | ||
590 | @SuppressWarnings("nls") | |
591 | @Override | |
592 | public String toString() { | |
593 | /* Only used for debugging, shouldn't be externalized */ | |
b2ca67ca PT |
594 | return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]", |
595 | fSequenceNumber, | |
596 | (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber, | |
597 | toStringSpecific(), | |
598 | fIntervals.size(), | |
599 | getNodeUsagePercent(), | |
600 | fNodeStart, | |
601 | (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "..."); | |
a52fde77 AM |
602 | } |
603 | ||
604 | /** | |
605 | * Debugging function that prints out the contents of this node | |
6f4e8ec0 | 606 | * |
a52fde77 AM |
607 | * @param writer |
608 | * PrintWriter in which we will print the debug output | |
609 | */ | |
610 | @SuppressWarnings("nls") | |
8d47cc34 | 611 | public void debugPrintIntervals(PrintWriter writer) { |
a52fde77 | 612 | /* Only used for debugging, shouldn't be externalized */ |
dbf883bb | 613 | writer.println("Intervals for node #" + fSequenceNumber + ":"); |
a52fde77 AM |
614 | |
615 | /* Array of children */ | |
f4baf640 GB |
616 | if (getNodeType() != NodeType.LEAF) { /* Only Core Nodes can have children */ |
617 | ParentNode thisNode = (ParentNode) this; | |
a52fde77 AM |
618 | writer.print(" " + thisNode.getNbChildren() + " children"); |
619 | if (thisNode.getNbChildren() >= 1) { | |
620 | writer.print(": [ " + thisNode.getChild(0)); | |
621 | for (int i = 1; i < thisNode.getNbChildren(); i++) { | |
622 | writer.print(", " + thisNode.getChild(i)); | |
623 | } | |
624 | writer.print(']'); | |
625 | } | |
626 | writer.print('\n'); | |
627 | } | |
628 | ||
629 | /* List of intervals in the node */ | |
630 | writer.println(" Intervals contained:"); | |
0e9b2f07 GB |
631 | for (int i = 0; i < fIntervals.size(); i++) { |
632 | writer.println(fIntervals.get(i).toString()); | |
a52fde77 AM |
633 | } |
634 | writer.println('\n'); | |
635 | } | |
636 | ||
    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node (CORE or LEAF). It determines the byte that
     * is serialized at the start of the node's on-disk header.
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}