Commit | Line | Data |
---|---|---|
a52fde77 | 1 | /******************************************************************************* |
b2ca67ca | 2 | * Copyright (c) 2010, 2016 Ericsson, École Polytechnique de Montréal, and others |
6f4e8ec0 | 3 | * |
a52fde77 AM |
4 | * All rights reserved. This program and the accompanying materials are |
5 | * made available under the terms of the Eclipse Public License v1.0 which | |
6 | * accompanies this distribution, and is available at | |
7 | * http://www.eclipse.org/legal/epl-v10.html | |
6f4e8ec0 | 8 | * |
bb7f92ce FW |
9 | * Contributors: |
10 | * Alexandre Montplaisir - Initial API and implementation | |
11 | * Florian Wininger - Add Extension and Leaf Node | |
2a966f6f | 12 | * Patrick Tasse - Keep interval list sorted on insert |
a52fde77 AM |
13 | *******************************************************************************/ |
14 | ||
e894a508 | 15 | package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree; |
a52fde77 AM |
16 | |
17 | import java.io.IOException; | |
18 | import java.io.PrintWriter; | |
19 | import java.nio.ByteBuffer; | |
20 | import java.nio.ByteOrder; | |
21 | import java.nio.channels.FileChannel; | |
22 | import java.util.ArrayList; | |
23 | import java.util.Collections; | |
24 | import java.util.List; | |
62197b87 | 25 | import java.util.concurrent.locks.ReentrantReadWriteLock; |
a52fde77 | 26 | |
aa353506 | 27 | import org.eclipse.jdt.annotation.NonNull; |
e894a508 AM |
28 | import org.eclipse.tracecompass.statesystem.core.exceptions.TimeRangeException; |
29 | import org.eclipse.tracecompass.statesystem.core.interval.ITmfStateInterval; | |
30 | import org.eclipse.tracecompass.statesystem.core.statevalue.TmfStateValue; | |
a52fde77 | 31 | |
822798a3 GB |
32 | import com.google.common.collect.Iterables; |
33 | ||
a52fde77 AM |
34 | /** |
35 | * The base class for all the types of nodes that go in the History Tree. | |
6f4e8ec0 | 36 | * |
ffd0aa67 | 37 | * @author Alexandre Montplaisir |
a52fde77 | 38 | */ |
8d47cc34 | 39 | public abstract class HTNode { |
a52fde77 | 40 | |
bb7f92ce FW |
41 | // ------------------------------------------------------------------------ |
42 | // Class fields | |
43 | // ------------------------------------------------------------------------ | |
44 | ||
45 | /** | |
46 | * The type of node | |
47 | */ | |
48 | public static enum NodeType { | |
49 | /** | |
50 | * Core node, which is a "front" node, at any level of the tree except | |
51 | * the bottom-most one. It has children, and may have extensions. | |
52 | */ | |
53 | CORE, | |
54 | /** | |
55 | * Leaf node, which is a node at the last bottom level of the tree. It | |
56 | * cannot have any children or extensions. | |
57 | */ | |
58 | LEAF; | |
59 | ||
60 | /** | |
61 | * Determine a node type by reading a serialized byte. | |
62 | * | |
63 | * @param rep | |
64 | * The byte representation of the node type | |
65 | * @return The corresponding NodeType | |
66 | * @throws IOException | |
67 | * If the NodeType is unrecognized | |
68 | */ | |
69 | public static NodeType fromByte(byte rep) throws IOException { | |
70 | switch (rep) { | |
71 | case 1: | |
72 | return CORE; | |
73 | case 2: | |
74 | return LEAF; | |
75 | default: | |
76 | throw new IOException(); | |
77 | } | |
78 | } | |
79 | ||
80 | /** | |
81 | * Get the byte representation of this node type. It can then be read | |
82 | * with {@link #fromByte}. | |
83 | * | |
84 | * @return The byte matching this node type | |
85 | */ | |
86 | public byte toByte() { | |
87 | switch (this) { | |
88 | case CORE: | |
89 | return 1; | |
90 | case LEAF: | |
91 | return 2; | |
92 | default: | |
93 | throw new IllegalStateException(); | |
94 | } | |
95 | } | |
96 | } | |
97 | ||
    /**
     * Serialized size, in bytes, of the header part common to all node types.
     *
     * <pre>
     *  1 - byte (type)
     * 16 - 2x long (start time, end time)
     * 16 - 4x int (seq number, parent seq number, intervalcount,
     *              strings section pos.)
     *  1 - byte (done or not)
     * </pre>
     */
    private static final int COMMON_HEADER_SIZE = 34;

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree to which belongs this node */
    private final HTConfig fConfig;

    /* Time range of this node. fNodeEnd stays mutable until the node is closed. */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /*
     * Where the Strings section begins (offset from the start of the node).
     * Starts at the block size and moves backwards as intervals are added.
     */
    private int fStringSectionOffset;

    /* Sum of bytes of all intervals in the node */
    private int fSizeOfIntervalSection;

    /* True if this node was read from disk (meaning its end time is now fixed) */
    /* volatile: read/written outside the lock (see writeSelf/getNodeEnd) */
    private volatile boolean fIsOnDisk;

    /* Vector containing all the intervals contained in this node */
    private final List<HTInterval> fIntervals;

    /* Lock used to protect the accesses to intervals, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);
62197b87 | 138 | |
    /**
     * Constructor
     *
     * @param config
     *            Configuration of the History Tree
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    protected HTNode(HTConfig config, int seqNumber, int parentSeqNumber, long start) {
        fConfig = config;
        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        /*
         * The Strings section initially starts at the very end of the block;
         * it grows backwards (towards lower offsets) as intervals are added
         * (see addInterval()).
         */
        fStringSectionOffset = config.getBlockSize();
        fSizeOfIntervalSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();
    }
162 | ||
163 | /** | |
8d47cc34 AM |
164 | * Reader factory method. Build a Node object (of the right type) by reading |
165 | * a block in the file. | |
6f4e8ec0 | 166 | * |
ffd0aa67 EB |
167 | * @param config |
168 | * Configuration of the History Tree | |
a52fde77 AM |
169 | * @param fc |
170 | * FileChannel to the history file, ALREADY SEEKED at the start | |
171 | * of the node. | |
8d47cc34 | 172 | * @return The node object |
a52fde77 | 173 | * @throws IOException |
8d47cc34 | 174 | * If there was an error reading from the file channel |
a52fde77 | 175 | */ |
aa353506 | 176 | public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc) |
a52fde77 AM |
177 | throws IOException { |
178 | HTNode newNode = null; | |
179 | int res, i; | |
180 | ||
ffd0aa67 | 181 | ByteBuffer buffer = ByteBuffer.allocate(config.getBlockSize()); |
a52fde77 AM |
182 | buffer.order(ByteOrder.LITTLE_ENDIAN); |
183 | buffer.clear(); | |
184 | res = fc.read(buffer); | |
ffd0aa67 | 185 | assert (res == config.getBlockSize()); |
a52fde77 AM |
186 | buffer.flip(); |
187 | ||
188 | /* Read the common header part */ | |
bb7f92ce FW |
189 | byte typeByte = buffer.get(); |
190 | NodeType type = NodeType.fromByte(typeByte); | |
a52fde77 AM |
191 | long start = buffer.getLong(); |
192 | long end = buffer.getLong(); | |
193 | int seqNb = buffer.getInt(); | |
194 | int parentSeqNb = buffer.getInt(); | |
195 | int intervalCount = buffer.getInt(); | |
196 | int stringSectionOffset = buffer.getInt(); | |
045badfe | 197 | buffer.get(); // TODO Used to be "isDone", to be removed from the header |
a52fde77 AM |
198 | |
199 | /* Now the rest of the header depends on the node type */ | |
200 | switch (type) { | |
bb7f92ce | 201 | case CORE: |
a52fde77 | 202 | /* Core nodes */ |
ffd0aa67 | 203 | newNode = new CoreNode(config, seqNb, parentSeqNb, start); |
a52fde77 AM |
204 | newNode.readSpecificHeader(buffer); |
205 | break; | |
206 | ||
bb7f92ce FW |
207 | case LEAF: |
208 | /* Leaf nodes */ | |
209 | newNode = new LeafNode(config, seqNb, parentSeqNb, start); | |
210 | newNode.readSpecificHeader(buffer); | |
211 | break; | |
a52fde77 AM |
212 | |
213 | default: | |
214 | /* Unrecognized node type */ | |
215 | throw new IOException(); | |
216 | } | |
217 | ||
218 | /* | |
219 | * At this point, we should be done reading the header and 'buffer' | |
220 | * should only have the intervals left | |
221 | */ | |
222 | for (i = 0; i < intervalCount; i++) { | |
0ce45cd4 | 223 | HTInterval interval = HTInterval.readFrom(buffer); |
0e9b2f07 | 224 | newNode.fIntervals.add(interval); |
f3476b68 | 225 | newNode.fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE; |
a52fde77 AM |
226 | } |
227 | ||
228 | /* Assign the node's other information we have read previously */ | |
0e9b2f07 GB |
229 | newNode.fNodeEnd = end; |
230 | newNode.fStringSectionOffset = stringSectionOffset; | |
231 | newNode.fIsOnDisk = true; | |
a52fde77 AM |
232 | |
233 | return newNode; | |
234 | } | |
235 | ||
    /**
     * Write this node to the given file channel.
     *
     * @param fc
     *            The file channel to write to (should be sought to be correct
     *            position)
     * @throws IOException
     *             If there was an error writing
     */
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            /* Strings entries are laid out backwards from the end of the block */
            int curStringsEntryEndPos = blockSize;

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part (must match COMMON_HEADER_SIZE) */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.putInt(fStringSectionOffset);
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            for (HTInterval interval : fIntervals) {
                int size = interval.writeInterval(buffer, curStringsEntryEndPos);
                curStringsEntryEndPos -= size;
            }

            /*
             * Write padding between the end of the Data section and the start
             * of the Strings section (needed to fill the node in case there is
             * no Strings section)
             */
            while (buffer.position() < fStringSectionOffset) {
                buffer.put((byte) 0);
            }

            /*
             * If the offsets were right, the size of the Strings section should
             * be == to the expected size
             */
            if (curStringsEntryEndPos != fStringSectionOffset) {
                throw new IllegalStateException("Wrong size of Strings section: Actual: " + curStringsEntryEndPos + ", Expected: " + fStringSectionOffset); //$NON-NLS-1$ //$NON-NLS-2$
            }

            /* Finally, write everything in the Buffer to disk */

            // if we don't do this, flip() will lose what's after.
            buffer.position(blockSize);

            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        /* Set outside the lock: fIsOnDisk is volatile, so the write is visible */
        fIsOnDisk = true;
    }
311 | ||
    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Retrieve the history tree configuration used for this node.
     *
     * @return The history tree config
     */
    protected HTConfig getConfig() {
        return fConfig;
    }

    /**
     * Get the start time of this node.
     *
     * @return The start time of this node
     */
    public long getNodeStart() {
        return fNodeStart;
    }

    /**
     * Get the end time of this node.
     *
     * @return The end time of this node, or 0 if the node has not been written
     *         to disk yet (its end time is not fixed until then)
     */
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return 0;
    }

    /**
     * Get the sequence number of this node.
     *
     * @return The sequence number of this node
     */
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    /**
     * Get the sequence number of this node's parent.
     *
     * @return The parent sequence number, or -1 if this node is the root node
     */
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    /**
     * Change this node's parent. Used when we create a new root node for
     * example.
     *
     * @param newParent
     *            The sequence number of the node that is the new parent
     */
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    /**
     * Return if this node is "done" (full and written to disk).
     *
     * @return If this node is done or not
     */
    public boolean isOnDisk() {
        return fIsOnDisk;
    }
383 | ||
    /**
     * Add an interval to this node. The caller is expected to have verified
     * beforehand that there is enough free space (see getNodeFreeSpace()).
     *
     * @param newInterval
     *            Interval to add to this node
     */
    public void addInterval(HTInterval newInterval) {
        fRwl.writeLock().lock();
        try {
            /* Just in case, should be checked before even calling this function */
            assert (newInterval.getIntervalSize() <= getNodeFreeSpace());

            /*
             * Find the insert position to keep the list sorted. The scan stops
             * at the first element that is <= newInterval, so equal elements
             * keep their arrival order (stable insert).
             */
            int index = fIntervals.size();
            while (index > 0 && newInterval.compareTo(fIntervals.get(index - 1)) < 0) {
                index--;
            }

            fIntervals.add(index, newInterval);
            fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;

            /* Update the in-node offset "pointer" (Strings section grows backwards) */
            fStringSectionOffset -= (newInterval.getStringsEntrySize());
        } finally {
            fRwl.writeLock().unlock();
        }
    }
411 | ||
    /**
     * We've received word from the containerTree that newest nodes now exist to
     * our right. (Puts isDone = true and sets the endtime)
     *
     * @param endtime
     *            The nodeEnd time that the node will have
     */
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception
             * is reached with an empty node that has start time endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < Iterables.getLast(fIntervals).getEndTime()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the intervals of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }
446 | ||
    /**
     * The method to fill up the stateInfo (passed on from the Current State
     * Tree when it does a query on the SHT). We'll replace the data in that
     * vector with whatever relevant we can find from this node
     *
     * @param stateInfo
     *            The same stateInfo that comes from SHT's doQuery()
     * @param t
     *            The timestamp for which the query is for. Only return
     *            intervals that intersect t.
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
            throws TimeRangeException {
        /* This is from a state system query, we are "reading" this node */
        fRwl.readLock().lock();
        try {
            for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) {
                /*
                 * Now we only have to compare the Start times, since we know
                 * the End times necessarily fit (getStartIndexFor() skipped
                 * every interval ending before 't').
                 *
                 * Second condition is to ignore new attributes that might have
                 * been created after stateInfo was instantiated (they would be
                 * null anyway).
                 */
                ITmfStateInterval interval = fIntervals.get(i);
                if (interval.getStartTime() <= t &&
                        interval.getAttribute() < stateInfo.size()) {
                    stateInfo.set(interval.getAttribute(), interval);
                }
            }
        } finally {
            fRwl.readLock().unlock();
        }
    }
484 | ||
485 | /** | |
486 | * Get a single Interval from the information in this node If the | |
487 | * key/timestamp pair cannot be found, we return null. | |
6f4e8ec0 | 488 | * |
a52fde77 | 489 | * @param key |
8d47cc34 | 490 | * The attribute quark to look for |
a52fde77 | 491 | * @param t |
8d47cc34 | 492 | * The timestamp |
a52fde77 AM |
493 | * @return The Interval containing the information we want, or null if it |
494 | * wasn't found | |
bb7f92ce FW |
495 | * @throws TimeRangeException |
496 | * If 't' is invalid | |
a52fde77 | 497 | */ |
8d47cc34 | 498 | public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException { |
0e9b2f07 | 499 | fRwl.readLock().lock(); |
62197b87 | 500 | try { |
0e9b2f07 GB |
501 | for (int i = getStartIndexFor(t); i < fIntervals.size(); i++) { |
502 | HTInterval curInterval = fIntervals.get(i); | |
62197b87 AM |
503 | if (curInterval.getAttribute() == key |
504 | && curInterval.getStartTime() <= t | |
505 | && curInterval.getEndTime() >= t) { | |
506 | return curInterval; | |
507 | } | |
a52fde77 | 508 | } |
6642afb4 | 509 | |
62197b87 AM |
510 | /* We didn't find the relevant information in this node */ |
511 | return null; | |
512 | ||
513 | } finally { | |
0e9b2f07 | 514 | fRwl.readLock().unlock(); |
a52fde77 | 515 | } |
a52fde77 AM |
516 | } |
517 | ||
    /**
     * Return the index, in fIntervals, of the first interval that could
     * intersect timestamp 't'. Since the list is sorted by end time, all
     * intervals before that index end strictly before 't'.
     *
     * Should only be called by methods holding the read lock.
     *
     * @param t
     *            The target timestamp
     * @return The index from which a linear search should start
     * @throws TimeRangeException
     *             If 't' is invalid
     */
    private int getStartIndexFor(long t) throws TimeRangeException {
        /* Should only be called by methods with the readLock taken */

        if (fIntervals.isEmpty()) {
            return 0;
        }
        /*
         * Since the intervals are sorted by end time, we can skip all the ones
         * at the beginning whose end times are smaller than 't'. Java does
         * provide a .binarySearch method, but its API is quite weird...
         */
        HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
        int index = Collections.binarySearch(fIntervals, dummy);

        if (index < 0) {
            /*
             * .binarySearch returns a negative number if the exact value was
             * not found. Here we just want to know where to start searching, we
             * don't care if the value is exact or not.
             */
            index = -index - 1;

        } else {
            /*
             * Another API quirkiness: when several intervals share equal end
             * times, binarySearch gives no guarantee which one of the run is
             * returned. We want the *first* element of such a series, so walk
             * backwards to it in order to read through them all again.
             */
            while (index > 0
                    && fIntervals.get(index - 1).compareTo(fIntervals.get(index)) == 0) {
                index--;
            }
        }

        return index;
    }
555 | ||
    /**
     * Return the total header size of this node (will depend on the node type).
     *
     * @return The total header size, in bytes (common part + type-specific part)
     */
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfIntervalSection;
    }
571 | ||
572 | /** | |
573 | * Returns the free space in the node, which is simply put, the | |
574 | * stringSectionOffset - dataSectionOffset | |
8d47cc34 AM |
575 | * |
576 | * @return The amount of free space in the node (in bytes) | |
a52fde77 | 577 | */ |
8d47cc34 | 578 | public int getNodeFreeSpace() { |
0e9b2f07 GB |
579 | fRwl.readLock().lock(); |
580 | int ret = fStringSectionOffset - getDataSectionEndOffset(); | |
581 | fRwl.readLock().unlock(); | |
62197b87 AM |
582 | |
583 | return ret; | |
a52fde77 AM |
584 | } |
585 | ||
    /**
     * Returns the current space utilization of this node, as a percentage.
     * (used space / total usable space, which excludes the header)
     *
     * @return The percentage (value between 0 and 100) of space utilization
     *         in this node.
     */
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = fConfig.getBlockSize();
            /* Usable space excludes the header, hence the subtraction */
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }
606 | ||
a52fde77 AM |
    /**
     * @name Debugging functions
     */

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                /* End time only shown once meaningful (on disk, or already set) */
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }
624 | ||
    /**
     * Debugging function that prints out the contents of this node
     *
     * @param writer
     *            PrintWriter in which we will print the debug output
     */
    @SuppressWarnings("nls")
    public void debugPrintIntervals(PrintWriter writer) {
        /* Only used for debugging, shouldn't be externalized */
        writer.println("Node #" + fSequenceNumber + ":");

        /* Array of children */
        if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
            CoreNode thisNode = (CoreNode) this;
            writer.print(" " + thisNode.getNbChildren() + " children");
            if (thisNode.getNbChildren() >= 1) {
                writer.print(": [ " + thisNode.getChild(0));
                for (int i = 1; i < thisNode.getNbChildren(); i++) {
                    writer.print(", " + thisNode.getChild(i));
                }
                writer.print(']');
            }
            writer.print('\n');
        }

        /* List of intervals in the node */
        writer.println(" Intervals contained:");
        for (int i = 0; i < fIntervals.size(); i++) {
            writer.println(fIntervals.get(i).toString());
        }
        writer.println('\n');
    }
657 | ||
    // ------------------------------------------------------------------------
    // Abstract methods
    // ------------------------------------------------------------------------

    /**
     * Get the type of this node (CORE or LEAF).
     *
     * @return The node type
     */
    public abstract NodeType getNodeType();

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected abstract int getSpecificHeaderSize();

    /**
     * Read the type-specific part of the node header from a byte buffer.
     *
     * @param buffer
     *            The byte buffer to read from. It should be already positioned
     *            correctly.
     */
    protected abstract void readSpecificHeader(ByteBuffer buffer);

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected abstract void writeSpecificHeader(ByteBuffer buffer);

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected abstract String toStringSpecific();
}