* 1 - byte (done or not)
* </pre>
*/
- private static final int COMMON_HEADER_SIZE = 34;
+ private static final int COMMON_HEADER_SIZE = Byte.BYTES
+ + 2 * Long.BYTES
+ + 3 * Integer.BYTES
+ + Byte.BYTES;
// ------------------------------------------------------------------------
// Attributes
private final int fSequenceNumber;
private int fParentSequenceNumber; /* = -1 if this node is the root node */
- /* Where the Strings section begins (from the start of the node */
- private int fStringSectionOffset;
-
/* Sum of bytes of all intervals in the node */
private int fSizeOfIntervalSection;
fSequenceNumber = seqNumber;
fParentSequenceNumber = parentSeqNumber;
- fStringSectionOffset = config.getBlockSize();
fSizeOfIntervalSection = 0;
fIsOnDisk = false;
fIntervals = new ArrayList<>();
* @param fc
* FileChannel to the history file, ALREADY SEEKED at the start
* of the node.
+ * @param nodeFactory
+ * The factory to create the nodes for this tree
* @return The node object
* @throws IOException
* If there was an error reading from the file channel
*/
- public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc)
+ public static final @NonNull HTNode readNode(HTConfig config, FileChannel fc, IHistoryTree.IHTNodeFactory nodeFactory)
throws IOException {
HTNode newNode = null;
int res, i;
int seqNb = buffer.getInt();
int parentSeqNb = buffer.getInt();
int intervalCount = buffer.getInt();
- int stringSectionOffset = buffer.getInt();
buffer.get(); // TODO Used to be "isDone", to be removed from the header
/* Now the rest of the header depends on the node type */
switch (type) {
case CORE:
/* Core nodes */
- newNode = new CoreNode(config, seqNb, parentSeqNb, start);
+ newNode = nodeFactory.createCoreNode(config, seqNb, parentSeqNb, start);
newNode.readSpecificHeader(buffer);
break;
case LEAF:
/* Leaf nodes */
- newNode = new LeafNode(config, seqNb, parentSeqNb, start);
+ newNode = nodeFactory.createLeafNode(config, seqNb, parentSeqNb, start);
newNode.readSpecificHeader(buffer);
break;
for (i = 0; i < intervalCount; i++) {
HTInterval interval = HTInterval.readFrom(buffer);
newNode.fIntervals.add(interval);
- newNode.fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;
+ newNode.fSizeOfIntervalSection += interval.getSizeOnDisk();
}
/* Assign the node's other information we have read previously */
newNode.fNodeEnd = end;
- newNode.fStringSectionOffset = stringSectionOffset;
newNode.fIsOnDisk = true;
return newNode;
fRwl.readLock().lock();
try {
final int blockSize = fConfig.getBlockSize();
- int curStringsEntryEndPos = blockSize;
ByteBuffer buffer = ByteBuffer.allocate(blockSize);
buffer.order(ByteOrder.LITTLE_ENDIAN);
buffer.putInt(fSequenceNumber);
buffer.putInt(fParentSequenceNumber);
buffer.putInt(fIntervals.size());
- buffer.putInt(fStringSectionOffset);
buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header
/* Now call the inner method to write the specific header part */
writeSpecificHeader(buffer);
/* Back to us, we write the intervals */
- for (HTInterval interval : fIntervals) {
- int size = interval.writeInterval(buffer, curStringsEntryEndPos);
- curStringsEntryEndPos -= size;
- }
+ fIntervals.forEach(i -> i.writeInterval(buffer));
/*
- * Write padding between the end of the Data section and the start
- * of the Strings section (needed to fill the node in case there is
- * no Strings section)
+ * Fill the rest of the block with zeros, so the node always
+ * occupies exactly one full block on disk
*/
- while (buffer.position() < fStringSectionOffset) {
+ while (buffer.position() < blockSize) {
buffer.put((byte) 0);
}
- /*
- * If the offsets were right, the size of the Strings section should
- * be == to the expected size
- */
- if (curStringsEntryEndPos != fStringSectionOffset) {
- throw new IllegalStateException("Wrong size of Strings section: Actual: " + curStringsEntryEndPos + ", Expected: " + fStringSectionOffset); //$NON-NLS-1$ //$NON-NLS-2$
- }
-
/* Finally, write everything in the Buffer to disk */
-
- // if we don't do this, flip() will lose what's after.
- buffer.position(blockSize);
-
buffer.flip();
int res = fc.write(buffer);
if (res != blockSize) {
fRwl.writeLock().lock();
try {
/* Just in case, should be checked before even calling this function */
- assert (newInterval.getIntervalSize() <= getNodeFreeSpace());
+ assert (newInterval.getSizeOnDisk() <= getNodeFreeSpace());
/* Find the insert position to keep the list sorted */
int index = fIntervals.size();
}
fIntervals.add(index, newInterval);
- fSizeOfIntervalSection += HTInterval.DATA_ENTRY_SIZE;
+ fSizeOfIntervalSection += newInterval.getSizeOnDisk();
- /* Update the in-node offset "pointer" */
- fStringSectionOffset -= (newInterval.getStringsEntrySize());
} finally {
fRwl.writeLock().unlock();
}
* null anyway).
*/
ITmfStateInterval interval = fIntervals.get(i);
- if (interval.getStartTime() <= t &&
+ if (t >= interval.getStartTime() &&
interval.getAttribute() < stateInfo.size()) {
stateInfo.set(interval.getAttribute(), interval);
}
*/
public int getNodeFreeSpace() {
fRwl.readLock().lock();
- int ret = fStringSectionOffset - getDataSectionEndOffset();
+ int ret = fConfig.getBlockSize() - getDataSectionEndOffset();
fRwl.readLock().unlock();
return ret;
@SuppressWarnings("nls")
public void debugPrintIntervals(PrintWriter writer) {
/* Only used for debugging, shouldn't be externalized */
- writer.println("Node #" + fSequenceNumber + ":");
+ writer.println("Intervals for node #" + fSequenceNumber + ":");
/* Array of children */
- if (getNodeType() == NodeType.CORE) { /* Only Core Nodes can have children */
- CoreNode thisNode = (CoreNode) this;
+ if (getNodeType() != NodeType.LEAF) { /* Only parent nodes can have children */
+ ParentNode thisNode = (ParentNode) this;
writer.print(" " + thisNode.getNbChildren() + " children");
if (thisNode.getNbChildren() >= 1) {
writer.print(": [ " + thisNode.getChild(0));