package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* A Core node is a first-level node of a History Tree which is not a leaf node.
/*
 * Start time of each child node, parallel to the 'children' array.
 * NOTE(review): presumably sized to config.getMaxChildren() — confirm in
 * the constructor, which is not visible in this chunk.
 */
private long[] childStart;
/** Seq number of this node's extension. -1 if none */
- private int extension = -1;
+ private volatile int extension = -1;
+ // NOTE(review): 'extension' is volatile, suggesting it is read outside
+ // the 'rwl' lock below (it is serialized in writeSpecificHeader) — confirm.
+
+ /**
+ * Lock used to gate the accesses to the children arrays. Meant to be a
+ * different lock from the one in {@link HTNode}.
+ */
+ private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(false);
/**
* Initial constructor. Use this to initialize a new EMPTY node.
}
@Override
// Visibility narrowed from public to protected: the header (de)serialization
// hooks should only be invoked from within the HTNode hierarchy.
- public void readSpecificHeader(ByteBuffer buffer) {
+ protected void readSpecificHeader(ByteBuffer buffer) {
// NOTE(review): 'size' looks unused here — the loops reading the
// 'children'/'childStart' arrays (sized by it) are presumably elided
// from this view; confirm against the full file.
int size = getConfig().getMaxChildren();
extension = buffer.getInt();
}
@Override
// Visibility narrowed from public to protected, matching readSpecificHeader.
- public void writeSpecificHeader(ByteBuffer buffer) {
+ protected void writeSpecificHeader(ByteBuffer buffer) {
// NOTE(review): 'size' looks unused and the method body appears truncated
// in this view (no closing brace) — the writes of the children arrays are
// presumably elided; confirm against the full file.
int size = getConfig().getMaxChildren();
buffer.putInt(extension);
* @return The number of child nodes
*/
public int getNbChildren() {
- return nbChildren;
+ rwl.readLock().lock();
+ int ret = nbChildren;
+ rwl.readLock().unlock();
+ return ret;
}
/**
* @return The child node
*/
public int getChild(int index) {
- return children[index];
+ rwl.readLock().lock();
+ try {
+ return children[index];
+ } finally {
+ rwl.readLock().unlock();
+ }
}
/**
* @return The latest child node
*/
public int getLatestChild() {
- return children[nbChildren - 1];
+ rwl.readLock().lock();
+ try {
+ return children[nbChildren - 1];
+ } finally {
+ rwl.readLock().unlock();
+ }
}
/**
* @return The start time of the that child node.
*/
public long getChildStart(int index) {
- return childStart[index];
+ rwl.readLock().lock();
+ try {
+ return childStart[index];
+ } finally {
+ rwl.readLock().unlock();
+ }
}
/**
* @return The start time of the latest child
*/
public long getLatestChildStart() {
- return childStart[nbChildren - 1];
+ rwl.readLock().lock();
+ try {
+ return childStart[nbChildren - 1];
+ } finally {
+ rwl.readLock().unlock();
+ }
}
/**
* The SHTNode object of the new child
*/
public void linkNewChild(CoreNode childNode) {
- assert (this.nbChildren < getConfig().getMaxChildren());
+ rwl.writeLock().lock();
+ try {
+ assert (nbChildren < getConfig().getMaxChildren());
- this.children[nbChildren] = childNode.getSequenceNumber();
- this.childStart[nbChildren] = childNode.getNodeStart();
- this.nbChildren++;
+ children[nbChildren] = childNode.getSequenceNumber();
+ childStart[nbChildren] = childNode.getNodeStart();
+ nbChildren++;
+
+ } finally {
+ rwl.writeLock().unlock();
+ }
}
@Override
}
@Override
// Renamed from getTotalHeaderSize: this method now reports only the
// type-specific portion of the header; the common part is added by the
// final HTNode.getTotalHeaderSize().
- public int getTotalHeaderSize() {
+ protected int getSpecificHeaderSize() {
int maxChildren = getConfig().getMaxChildren();
// NOTE(review): the sum below only accounts for the extension int and the
// childStart table — the 'children' table (and nbChildren count) bytes are
// presumably in lines elided from this view; confirm against the full file.
int specificSize =
SIZE_INT /* 1x int (extension node) */
/* MAX_NB * Timevalue ('childStart' table) */
+ SIZE_LONG * maxChildren;
- return COMMON_HEADER_SIZE + specificSize;
+ return specificSize;
}
@Override
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
/**
* Size of an entry in the data section.
*
+ * <pre>
* 16 2 x Timevalue/long (interval start + end)
* + 4 int (key)
* + 1 byte (type)
* + 4 int (valueOffset)
+ * </pre>
*/
protected static final int DATA_ENTRY_SIZE = 25;
/* Vector containing all the intervals contained in this node */
private final List<HTInterval> intervals;
+ /* Lock used to protect the accesses to intervals, nodeEnd and such */
+ private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(false);
+
/**
* Constructor
*
* If there was an error writing
*/
public final void writeSelf(FileChannel fc) throws IOException {
- final int blockSize = config.getBlockSize();
- int curStringsEntryEndPos = blockSize;
-
- ByteBuffer buffer = ByteBuffer.allocate(blockSize);
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.clear();
-
- /* Write the common header part */
- buffer.put(this.getNodeType());
- buffer.putLong(nodeStart);
- buffer.putLong(nodeEnd);
- buffer.putInt(sequenceNumber);
- buffer.putInt(parentSequenceNumber);
- buffer.putInt(intervals.size());
- buffer.putInt(stringSectionOffset);
- buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header
-
- /* Now call the inner method to write the specific header part */
- this.writeSpecificHeader(buffer);
-
- /* Back to us, we write the intervals */
- for (HTInterval interval : intervals) {
- int size = interval.writeInterval(buffer, curStringsEntryEndPos);
- curStringsEntryEndPos -= size;
- }
-
/*
- * Write padding between the end of the Data section and the start of
- * the Strings section (needed to fill the node in case there is no
- * Strings section)
+ * Yes, we are taking the *read* lock here, because we are reading the
+ * information in the node to write it to disk.
*/
- while (buffer.position() < stringSectionOffset) {
- buffer.put((byte) 0);
- }
+ rwl.readLock().lock();
+ try {
+ final int blockSize = config.getBlockSize();
+ int curStringsEntryEndPos = blockSize;
+
+ ByteBuffer buffer = ByteBuffer.allocate(blockSize);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ buffer.clear();
+
+ /* Write the common header part */
+ buffer.put(this.getNodeType());
+ buffer.putLong(nodeStart);
+ buffer.putLong(nodeEnd);
+ buffer.putInt(sequenceNumber);
+ buffer.putInt(parentSequenceNumber);
+ buffer.putInt(intervals.size());
+ buffer.putInt(stringSectionOffset);
+ buffer.put((byte) 1); // TODO Used to be "isDone", to be removed from header
+
+ /* Now call the inner method to write the specific header part */
+ this.writeSpecificHeader(buffer);
+
+ /* Back to us, we write the intervals */
+ for (HTInterval interval : intervals) {
+ int size = interval.writeInterval(buffer, curStringsEntryEndPos);
+ curStringsEntryEndPos -= size;
+ }
- /*
- * If the offsets were right, the size of the Strings section should be
- * == to the expected size
- */
- assert (curStringsEntryEndPos == stringSectionOffset);
+ /*
+ * Write padding between the end of the Data section and the start
+ * of the Strings section (needed to fill the node in case there is
+ * no Strings section)
+ */
+ while (buffer.position() < stringSectionOffset) {
+ buffer.put((byte) 0);
+ }
- /* Finally, write everything in the Buffer to disk */
+ /*
+ * If the offsets were right, the size of the Strings section should
+ * be == to the expected size
+ */
+ assert (curStringsEntryEndPos == stringSectionOffset);
- // if we don't do this, flip() will lose what's after.
- buffer.position(blockSize);
+ /* Finally, write everything in the Buffer to disk */
- buffer.flip();
- int res = fc.write(buffer);
- assert (res == blockSize);
+ // if we don't do this, flip() will lose what's after.
+ buffer.position(blockSize);
+
+ buffer.flip();
+ int res = fc.write(buffer);
+ assert (res == blockSize);
+
+ } finally {
+ rwl.readLock().unlock();
+ }
isOnDisk = true;
}
* Interval to add to this node
*/
public void addInterval(HTInterval newInterval) {
- /* Just in case, but should be checked before even calling this function */
- assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());
+ rwl.writeLock().lock();
+ try {
+ /* Just in case, should be checked before even calling this function */
+ assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());
- intervals.add(newInterval);
+ intervals.add(newInterval);
- /* Update the in-node offset "pointer" */
- stringSectionOffset -= (newInterval.getStringsEntrySize());
+ /* Update the in-node offset "pointer" */
+ stringSectionOffset -= (newInterval.getStringsEntrySize());
+ } finally {
+ rwl.writeLock().unlock();
+ }
}
/**
* The nodeEnd time that the node will have
*/
public void closeThisNode(long endtime) {
- assert (endtime >= this.nodeStart);
-
- if (intervals.size() > 0) {
- /*
- * Sort the intervals by ascending order of their end time. This
- * speeds up lookups a bit
- */
- Collections.sort(intervals);
+ rwl.writeLock().lock();
+ try {
+ assert (endtime >= this.nodeStart);
+
+ if (intervals.size() > 0) {
+ /*
+ * Sort the intervals by ascending order of their end time. This
+ * speeds up lookups a bit
+ */
+ Collections.sort(intervals);
+
+ /*
+ * Make sure there are no intervals in this node with their
+ * EndTime > the one requested. Only need to check the last one
+ * since they are now sorted
+ */
+ assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
+ }
- /*
- * Make sure there are no intervals in this node with their EndTime
- * > the one requested. Only need to check the last one since they
- * are now sorted
- */
- assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
+ this.nodeEnd = endtime;
+ } finally {
+ rwl.writeLock().unlock();
}
-
- this.nodeEnd = endtime;
- return;
}
/**
*/
public void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
throws TimeRangeException {
- int startIndex;
-
- if (intervals.size() == 0) {
- return;
- }
- startIndex = getStartIndexFor(t);
-
- for (int i = startIndex; i < intervals.size(); i++) {
- /*
- * Now we only have to compare the Start times, since we now the End
- * times necessarily fit
- */
- if (intervals.get(i).getStartTime() <= t) {
- stateInfo.set(intervals.get(i).getAttribute(), intervals.get(i));
+ /* This is from a state system query, we are "reading" this node */
+ rwl.readLock().lock();
+ try {
+ if (intervals.size() == 0) {
+ return;
+ }
+ int startIndex = getStartIndexFor(t);
+
+ for (int i = startIndex; i < intervals.size(); i++) {
+ /*
+ * Now we only have to compare the Start times, since we now the
+ * End times necessarily fit
+ */
+ if (intervals.get(i).getStartTime() <= t) {
+ stateInfo.set(intervals.get(i).getAttribute(), intervals.get(i));
+ }
}
+ } finally {
+ rwl.readLock().unlock();
}
- return;
}
/**
* @throws TimeRangeException If 't' is invalid
*/
public HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
- int startIndex;
- HTInterval curInterval;
-
- if (intervals.size() == 0) {
- return null;
- }
+ rwl.readLock().lock();
+ try {
+ if (intervals.size() == 0) {
+ return null;
+ }
- startIndex = getStartIndexFor(t);
+ int startIndex = getStartIndexFor(t);
- for (int i = startIndex; i < intervals.size(); i++) {
- curInterval = intervals.get(i);
- if (curInterval.getAttribute() == key
- && curInterval.getStartTime() <= t
- && curInterval.getEndTime() >= t) {
- return curInterval;
+ for (int i = startIndex; i < intervals.size(); i++) {
+ HTInterval curInterval = intervals.get(i);
+ if (curInterval.getAttribute() == key
+ && curInterval.getStartTime() <= t
+ && curInterval.getEndTime() >= t) {
+ return curInterval;
+ }
}
+ /* We didn't find the relevant information in this node */
+ return null;
+
+ } finally {
+ rwl.readLock().unlock();
}
- /* We didn't find the relevant information in this node */
- return null;
}
// Returns the index of the first interval that could possibly be relevant
// for timestamp 't' (i.e. the first one whose end time is >= t).
private int getStartIndexFor(long t) throws TimeRangeException {
- HTInterval dummy;
- int index;
-
+ /* Should only be called by methods with the readLock taken */
/*
* Since the intervals are sorted by end time, we can skip all the ones
* at the beginning whose end times are smaller than 't'. Java does
* provide a .binarySearch method, but its API is quite weird...
*/
- dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
- index = Collections.binarySearch(intervals, dummy);
+ HTInterval dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
+ int index = Collections.binarySearch(intervals, dummy);
// NOTE(review): the body of this branch (presumably converting the
// negative insertion point returned by binarySearch into a valid start
// index) appears elided from this view; confirm against the full file.
if (index < 0) {
/*
return index;
}
+
+ /**
+ * <pre>
+ * 1 - byte (type)
+ * 16 - 2x long (start time, end time)
+ * 16 - 4x int (seq number, parent seq number, intervalcount,
+ * strings section pos.)
+ * 1 - byte (done or not)
+ * </pre>
+ */
+ private static final int COMMON_HEADER_SIZE = 34;
+
+ /**
+ * Return the total header size of this node (will depend on the node type).
+ *
+ * @return The total header size
+ */
+ public final int getTotalHeaderSize() {
+ return COMMON_HEADER_SIZE + getSpecificHeaderSize();
+ }
+
/**
* @return The offset, within the node, where the Data section ends
*/
* @return The amount of free space in the node (in bytes)
*/
public int getNodeFreeSpace() {
- return stringSectionOffset - this.getDataSectionEndOffset();
+ rwl.readLock().lock();
+ int ret = stringSectionOffset - this.getDataSectionEndOffset();
+ rwl.readLock().unlock();
+
+ return ret;
}
/**
* in this node.
*/
public long getNodeUsagePercent() {
- final int blockSize = config.getBlockSize();
- float freePercent = (float) this.getNodeFreeSpace()
- / (float) (blockSize - this.getTotalHeaderSize())
- * 100F;
- return (long) (100L - freePercent);
+ rwl.readLock().lock();
+ try {
+ final int blockSize = config.getBlockSize();
+ float freePercent = (float) this.getNodeFreeSpace()
+ / (float) (blockSize - this.getTotalHeaderSize())
+ * 100F;
+ return (long) (100L - freePercent);
+
+ } finally {
+ rwl.readLock().unlock();
+ }
}
/**
writer.println('\n');
}
- /**
- * 1 - byte (type)
- *
- * 16 - 2x long (start time, end time)
- *
- * 16 - 4x int (seq number, parent seq number, intervalcount, strings
- * section pos.)
- *
- * 1 - byte (done or not)
- */
- protected static final int COMMON_HEADER_SIZE = 34;
-
// ------------------------------------------------------------------------
// Abstract methods
// ------------------------------------------------------------------------
/**
 * @return The byte written to disk to identify this node's type.
 */
public abstract byte getNodeType();
/**
- * Return the total header size of this node type.
+ * Return the specific header size of this node. This means the size
+ * occupied by the type-specific section of the header (not counting the
+ * common part).
*
- * @return The total header size
+ * @return The specific header size
*/
- public abstract int getTotalHeaderSize();
+ protected abstract int getSpecificHeaderSize();
/**
* Read the type-specific part of the node header from a byte buffer.
* The byte buffer to read from. It should be already positioned
* correctly.
*/
- public abstract void readSpecificHeader(ByteBuffer buffer);
+ protected abstract void readSpecificHeader(ByteBuffer buffer);
/**
* Write the type-specific part of the header in a byte buffer.
* The buffer to write to. It should already be at the correct
* position.
*/
- public abstract void writeSpecificHeader(ByteBuffer buffer);
+ protected abstract void writeSpecificHeader(ByteBuffer buffer);
/**
* Node-type-specific toString method. Used for debugging.
*
* @return A string representing the node
*/
- public abstract String toStringSpecific();
+ protected abstract String toStringSpecific();
}