Merge Generic State System core part
[deliverable/tracecompass.git] / org.eclipse.linuxtools.tmf.core / src / org / eclipse / linuxtools / tmf / core / statesystem / backend / historytree / HTNode.java
CommitLineData
a52fde77
AM
/*******************************************************************************
 * Copyright (c) 2012 Ericsson
 * Copyright (c) 2010, 2011 École Polytechnique de Montréal
 * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 *******************************************************************************/
12
package org.eclipse.linuxtools.tmf.core.statesystem.backend.historytree;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
import org.eclipse.linuxtools.tmf.core.statesystem.TimeRangeException;
import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
27
/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author alexmont
 */
34abstract class HTNode {
35
36 /* Reference to the History Tree to whom this node belongs */
37 protected final HistoryTree ownerTree;
38
39 /* Time range of this node */
40 private final long nodeStart;
41 private long nodeEnd;
42
43 /* Sequence number = position in the node section of the file */
44 private final int sequenceNumber;
45 private int parentSequenceNumber; /* = -1 if this node is the root node */
46
47 /* Where the Strings section begins (from the start of the node */
48 private int stringSectionOffset;
49
50 /* True if this node is closed (and to be committed to disk) */
51 private boolean isDone;
52
53 /* Vector containing all the intervals contained in this node */
54 private final ArrayList<HTInterval> intervals;
55
56 HTNode(HistoryTree tree, int seqNumber, int parentSeqNumber, long start) {
57 this.ownerTree = tree;
58 this.nodeStart = start;
59 this.sequenceNumber = seqNumber;
60 this.parentSequenceNumber = parentSeqNumber;
61
62 this.stringSectionOffset = ownerTree.config.blockSize;
63 this.isDone = false;
64 this.intervals = new ArrayList<HTInterval>();
65 }
66
67 /**
68 * Reader factory constructor. Build a Node object (of the right type) by
69 * reading a block in the file.
70 *
71 * @param tree
72 * Reference to the HT which will own this node
73 * @param fc
74 * FileChannel to the history file, ALREADY SEEKED at the start
75 * of the node.
76 * @throws IOException
77 */
78 final static HTNode readNode(HistoryTree tree, FileChannel fc)
79 throws IOException {
80 HTNode newNode = null;
81 int res, i;
82
83 ByteBuffer buffer = ByteBuffer.allocate(tree.config.blockSize);
84 buffer.order(ByteOrder.LITTLE_ENDIAN);
85 buffer.clear();
86 res = fc.read(buffer);
87 assert (res == tree.config.blockSize);
88 // This often breaks, so might as well keep this code not too far...
89 // if ( res != tree.config.blockSize ) {
90 // tree.debugPrintFullTree(new PrintWriter(System.out, true), null,
91 // false);
92 // assert ( false );
93 // }
94 buffer.flip();
95
96 /* Read the common header part */
97 byte type = buffer.get();
98 long start = buffer.getLong();
99 long end = buffer.getLong();
100 int seqNb = buffer.getInt();
101 int parentSeqNb = buffer.getInt();
102 int intervalCount = buffer.getInt();
103 int stringSectionOffset = buffer.getInt();
104 boolean done = byteToBool(buffer.get());
105
106 /* Now the rest of the header depends on the node type */
107 switch (type) {
108 case 1:
109 /* Core nodes */
110 newNode = new CoreNode(tree, seqNb, parentSeqNb, start);
111 newNode.readSpecificHeader(buffer);
112 break;
113
114 // TODO implement other node types
115 // case 2:
116 // /* Leaf nodes */
117 //
118 // break;
119 //
120 //
121 // case 3:
122 // /* "Claudette" (extended) nodes */
123 //
124 // break;
125
126 default:
127 /* Unrecognized node type */
128 throw new IOException();
129 }
130
131 /*
132 * At this point, we should be done reading the header and 'buffer'
133 * should only have the intervals left
134 */
135 for (i = 0; i < intervalCount; i++) {
136 newNode.intervals.add(HTInterval.readFrom(buffer));
137 }
138
139 /* Assign the node's other information we have read previously */
140 newNode.nodeEnd = end;
141 newNode.stringSectionOffset = stringSectionOffset;
142 newNode.isDone = done;
143
144 return newNode;
145 }
146
147 final void writeSelf(FileChannel fc) throws IOException {
148 int res, size;
149 int curStringsEntryEndPos = ownerTree.config.blockSize;
150
151 ByteBuffer buffer = ByteBuffer.allocate(ownerTree.config.blockSize);
152 buffer.order(ByteOrder.LITTLE_ENDIAN);
153 buffer.clear();
154
155 /* Write the common header part */
156 buffer.put(this.getNodeType());
157 buffer.putLong(nodeStart);
158 buffer.putLong(nodeEnd);
159 buffer.putInt(sequenceNumber);
160 buffer.putInt(parentSequenceNumber);
161 buffer.putInt(intervals.size());
162 buffer.putInt(stringSectionOffset);
163 buffer.put(boolToByte(isDone));
164
165 /* Now call the inner method to write the specific header part */
166 this.writeSpecificHeader(buffer);
167
168 /* Back to us, we write the intervals */
169 for (HTInterval interval : intervals) {
170 size = interval.writeInterval(buffer, curStringsEntryEndPos);
171 curStringsEntryEndPos -= size;
172 }
173
174 /*
175 * Write padding between the end of the Data section and the start of
176 * the Strings section (needed to fill the node in case there is no
177 * Strings section)
178 */
179 while (buffer.position() < stringSectionOffset) {
180 buffer.put((byte) 0);
181 }
182
183 /*
184 * If the offsets were right, the size of the Strings section should be
185 * == to the expected size
186 */
187 assert (curStringsEntryEndPos == stringSectionOffset);
188
189 /* Finally, write everything in the Buffer to disk */
190
191 // if we don't do this, flip() will lose what's after.
192 buffer.position(ownerTree.config.blockSize);
193
194 buffer.flip();
195 res = fc.write(buffer);
196 assert (res == ownerTree.config.blockSize);
197 }
198
199 /**
200 * Accessors
201 */
202 long getNodeStart() {
203 return nodeStart;
204 }
205
206 long getNodeEnd() {
207 if (this.isDone) {
208 return nodeEnd;
209 }
210 return 0;
211 }
212
213 int getSequenceNumber() {
214 return sequenceNumber;
215 }
216
217 int getParentSequenceNumber() {
218 return parentSequenceNumber;
219 }
220
221 /**
222 * Change this node's parent. Used when we create a new root node for
223 * example.
224 */
225 void setParentSequenceNumber(int newParent) {
226 parentSequenceNumber = newParent;
227 }
228
229 boolean isDone() {
230 return isDone;
231 }
232
233 /**
234 * Add an interval to this node
235 *
236 * @param newInterval
237 */
238 void addInterval(HTInterval newInterval) {
239 /* Just in case, but should be checked before even calling this function */
240 assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());
241
242 intervals.add(newInterval);
243
244 /* Update the in-node offset "pointer" */
245 stringSectionOffset -= (newInterval.getStringsEntrySize());
246 }
247
248 /**
249 * We've received word from the containerTree that newest nodes now exist to
250 * our right. (Puts isDone = true and sets the endtime)
251 *
252 * @param endtime
253 * The nodeEnd time that the node will have
254 * @throws TimeRangeException
255 */
256 void closeThisNode(long endtime) {
257 assert (endtime >= this.nodeStart);
258 // /* This also breaks often too */
259 // if ( endtime.getValue() <= this.nodeStart.getValue() ) {
260 // ownerTree.debugPrintFullTree(new PrintWriter(System.out, true), null,
261 // false);
262 // assert ( false );
263 // }
264
265 if (intervals.size() > 0) {
266 /*
267 * Sort the intervals by ascending order of their end time. This
268 * speeds up lookups a bit
269 */
270 Collections.sort(intervals);
271
272 /*
273 * Make sure there are no intervals in this node with their EndTime
274 * > the one requested. Only need to check the last one since they
275 * are now sorted
276 */
277 assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
278 }
279
280 this.isDone = true;
281 this.nodeEnd = endtime;
282 return;
283 }
284
285 /**
286 * The method to fill up the stateInfo (passed on from the Current State
287 * Tree when it does a query on the SHT). We'll replace the data in that
288 * vector with whatever relevant we can find from this node
289 *
290 * @param stateInfo
291 * The same stateInfo that comes from SHT's doQuery()
292 * @param t
293 * The timestamp for which the query is for. Only return
294 * intervals that intersect t.
295 * @throws TimeRangeException
296 */
297 void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
298 throws TimeRangeException {
299 assert (this.isDone); // not sure this will always be the case...
300 int startIndex;
301
302 if (intervals.size() == 0) {
303 return;
304 }
305 startIndex = getStartIndexFor(t);
306
307 for (int i = startIndex; i < intervals.size(); i++) {
308 /*
309 * Now we only have to compare the Start times, since we now the End
310 * times necessarily fit
311 */
312 if (intervals.get(i).getStartTime() <= t) {
313 stateInfo.set(intervals.get(i).getAttribute(), intervals.get(i));
314 }
315 }
316 return;
317 }
318
319 /**
320 * Get a single Interval from the information in this node If the
321 * key/timestamp pair cannot be found, we return null.
322 *
323 * @param key
324 * @param t
325 * @return The Interval containing the information we want, or null if it
326 * wasn't found
327 * @throws TimeRangeException
328 */
329 HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
330 assert (this.isDone);
331 int startIndex;
332
333 if (intervals.size() == 0) {
334 return null;
335 }
336
337 startIndex = getStartIndexFor(t);
338
339 for (int i = startIndex; i < intervals.size(); i++) {
340 if (intervals.get(i).getAttribute() == key) {
341 if (intervals.get(i).getStartTime() <= t) {
342 return intervals.get(i);
343 }
344 }
345 }
346 /* We didn't find the relevant information in this node */
347 return null;
348 }
349
350 private int getStartIndexFor(long t) throws TimeRangeException {
351 HTInterval dummy;
352 int index;
353
354 /*
355 * Since the intervals are sorted by end time, we can skip all the ones
356 * at the beginning whose end times are smaller than 't'. Java does
357 * provides a .binarySearch method, but its API is quite weird...
358 */
359 dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
360 index = Collections.binarySearch(intervals, dummy);
361
362 if (index < 0) {
363 /*
364 * .binarySearch returns a negative number if the exact value was
365 * not found. Here we just want to know where to start searching, we
366 * don't care if the value is exact or not.
367 */
368 index = -index - 1;
369
370 }
371
372 /* Sometimes binarySearch yields weird stuff... */
373 if (index < 0) {
374 index = 0;
375 }
376 if (index >= intervals.size()) {
377 index = intervals.size() - 1;
378 }
379
380 /*
381 * Another API quirkiness, the returned index is the one of the *last*
382 * element of a series of equal endtimes, which happens sometimes. We
383 * want the *first* element of such a series, to read through them
384 * again.
385 */
386 while (index > 0
387 && intervals.get(index - 1).compareTo(intervals.get(index)) == 0) {
388 index--;
389 }
390 // FIXME F*ck all this, just do our own binary search in a saner way...
391
392 // //checks to make sure startIndex works how I think it does
393 // if ( startIndex > 0 ) { assert ( intervals.get(startIndex-1).getEnd()
394 // < t ); }
395 // assert ( intervals.get(startIndex).getEnd() >= t );
396 // if ( startIndex < intervals.size()-1 ) { assert (
397 // intervals.get(startIndex+1).getEnd() >= t ); }
398
399 return index;
400 }
401
402 /**
403 * @return The offset, within the node, where the Data section ends
404 */
405 private int getDataSectionEndOffset() {
406 return this.getTotalHeaderSize() + HTNode.getDataEntrySize()
407 * intervals.size();
408 }
409
410 /**
411 * Returns the free space in the node, which is simply put, the
412 * stringSectionOffset - dataSectionOffset
413 */
414 int getNodeFreeSpace() {
415 return stringSectionOffset - this.getDataSectionEndOffset();
416 }
417
418 /**
419 * Returns the current space utilisation of this node, as a percentage.
420 * (used space / total usable space, which excludes the header)
421 */
422 long getNodeUsagePRC() {
423 float freePercent = (float) this.getNodeFreeSpace()
424 / (float) (ownerTree.config.blockSize - this.getTotalHeaderSize())
425 * 100f;
426 return (long) (100L - freePercent);
427 }
428
429 protected final static int getDataEntrySize() {
430 return 16 /* 2 x Timevalue/long (interval start + end) */
431 + 4 /* int (key) */
432 + 1 /* byte (type) */
433 + 4; /* int (valueOffset) */
434 /* = 25 */
435 }
436
437 protected final static byte boolToByte(boolean thebool) {
438 if (thebool) {
439 return (byte) 1;
440 }
441 return (byte) 0;
442 }
443
444 final static boolean byteToBool(byte thebyte) {
445 return (thebyte == (byte) 1);
446 }
447
448 /**
449 * @name Debugging functions
450 */
451
452 @SuppressWarnings("nls")
453 @Override
454 public String toString() {
455 /* Only used for debugging, shouldn't be externalized */
456 StringBuffer buf = new StringBuffer("Node #" + sequenceNumber + ", ");
457 buf.append(this.toStringSpecific());
458 buf.append(intervals.size() + " intervals (" + this.getNodeUsagePRC()
459 + "% used), ");
460
461 buf.append("[" + this.nodeStart + " - ");
462 if (this.isDone) {
463 buf = buf.append("" + this.nodeEnd + "]");
464 } else {
465 buf = buf.append("...]");
466 }
467 return buf.toString();
468 }
469
470 /**
471 * Debugging function that prints out the contents of this node
472 *
473 * @param writer
474 * PrintWriter in which we will print the debug output
475 */
476 @SuppressWarnings("nls")
477 void debugPrintIntervals(PrintWriter writer) {
478 /* Only used for debugging, shouldn't be externalized */
479 writer.println("Node #" + sequenceNumber + ":");
480
481 /* Array of children */
482 if (this.getNodeType() == 1) { /* Only Core Nodes can have children */
483 CoreNode thisNode = (CoreNode) this;
484 writer.print(" " + thisNode.getNbChildren() + " children");
485 if (thisNode.getNbChildren() >= 1) {
486 writer.print(": [ " + thisNode.getChild(0));
487 for (int i = 1; i < thisNode.getNbChildren(); i++) {
488 writer.print(", " + thisNode.getChild(i));
489 }
490 writer.print(']');
491 }
492 writer.print('\n');
493 }
494
495 /* List of intervals in the node */
496 writer.println(" Intervals contained:");
497 for (int i = 0; i < intervals.size(); i++) {
498 writer.println(intervals.get(i).toString());
499 }
500 writer.println('\n');
501 }
502
503 final static int getCommonHeaderSize() {
504 /*
505 * 1 - byte (type)
506 *
507 * 16 - 2x long (start time, end time)
508 *
509 * 16 - 4x int (seq number, parent seq number, intervalcount, strings
510 * section pos.)
511 *
512 * 1 - byte (done or not)
513 */
514 return 34;
515 }
516
517 /**
518 * @name Abstract methods
519 */
520
521 protected abstract byte getNodeType();
522
523 protected abstract int getTotalHeaderSize();
524
525 protected abstract void readSpecificHeader(ByteBuffer buffer);
526
527 protected abstract void writeSpecificHeader(ByteBuffer buffer);
528
529 protected abstract String toStringSpecific();
530}
This page took 0.043401 seconds and 5 git commands to generate.