/*******************************************************************************
 * Copyright (c) 2010, 2017 École Polytechnique de Montréal and others
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/
package org.eclipse.tracecompass.internal.datastore.core.historytree;

import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.logging.Logger;

import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.common.core.log.TraceCompassLog;
import org.eclipse.tracecompass.internal.datastore.core.Activator;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.AbstractHistoryTree.IHTNodeFactory;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.HTNode;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.IHistoryTree;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
 * This class abstracts inputs/outputs of the HistoryTree nodes.
 *
 * It contains all the methods and descriptors to handle reading/writing nodes
 * to the tree-file on disk and all the caching mechanisms.
 *
 * This abstraction is mainly for code isolation/clarification purposes. Every
 * HistoryTree must contain 1 and only 1 HT_IO element.
 *
 * @author Alexandre Montplaisir
 * @author Geneviève Bastien
 *
 * @param <E>
 *            The type of objects that will be saved in the tree
 * @param <N>
 *            The base type of the nodes of this tree
 */
54 public class HtIo
<E
extends IHTInterval
, N
extends HTNode
<E
>> {
56 private static final Logger LOGGER
= TraceCompassLog
.getLogger(HtIo
.class);
58 // ------------------------------------------------------------------------
59 // Global cache of nodes
60 // ------------------------------------------------------------------------
62 private static final class CacheKey
{
64 public final HtIo
<IHTInterval
, HTNode
<IHTInterval
>> fHistoryTreeIo
;
65 public final int fSeqNumber
;
67 public CacheKey(HtIo
<IHTInterval
, HTNode
<IHTInterval
>> htio
, int seqNumber
) {
68 fHistoryTreeIo
= htio
;
69 fSeqNumber
= seqNumber
;
73 public int hashCode() {
74 return Objects
.hash(fHistoryTreeIo
, fSeqNumber
);
78 public boolean equals(@Nullable Object obj
) {
85 if (getClass() != obj
.getClass()) {
88 CacheKey other
= (CacheKey
) obj
;
89 return (fHistoryTreeIo
.equals(other
.fHistoryTreeIo
) &&
90 fSeqNumber
== other
.fSeqNumber
);
94 private static final int CACHE_SIZE
= 256;
96 private static final LoadingCache
<CacheKey
, HTNode
<IHTInterval
>> NODE_CACHE
= checkNotNull(CacheBuilder
.newBuilder()
97 .maximumSize(CACHE_SIZE
)
98 .build(new CacheLoader
<CacheKey
, HTNode
<IHTInterval
>>() {
100 public HTNode
<IHTInterval
> load(CacheKey key
) throws IOException
{
101 HtIo
<IHTInterval
, HTNode
<IHTInterval
>> io
= key
.fHistoryTreeIo
;
102 int seqNb
= key
.fSeqNumber
;
104 LOGGER
.finest(() -> "[HtIo:CacheMiss] seqNum=" + seqNb
); //$NON-NLS-1$
107 io
.seekFCToNodePos(io
.fFileChannelIn
, seqNb
);
108 return HTNode
.readNode(io
.fBlockSize
,
118 * This method invalidates all data in the cache so nodes will have to be
122 static void clearCache() {
123 NODE_CACHE
.invalidateAll();
127 * Get whether a node is present in the cache
130 * The htio object that contains the node
132 * The sequence number of the node to check
133 * @return <code>true</code> if the node is present in the cache,
134 * <code>false</code> otherwise
137 static <E
extends IHTInterval
, N
extends HTNode
<E
>> boolean isInCache(HtIo
<E
, N
> htio
, int seqNum
) {
138 @SuppressWarnings("unchecked")
139 @Nullable HTNode
<IHTInterval
> present
= NODE_CACHE
.getIfPresent(new CacheKey((HtIo
<IHTInterval
, HTNode
<IHTInterval
>>) htio
, seqNum
));
140 return (present
!= null);
143 // ------------------------------------------------------------------------
145 // ------------------------------------------------------------------------
147 /* Relevant configuration elements from the History Tree */
148 private final File fStateHistoryFile
;
149 private final int fBlockSize
;
150 private final int fNodeMaxChildren
;
151 private final IHTIntervalReader
<E
> fObjectReader
;
152 private final IHTNodeFactory
<E
, N
> fNodeFactory
;
154 /* Fields related to the file I/O */
155 private final FileInputStream fFileInputStream
;
156 private final FileOutputStream fFileOutputStream
;
157 private final FileChannel fFileChannelIn
;
158 private final FileChannel fFileChannelOut
;
160 // ------------------------------------------------------------------------
162 // ------------------------------------------------------------------------
165 * Standard constructor
167 * @param stateHistoryFile
168 * The name of the history file
170 * The size of each "block" on disk in bytes. One node will
171 * always fit in one block. It should be at least 4096.
172 * @param nodeMaxChildren
173 * The maximum number of children allowed per core (non-leaf)
176 * Flag indicating that the file must be created from scratch
177 * @param intervalReader
178 * The factory to create new tree data elements when reading from
181 * The factory to create new nodes for this tree
182 * @throws IOException
183 * An exception can be thrown when file cannot be accessed
185 public HtIo(File stateHistoryFile
,
189 IHTIntervalReader
<E
> intervalReader
,
190 IHTNodeFactory
<E
, N
> nodeFactory
) throws IOException
{
192 fBlockSize
= blockSize
;
193 fNodeMaxChildren
= nodeMaxChildren
;
194 fObjectReader
= intervalReader
;
195 fNodeFactory
= nodeFactory
;
197 fStateHistoryFile
= stateHistoryFile
;
199 boolean success1
= true;
200 /* Create a new empty History Tree file */
201 if (fStateHistoryFile
.exists()) {
202 success1
= fStateHistoryFile
.delete();
204 boolean success2
= fStateHistoryFile
.createNewFile();
205 if (!(success1
&& success2
)) {
206 /* It seems we do not have permission to create the new file */
207 throw new IOException("Cannot create new file at " + //$NON-NLS-1$
208 fStateHistoryFile
.getName());
210 fFileInputStream
= new FileInputStream(fStateHistoryFile
);
211 fFileOutputStream
= new FileOutputStream(fStateHistoryFile
, false);
214 * We want to open an existing file, make sure we don't squash the
215 * existing content when opening the fos!
217 fFileInputStream
= new FileInputStream(fStateHistoryFile
);
218 fFileOutputStream
= new FileOutputStream(fStateHistoryFile
, true);
220 fFileChannelIn
= fFileInputStream
.getChannel();
221 fFileChannelOut
= fFileOutputStream
.getChannel();
225 * Read a node from the file on disk.
228 * The sequence number of the node to read.
229 * @return The object representing the node
230 * @throws ClosedChannelException
231 * Usually happens because the file was closed while we were
232 * reading. Instead of using a big reader-writer lock, we'll
233 * just catch this exception.
235 @SuppressWarnings("unchecked")
236 public N
readNode(int seqNumber
) throws ClosedChannelException
{
237 /* Do a cache lookup. If it's not present it will be loaded from disk */
238 LOGGER
.finest(() -> "[HtIo:CacheLookup] seqNum=" + seqNumber
); //$NON-NLS-1$
239 CacheKey key
= new CacheKey((HtIo
<IHTInterval
, HTNode
<IHTInterval
>>) this, seqNumber
);
241 return (N
) checkNotNull(NODE_CACHE
.get(key
));
243 } catch (ExecutionException e
) {
244 /* Get the inner exception that was generated */
245 Throwable cause
= e
.getCause();
246 if (cause
instanceof ClosedChannelException
) {
247 throw (ClosedChannelException
) cause
;
250 * Other types of IOExceptions shouldn't happen at this point
253 Activator
.getInstance().logError(e
.getMessage(), e
);
254 throw new IllegalStateException();
259 * Write the given node to disk.
264 @SuppressWarnings("unchecked")
265 public void writeNode(N node
) {
267 int seqNumber
= node
.getSequenceNumber();
269 /* "Write-back" the node into the cache */
270 CacheKey key
= new CacheKey((HtIo
<IHTInterval
, HTNode
<IHTInterval
>>) this, seqNumber
);
271 NODE_CACHE
.put(key
, (HTNode
<IHTInterval
>) node
);
273 /* Position ourselves at the start of the node and write it */
274 synchronized (this) {
275 seekFCToNodePos(fFileChannelOut
, seqNumber
);
276 node
.writeSelf(fFileChannelOut
);
278 } catch (IOException e
) {
279 /* If we were able to open the file, we should be fine now... */
280 Activator
.getInstance().logError(e
.getMessage(), e
);
285 * Get the output file channel, used for writing, positioned after a certain
286 * number of nodes, or at the beginning.
288 * FIXME: Do not expose the file output. Use rather a method to
289 * writeAtEnd(int nodeOffset, ByteBuffer)
292 * The offset in the file, in number of nodes. If the value is
293 * lower than 0, the file will be positioned at the beginning.
294 * @return The correctly-seeked input stream
296 public FileOutputStream
getFileWriter(int nodeOffset
) {
298 if (nodeOffset
< 0) {
299 fFileChannelOut
.position(0);
301 seekFCToNodePos(fFileChannelOut
, nodeOffset
);
303 } catch (IOException e
) {
304 Activator
.getInstance().logError(e
.getMessage(), e
);
306 return fFileOutputStream
;
310 * Retrieve the input stream with which to write the attribute tree.
312 * FIXME: Do not expose the stream, have a method to write at the end
316 * The offset in the file, in number of nodes. This should be
317 * after all the nodes.
318 * @return The correctly-seeked input stream
320 public FileInputStream
supplyATReader(int nodeOffset
) {
323 * Position ourselves at the start of the Mapping section in the
324 * file (which is right after the Blocks)
326 seekFCToNodePos(fFileChannelIn
, nodeOffset
);
327 } catch (IOException e
) {
328 Activator
.getInstance().logError(e
.getMessage(), e
);
330 return fFileInputStream
;
334 * Close all file channels and streams.
336 public synchronized void closeFile() {
338 fFileInputStream
.close();
339 fFileOutputStream
.close();
340 } catch (IOException e
) {
341 Activator
.getInstance().logError(e
.getMessage(), e
);
346 * Delete the history tree file
348 public synchronized void deleteFile() {
351 if (!fStateHistoryFile
.delete()) {
352 /* We didn't succeed in deleting the file */
353 Activator
.getInstance().logError("Failed to delete" + fStateHistoryFile
.getName()); //$NON-NLS-1$
358 * Seek the given FileChannel to the position corresponding to the node that
362 * the channel to seek
364 * the node sequence number to seek the channel to
365 * @throws IOException
366 * If some other I/O error occurs
368 private void seekFCToNodePos(FileChannel fc
, int seqNumber
)
371 * Cast to (long) is needed to make sure the result is a long too and
372 * doesn't get truncated
374 fc
.position(IHistoryTree
.TREE_HEADER_SIZE
375 + ((long) seqNumber
) * fBlockSize
);