statesystem/org.eclipse.tracecompass.statesystem.core/src/org/eclipse/tracecompass/internal/statesystem/core/backend/historytree/HT_IO.java
/*******************************************************************************
 * Copyright (c) 2012, 2014 Ericsson
 * Copyright (c) 2010, 2011 École Polytechnique de Montréal
 * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 *******************************************************************************/

package org.eclipse.tracecompass.internal.statesystem.core.backend.historytree;

import static org.eclipse.tracecompass.common.core.NonNullUtils.checkNotNull;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.logging.Logger;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.common.core.log.TraceCompassLog;
import org.eclipse.tracecompass.internal.statesystem.core.Activator;
import org.eclipse.tracecompass.internal.statesystem.core.backend.historytree.IHistoryTree.IHTNodeFactory;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

/**
 * This class abstracts inputs/outputs of the HistoryTree nodes.
 *
 * It contains all the methods and descriptors to handle reading/writing nodes
 * to the tree-file on disk and all the caching mechanisms.
 *
 * This abstraction is mainly for code isolation/clarification purposes. Every
 * HistoryTree must contain 1 and only 1 HT_IO element.
 *
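 * A minimal usage sketch (illustrative only; {@code config}, {@code nodeFactory}
 * and {@code node} are placeholders, not code from this file):
 *
 * <pre>{@code
 * HT_IO io = new HT_IO(config, true, nodeFactory);       // may throw IOException
 * io.writeNode(node);                                     // caches the node and writes it to disk
 * HTNode read = io.readNode(node.getSequenceNumber());    // normally served from the cache
 * io.closeFile();
 * }</pre>
 *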
 * @author Alexandre Montplaisir
 */
public class HT_IO {

    private static final Logger LOGGER = TraceCompassLog.getLogger(HT_IO.class);

    // ------------------------------------------------------------------------
    // Global cache of nodes
    // ------------------------------------------------------------------------

    private static final class CacheKey {

        public final HT_IO fStateHistory;
        public final int fSeqNumber;

        public CacheKey(HT_IO stateHistory, int seqNumber) {
            fStateHistory = stateHistory;
            fSeqNumber = seqNumber;
        }

        @Override
        public int hashCode() {
            return Objects.hash(fStateHistory, fSeqNumber);
        }

        @Override
        public boolean equals(@Nullable Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            CacheKey other = (CacheKey) obj;
            return (fStateHistory.equals(other.fStateHistory) &&
                    fSeqNumber == other.fSeqNumber);
        }
    }

    private static final int CACHE_SIZE = 256;

    private static final LoadingCache<CacheKey, HTNode> NODE_CACHE =
        checkNotNull(CacheBuilder.newBuilder()
            .maximumSize(CACHE_SIZE)
            .build(new CacheLoader<CacheKey, HTNode>() {
                @Override
                public HTNode load(CacheKey key) throws IOException {
                    HT_IO io = key.fStateHistory;
                    int seqNb = key.fSeqNumber;

                    LOGGER.finest(() -> "[HtIo:CacheMiss] seqNum=" + seqNb); //$NON-NLS-1$

                    synchronized (io) {
                        io.seekFCToNodePos(io.fFileChannelIn, seqNb);
                        return HTNode.readNode(io.fConfig, io.fFileChannelIn, key.fStateHistory.fNodeFactory);
                    }
                }
            }));

    // ------------------------------------------------------------------------
    // Instance fields
    // ------------------------------------------------------------------------

    /* Configuration of the History Tree */
    private final HTConfig fConfig;

    /* Fields related to the file I/O */
    private final FileInputStream fFileInputStream;
    private final FileOutputStream fFileOutputStream;
    private final FileChannel fFileChannelIn;
    private final FileChannel fFileChannelOut;

    private final IHTNodeFactory fNodeFactory;

    // ------------------------------------------------------------------------
    // Methods
    // ------------------------------------------------------------------------

    /**
     * Standard constructor
     *
     * @param config
     *            The configuration object for the StateHistoryTree
     * @param newFile
     *            Flag indicating that the file must be created from scratch
     * @param nodeFactory
     *            The factory to create new nodes for this tree
     *
     * @throws IOException
     *             Thrown if the file cannot be accessed or created
     */
    public HT_IO(HTConfig config, boolean newFile, IHTNodeFactory nodeFactory) throws IOException {
        fConfig = config;

        File historyTreeFile = config.getStateFile();
        if (newFile) {
            boolean success1 = true;
            /* Create a new empty History Tree file */
            if (historyTreeFile.exists()) {
                success1 = historyTreeFile.delete();
            }
            boolean success2 = historyTreeFile.createNewFile();
            if (!(success1 && success2)) {
                /* It seems we do not have permission to create the new file */
                throw new IOException("Cannot create new file at " + //$NON-NLS-1$
                        historyTreeFile.getName());
            }
            fFileInputStream = new FileInputStream(historyTreeFile);
            fFileOutputStream = new FileOutputStream(historyTreeFile, false);
        } else {
            /*
             * We want to open an existing file, make sure we don't squash the
             * existing content when opening the fos!
             */
            fFileInputStream = new FileInputStream(historyTreeFile);
            fFileOutputStream = new FileOutputStream(historyTreeFile, true);
        }
        fFileChannelIn = fFileInputStream.getChannel();
        fFileChannelOut = fFileOutputStream.getChannel();
        fNodeFactory = nodeFactory;
    }

    /**
     * Read a node from the file on disk.
     *
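     * A minimal caller sketch (illustrative only; {@code io}, {@code seqNumber}
     * and the recovery strategy are placeholders, not part of this class):
     *
     * <pre>{@code
     * try {
     *     HTNode node = io.readNode(seqNumber);
     *     // use the node...
     * } catch (ClosedChannelException e) {
     *     // The tree-file was closed concurrently; abort the query.
     * }
     * }</pre>
     *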
     * @param seqNumber
     *            The sequence number of the node to read.
     * @return The object representing the node
     * @throws ClosedChannelException
     *             Usually happens because the file was closed while we were
     *             reading. Instead of using a big reader-writer lock, we'll
     *             just catch this exception.
     */
    public @NonNull HTNode readNode(int seqNumber) throws ClosedChannelException {
        /* Do a cache lookup. If it's not present it will be loaded from disk */
        LOGGER.finest(() -> "[HtIo:CacheLookup] seqNum=" + seqNumber); //$NON-NLS-1$
        CacheKey key = new CacheKey(this, seqNumber);
        try {
            return checkNotNull(NODE_CACHE.get(key));

        } catch (ExecutionException e) {
            /* Get the inner exception that was generated */
            Throwable cause = e.getCause();
            if (cause instanceof ClosedChannelException) {
                throw (ClosedChannelException) cause;
            }
            /*
             * Other types of IOExceptions shouldn't happen at this point though.
             */
            Activator.getDefault().logError(e.getMessage(), e);
            throw new IllegalStateException();
        }
    }

    /**
     * Write the given node to disk.
     *
     * @param node
     *            The node to write.
     */
    public void writeNode(HTNode node) {
        try {
            int seqNumber = node.getSequenceNumber();

            /* "Write-back" the node into the cache */
            CacheKey key = new CacheKey(this, seqNumber);
            NODE_CACHE.put(key, node);

            /* Position ourselves at the start of the node and write it */
            synchronized (this) {
                seekFCToNodePos(fFileChannelOut, seqNumber);
                node.writeSelf(fFileChannelOut);
            }
        } catch (IOException e) {
            /* If we were able to open the file, we should be fine now... */
            Activator.getDefault().logError(e.getMessage(), e);
        }
    }

    /**
     * Get the output file channel, used for writing.
     *
     * @return The output file channel
     */
    public FileChannel getFcOut() {
        return fFileChannelOut;
    }

    /**
     * Retrieve the input stream from which to read the attribute tree.
     *
     * @param nodeOffset
     *            The offset in the file, in number of nodes. This should be
     *            after all the nodes.
     * @return The correctly positioned input stream
     */
    public FileInputStream supplyATReader(int nodeOffset) {
        try {
            /*
             * Position ourselves at the start of the Mapping section in the
             * file (which is right after the Blocks)
             */
            seekFCToNodePos(fFileChannelIn, nodeOffset);
        } catch (IOException e) {
            Activator.getDefault().logError(e.getMessage(), e);
        }
        return fFileInputStream;
    }

    /**
     * Close all file channels and streams.
     */
    public synchronized void closeFile() {
        try {
            fFileInputStream.close();
            fFileOutputStream.close();
        } catch (IOException e) {
            Activator.getDefault().logError(e.getMessage(), e);
        }
    }

    /**
     * Delete the history tree file
     */
    public synchronized void deleteFile() {
        closeFile();

        File historyTreeFile = fConfig.getStateFile();
        if (!historyTreeFile.delete()) {
            /* We didn't succeed in deleting the file */
            Activator.getDefault().logError("Failed to delete " + historyTreeFile.getName()); //$NON-NLS-1$
        }
    }

    /**
     * Seek the given FileChannel to the position corresponding to the node
     * that has the given sequence number.
     *
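     * For example (illustrative numbers, not taken from this file): with a
     * tree header of 4096 bytes and a block size of 65536 bytes, the node
     * with sequence number 3 starts at byte offset 4096 + 3 * 65536 = 200704.
     *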
     * @param fc
     *            the channel to seek
     * @param seqNumber
     *            the node sequence number to seek the channel to
     * @throws IOException
     *             If some other I/O error occurs
     */
    private void seekFCToNodePos(FileChannel fc, int seqNumber)
            throws IOException {
        /*
         * Cast to (long) is needed to make sure the result is a long too and
         * doesn't get truncated
         */
        fc.position(IHistoryTree.TREE_HEADER_SIZE
                + ((long) seqNumber) * fConfig.getBlockSize());
    }

}