import org.simantics.acorn.ClusterManager;
import org.simantics.acorn.Persistable;
-import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.exception.AcornAccessVerificationException;
+import org.simantics.acorn.exception.IllegalAcornStateException;
import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
import org.simantics.compressions.CompressionCodec;
import org.simantics.compressions.Compressions;
import org.simantics.db.exception.DatabaseException;
import org.simantics.db.service.Bytes;
import org.simantics.utils.datastructures.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import gnu.trove.list.array.TByteArrayList;
public class ClusterStreamChunk extends LRUObject<String, ClusterStreamChunk> implements Persistable {
+ private static final Logger LOGGER = LoggerFactory.getLogger(ClusterStreamChunk.class);
// 500KB is a fine chunk
private static int MAX_CHUNK_SIZE = 500*1024;
public ArrayList<ClusterUpdateOperation> operations = new ArrayList<ClusterUpdateOperation>();
// Stub
- public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, Path readDir, String id, int offset, int length) {
- super(LRU, id, readDir, "clusterStream", offset, length, false, false);
+ public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, Path readDir, String id, int offset, int length) throws AcornAccessVerificationException {
+ super(LRU, manager.getFileCache(), id, readDir, "clusterStream", offset, length, false, false);
this.manager = manager;
LRU.map(this);
}
// Creation
- public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, String id) {
- super(LRU, id, LRU.getDirectory(), "clusterStream", true, true);
+ public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, String id) throws AcornAccessVerificationException {
+ super(LRU, manager.getFileCache(), id, LRU.getDirectory(), "clusterStream", true, true);
this.manager = manager;
LRU.insert(this, accessTime);
}
makeResident(true);
ClusterUpdateOperation op = operations.get(chunkOffset);
- if(op == null) throw new IllegalStateException("Cluster Update Operation " + ccsId + " was not found.");
- if(op.ccs == null) throw new IllegalStateException("Cluster ChangeSet " + ccsId + " was not found.");
+ if(op == null) throw new IllegalAcornStateException("Cluster Update Operation " + ccsId + " was not found.");
+ if(op.ccs == null) throw new IllegalAcornStateException("Cluster ChangeSet " + ccsId + " was not found.");
UndoClusterUpdateProcessor proc = new UndoClusterUpdateProcessor(clusters, this, op.ccs);
- if(proc.version != ClusterChange.VERSION)
- return null;
// This cluster and CCS can still be under preparation => wait
clusters.clusterLRU.ensureUpdates(proc.getClusterUID());
}
- public void addOperation(ClusterUpdateOperation op) {
+ public void addOperation(ClusterUpdateOperation op) throws IllegalAcornStateException {
if(committed)
- throw new IllegalStateException();
+			throw new IllegalAcornStateException("Cannot add operation " + op + " to " + this + " if committed == true");
operations.add(op);
size += op.data.length;
// if(isCommitted()) {
}
@Override
- public boolean canBePersisted() {
+ public boolean canBePersisted() throws AcornAccessVerificationException {
if(!super.canBePersisted()) return false;
if(!isCommitted()) return false;
for(ClusterUpdateOperation op : operations) {
private static StreamDecompressor decompressor = new StreamDecompressor();
@Override
- public void fromFile(byte[] data_) {
+ public void fromFile(byte[] data_) throws IllegalAcornStateException, AcornAccessVerificationException {
try {
}
}
-
operations.add(op);
-
}
-
} catch (IOException e) {
-
- throw new IllegalStateException(e);
-
- }
-
+ throw new IllegalAcornStateException(e);
+ } catch (IllegalAcornStateException | AcornAccessVerificationException e) {
+ throw e;
+ }
}
@Override
protected boolean overwrite() {
return false;
}
-
+
+ @Override
+ public Logger getLogger() {
+ return LOGGER;
+ }
}
\ No newline at end of file
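
A minimal caller sketch of what this patch implies for call sites (hypothetical code, not part of the patch; manager, streamLRU, id, op and LOGGER are assumed to exist in the caller): the constructors now declare AcornAccessVerificationException and addOperation declares IllegalAcornStateException, both checked, so callers must catch or propagate them instead of relying on the previous unchecked IllegalStateException.

    try {
        // Create a chunk via the creation constructor and append one update operation.
        ClusterStreamChunk chunk = new ClusterStreamChunk(manager, streamLRU, id);
        chunk.addOperation(op);
    } catch (AcornAccessVerificationException | IllegalAcornStateException e) {
        // Hypothetical handling: log through the caller's own logger and rethrow.
        LOGGER.error("Failed to create or populate cluster stream chunk " + id, e);
        throw e;
    }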