X-Git-Url: https://gerrit.simantics.org/r/gitweb?p=simantics%2Fplatform.git;a=blobdiff_plain;f=bundles%2Forg.simantics.acorn%2Fsrc%2Forg%2Fsimantics%2Facorn%2FClusterManager.java;h=40c5de37e8c1991e6e5ce846e2b90e7b651b6c5e;hp=5b8e5abb8fb8589d08bcd5e0a4b03577e8c078ba;hb=c26409b1caf2f1e560d37c5befd11b442399c3fe;hpb=65cf12eb906873cf56c10bd1f8d04ec8645e6cbd diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java index 5b8e5abb8..40c5de37e 100644 --- a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java @@ -2,15 +2,21 @@ package org.simantics.acorn; import java.io.IOException; import java.math.BigInteger; +import java.nio.file.CopyOption; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.exception.AcornAccessVerificationException; +import org.simantics.acorn.exception.IllegalAcornStateException; +import org.simantics.acorn.exception.InvalidHeadStateException; import org.simantics.acorn.internal.ClusterSupport2; import org.simantics.acorn.lru.ChangeSetInfo; import org.simantics.acorn.lru.ClusterInfo; @@ -18,22 +24,27 @@ import org.simantics.acorn.lru.ClusterLRU; import org.simantics.acorn.lru.ClusterStreamChunk; import org.simantics.acorn.lru.FileInfo; import org.simantics.acorn.lru.LRU; +import org.simantics.databoard.file.RuntimeIOException; import org.simantics.db.ClusterCreator; -import org.simantics.db.ServiceLocator; import org.simantics.db.Database.Session.ClusterIds; import org.simantics.db.Database.Session.ResourceSegment; +import org.simantics.db.ServiceLocator; import org.simantics.db.exception.DatabaseException; import org.simantics.db.impl.ClusterBase; import org.simantics.db.impl.ClusterI; import org.simantics.db.impl.ClusterSupport; import org.simantics.db.procore.cluster.ClusterTraits; -import org.simantics.db.server.ProCoreException; import org.simantics.db.service.ClusterSetsSupport; import org.simantics.db.service.ClusterUID; +import org.simantics.utils.FileUtils; import org.simantics.utils.threads.logger.ITask; import org.simantics.utils.threads.logger.ThreadLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ClusterManager { + + final static Logger LOGGER = LoggerFactory.getLogger(ClusterManager.class); private ArrayList currentChanges = new ArrayList(); @@ -62,52 +73,50 @@ public class ClusterManager { this.dbFolder = dbFolder; } - public ArrayList getChanges(long changeSetId) { + public ArrayList getChanges(long changeSetId) throws AcornAccessVerificationException, IllegalAcornStateException { ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId); info.acquireMutex(); try { info.makeResident(); - return info.getCSSIds(); - } catch (Throwable t) { - throw new IllegalStateException(t); + return info.getCCSIds(); } finally { info.releaseMutex(); } } - public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException { + public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { return clusterLRU.getClusterByClusterKey(clusterKey); } - 
public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException { + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { return clusterLRU.getClusterByClusterUIDOrMake(clusterUID); } - public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException { + public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { return clusterLRU.getClusterByClusterUIDOrMakeProxy(clusterUID); } - public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) throws AcornAccessVerificationException { return clusterLRU.getClusterKeyByClusterUIDOrMake(clusterUID); } - public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) { + public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) throws IllegalAcornStateException, AcornAccessVerificationException { return clusterLRU.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID); } - public int getClusterKeyByUID(long id1, long id2) throws DatabaseException { + public int getClusterKeyByUID(long id1, long id2) throws DatabaseException, IllegalAcornStateException { return clusterLRU.getClusterKeyByUIDWithoutMutex(id1, id2); } - public T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException { + public T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException { return clusterLRU.getClusterProxyByResourceKey(resourceKey); } - public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException { + public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException, AcornAccessVerificationException { return clusterLRU.getClusterUIDByResourceKey(resourceKey); } - public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException { + public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException, IllegalAcornStateException, AcornAccessVerificationException { return clusterLRU.getClusterUIDByResourceKeyWithoutMutex(resourceKey); } @@ -125,69 +134,199 @@ public class ClusterManager { } } - public synchronized boolean makeSnapshot(ServiceLocator locator, boolean force) throws IOException { - - // Maximum autosave frequency is per 60s - if(!force && System.nanoTime() - lastSnapshot < 10*1000000000L) { -// System.err.println("lastSnapshot too early"); - return false; - } - - // Cluster files are always there - // Nothing has been written => no need to do anything - long amountOfFiles = countFiles(workingDirectory); - if(!force && amountOfFiles < 3) { -// System.err.println("amountOfFiles < 3"); - return false; + // Add check to make sure if it safe to make snapshot (used with cancel which is not yet supported and may cause corrupted head.state writing) + private AtomicBoolean safeToMakeSnapshot = new AtomicBoolean(true); + private IllegalAcornStateException cause; + + public synchronized void purge(ServiceLocator locator) throws IllegalAcornStateException { + + try { + + // Schedule writing of all data to disk + refreshHeadState(); + // Wait for files to be written + synchronizeWorkingDirectory(); + + String currentDir = workingDirectory.getFileName().toString(); + Path baseline = 
workingDirectory.resolveSibling(currentDir + "_baseline"); + + Files.createDirectories(baseline); + + for(String clusterKey : state.clusters) { + String[] parts1 = clusterKey.split("#"); + String[] parts = parts1[0].split("\\."); + String readDirName = parts1[1]; + if(!readDirName.equals(currentDir)) { + String fileName = parts[0] + "." + parts[1] + ".cluster"; + Path from = dbFolder.resolve(readDirName).resolve(fileName); + Path to = baseline.resolve(fileName); + System.err.println("purge copies " + from + " => " + to); + Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES); + long first = new BigInteger(parts[0], 16).longValue(); + long second = new BigInteger(parts[1], 16).longValue(); + ClusterUID uuid = ClusterUID.make(first, second); + ClusterInfo info = clusterLRU.getWithoutMutex(uuid); + info.moveTo(baseline); + } + } + + for (String fileKey : state.files) { + String[] parts = fileKey.split("#"); + String readDirName = parts[1]; + if(!readDirName.equals(currentDir)) { + String fileName = parts[0] + ".extFile"; + Path from = dbFolder.resolve(readDirName).resolve(fileName); + Path to = baseline.resolve(fileName); + System.err.println("purge copies " + from + " => " + to); + Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES); + FileInfo info = fileLRU.getWithoutMutex(parts[0]); + info.moveTo(baseline); + } + } + + for (String fileKey : state.stream) { + String[] parts = fileKey.split("#"); + String readDirName = parts[1]; + if(!readDirName.equals(currentDir)) { + ClusterStreamChunk chunk = streamLRU.purge(parts[0]); + System.err.println("purge removes " + chunk); + } + } + + // Change sets + for (String fileKey : state.cs) { + String[] parts = fileKey.split("#"); + String readDirName = parts[1]; + if(!readDirName.equals(currentDir)) { + Long revisionId = Long.parseLong(parts[0]); + ChangeSetInfo info = csLRU.purge(revisionId); + System.err.println("purge removes " + info); + } +// Path readDir = dbFolder.resolve(parts[1]); +// Long revisionId = Long.parseLong(parts[0]); +// int offset = Integer.parseInt(parts[2]); +// int length = Integer.parseInt(parts[3]); +// ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length); +// csLRU.map(info); + } + + state.tailChangeSetId = state.headChangeSetId; + + makeSnapshot(locator, true); + + Files.walk(dbFolder, 1).filter(Files::isDirectory).forEach(f -> tryPurgeDirectory(f)); + + } catch (IllegalAcornStateException e) { + notSafeToMakeSnapshot(e); + throw e; + } catch (IOException e) { + IllegalAcornStateException e1 = new IllegalAcornStateException(e); + notSafeToMakeSnapshot(e1); + throw e1; + } catch (AcornAccessVerificationException e) { + IllegalAcornStateException e1 = new IllegalAcornStateException(e); + notSafeToMakeSnapshot(e1); + throw e1; } - - System.err.println("makeSnapshot"); - - // Schedule writing of all data to disk - refreshHeadState(); - - // Wait for all files to be written - clusterLRU.shutdown(); - fileLRU.shutdown(); - streamLRU.shutdown(); - csLRU.shutdown(); + + } + + void tryPurgeDirectory(Path f) { - persistHeadState(); - mainState.save(dbFolder); + System.err.println("purge deletes " + f); - ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); - cssi.save(); + String currentDir = f.getFileName().toString(); + if(currentDir.endsWith("db")) + return; - amountOfFiles = countFiles(workingDirectory); - - System.err.println(" -finished: amount of files is " + amountOfFiles); + if(currentDir.endsWith("_baseline")) + currentDir = currentDir.replace("_baseline", ""); - 
workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir)); - if (!Files.exists(workingDirectory)) { - Files.createDirectories(workingDirectory); + int ordinal = Integer.parseInt(currentDir); + if(ordinal < mainState.headDir - 1) { + System.err.println("purge deletes " + f); + FileUtils.deleteDir(f.toFile()); } - - cssi.updateReadAndWriteDirectories(lastSessionDirectory, workingDirectory); - - clusterLRU.setWriteDir(workingDirectory); - fileLRU.setWriteDir(workingDirectory); - streamLRU.setWriteDir(workingDirectory); - csLRU.setWriteDir(workingDirectory); - - clusterLRU.resume(); - fileLRU.resume(); - streamLRU.resume(); - csLRU.resume(); - - lastSnapshot = System.nanoTime(); - - return true; } - - public void refreshHeadState() throws IOException { + public synchronized boolean makeSnapshot(ServiceLocator locator, boolean fullSave) throws IllegalAcornStateException { + try { + if (!safeToMakeSnapshot.get()) + throw cause; + // Maximum autosave frequency is per 60s + if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) { + // System.err.println("lastSnapshot too early"); + return false; + } + + // Cluster files are always there + // Nothing has been written => no need to do anything + long amountOfFiles = countFiles(workingDirectory); + if(!fullSave && amountOfFiles == 0) { + // System.err.println("amountOfFiles < 3"); + return false; + } + + LOGGER.info("makeSnapshot"); + + // Schedule writing of all data to disk + refreshHeadState(); + + // Wait for all files to be written + clusterLRU.shutdown(); + fileLRU.shutdown(); + streamLRU.shutdown(); + csLRU.shutdown(); + + // Lets check if it is still safe to make a snapshot + if (!safeToMakeSnapshot.get()) + throw cause; + + ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); + cssi.save(); + + persistHeadState(); + + if (fullSave) + mainState.save(dbFolder); + + amountOfFiles = countFiles(workingDirectory); + + LOGGER.info(" -finished: amount of files is {}", amountOfFiles); + + workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir)); + if (!Files.exists(workingDirectory)) { + Files.createDirectories(workingDirectory); + } + + cssi.updateWriteDirectory(workingDirectory); + + clusterLRU.setWriteDir(workingDirectory); + fileLRU.setWriteDir(workingDirectory); + streamLRU.setWriteDir(workingDirectory); + csLRU.setWriteDir(workingDirectory); + + clusterLRU.resume(); + fileLRU.resume(); + streamLRU.resume(); + csLRU.resume(); + + lastSnapshot = System.nanoTime(); + + return true; + } catch (IllegalAcornStateException e) { + notSafeToMakeSnapshot(e); + throw e; + } catch (IOException e) { + IllegalAcornStateException e1 = new IllegalAcornStateException(e); + notSafeToMakeSnapshot(e1); + throw e1; + } + } + + private void refreshHeadState() throws IOException, IllegalAcornStateException { state.clusters.clear(); state.files.clear(); state.stream.clear(); @@ -197,13 +336,15 @@ public class ClusterManager { fileLRU.persist(state.files); streamLRU.persist(state.stream); csLRU.persist(state.cs); - } - public void persistHeadState() throws IOException { - + private void synchronizeWorkingDirectory() throws IOException { // Sync current working directory Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath); + } + + private void persistHeadState() throws IOException { + synchronizeWorkingDirectory(); state.save(workingDirectory); mainState.headDir++; } @@ -241,7 +382,7 @@ public class ClusterManager { // } - private void acquireAll() { + private void 
acquireAll() throws IllegalAcornStateException { clusterLRU.acquireMutex(); fileLRU.acquireMutex(); streamLRU.acquireMutex(); @@ -255,10 +396,16 @@ public class ClusterManager { clusterLRU.releaseMutex(); } + private AtomicBoolean rollback = new AtomicBoolean(false); + + boolean rolledback() { + return rollback.get(); + } + public void load() throws IOException { // Main state - mainState = MainState.load(dbFolder); + mainState = MainState.load(dbFolder, t -> rollback.set(true)); lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1)); @@ -292,73 +439,74 @@ public class ClusterManager { throw new IOException("Could not load HeadState due to corruption", e); } } - - workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir)); - Files.createDirectories(workingDirectory); - - csLRU = new LRU("Change Set", workingDirectory); - streamLRU = new LRU("Cluster Stream", workingDirectory); - clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory); - fileLRU = new LRU("External Value", workingDirectory); - - acquireAll(); - - // Clusters - for (String clusterKey : state.clusters) { - String[] parts1 = clusterKey.split("#"); - String[] parts = parts1[0].split("\\."); - long first = new BigInteger(parts[0], 16).longValue(); - long second = new BigInteger(parts[1], 16).longValue(); - ClusterUID uuid = ClusterUID.make(first, second); - Path readDir = dbFolder.resolve(parts1[1]); - int offset = Integer.parseInt(parts1[2]); - int length = Integer.parseInt(parts1[3]); - clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length)); - } - // Files - for (String fileKey : state.files) { -// System.err.println("loadFile: " + fileKey); - String[] parts = fileKey.split("#"); - Path readDir = dbFolder.resolve(parts[1]); - int offset = Integer.parseInt(parts[2]); - int length = Integer.parseInt(parts[3]); - FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length); - fileLRU.map(info); - } - // Update chunks - for (String fileKey : state.stream) { -// System.err.println("loadStream: " + fileKey); - String[] parts = fileKey.split("#"); - Path readDir = dbFolder.resolve(parts[1]); - int offset = Integer.parseInt(parts[2]); - int length = Integer.parseInt(parts[3]); - ClusterStreamChunk info = new ClusterStreamChunk(this, - streamLRU, readDir, parts[0], offset, length); - streamLRU.map(info); - } - // Change sets - for (String fileKey : state.cs) { - String[] parts = fileKey.split("#"); - Path readDir = dbFolder.resolve(parts[1]); - Long revisionId = Long.parseLong(parts[0]); - int offset = Integer.parseInt(parts[2]); - int length = Integer.parseInt(parts[3]); - ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length); - csLRU.map(info); + try { + workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir)); + Files.createDirectories(workingDirectory); + + csLRU = new LRU(this, "Change Set", workingDirectory); + streamLRU = new LRU(this, "Cluster Stream", workingDirectory); + clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory); + fileLRU = new LRU(this, "External Value", workingDirectory); + + acquireAll(); + + // Clusters + for (String clusterKey : state.clusters) { + String[] parts1 = clusterKey.split("#"); + String[] parts = parts1[0].split("\\."); + long first = new BigInteger(parts[0], 16).longValue(); + long second = new BigInteger(parts[1], 16).longValue(); + ClusterUID uuid = ClusterUID.make(first, second); + Path readDir = dbFolder.resolve(parts1[1]); + int offset = 
Integer.parseInt(parts1[2]); + int length = Integer.parseInt(parts1[3]); + clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length)); + } + // Files + for (String fileKey : state.files) { + // System.err.println("loadFile: " + fileKey); + String[] parts = fileKey.split("#"); + Path readDir = dbFolder.resolve(parts[1]); + int offset = Integer.parseInt(parts[2]); + int length = Integer.parseInt(parts[3]); + FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length); + fileLRU.map(info); + } + // Update chunks + for (String fileKey : state.stream) { + // System.err.println("loadStream: " + fileKey); + String[] parts = fileKey.split("#"); + Path readDir = dbFolder.resolve(parts[1]); + int offset = Integer.parseInt(parts[2]); + int length = Integer.parseInt(parts[3]); + ClusterStreamChunk info = new ClusterStreamChunk(this, + streamLRU, readDir, parts[0], offset, length); + streamLRU.map(info); + } + // Change sets + for (String fileKey : state.cs) { + String[] parts = fileKey.split("#"); + Path readDir = dbFolder.resolve(parts[1]); + Long revisionId = Long.parseLong(parts[0]); + int offset = Integer.parseInt(parts[2]); + int length = Integer.parseInt(parts[3]); + ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length); + csLRU.map(info); + } + + releaseAll(); + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + // ROLLBACK ONE DIR UNTIL WE ARE FINE! + throw new IOException(e); } - - releaseAll(); - } - public T clone(ClusterUID uid, ClusterCreator creator) - throws DatabaseException { + public T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException, AcornAccessVerificationException, IllegalAcornStateException, IOException { clusterLRU.ensureUpdates(uid); ClusterInfo info = clusterLRU.getWithoutMutex(uid); return info.clone(uid, creator); - } //private int loadCounter = 0; @@ -375,38 +523,32 @@ public class ClusterManager { static Map tasks = new HashMap(); - public void update(ClusterUID uid, ClusterImpl clu) { - + public void update(ClusterUID uid, ClusterImpl clu) throws AcornAccessVerificationException, IllegalAcornStateException { ClusterInfo info = clusterLRU.getWithoutMutex(uid); info.acquireMutex(); try { info.update(clu); - } catch (Throwable t) { - throw new IllegalStateException(t); } finally { info.releaseMutex(); } - } public long getClusterIdOrCreate(ClusterUID clusterUID) { return 1; } - public int getResourceKey(ClusterUID uid, int index) { + public int getResourceKey(ClusterUID uid, int index) throws AcornAccessVerificationException { return clusterLRU.getResourceKey(uid, index); } - public int getResourceKeyWitoutMutex(ClusterUID uid, int index) { + public int getResourceKeyWitoutMutex(ClusterUID uid, int index) throws IllegalAcornStateException { return clusterLRU.getResourceKeyWithoutMutex(uid, index); } - public ClusterIds getClusterIds() throws ProCoreException { - + public ClusterIds getClusterIds() throws IllegalAcornStateException { clusterLRU.acquireMutex(); try { - Collection infos = clusterLRU.values(); final int status = infos.size(); final long[] firsts = new long[status]; @@ -439,61 +581,54 @@ public class ClusterManager { }; } catch (Throwable t) { - throw new IllegalStateException(t); + throw new IllegalAcornStateException(t); } finally { clusterLRU.releaseMutex(); } - } - public void addIntoCurrentChangeSet(String ccs) { - + public void addIntoCurrentChangeSet(String ccs) throws IllegalAcornStateException { csLRU.acquireMutex(); try { - 
currentChanges.add(ccs); - } catch (Throwable t) { - throw new IllegalStateException(t); + throw new IllegalAcornStateException(t); } finally { - csLRU.releaseMutex(); - } - } - public void commitChangeSet(long changeSetId, byte[] data) { + public void commitChangeSet(long changeSetId, byte[] data) throws IllegalAcornStateException { csLRU.acquireMutex(); try { ArrayList csids = new ArrayList(currentChanges); currentChanges = new ArrayList(); new ChangeSetInfo(csLRU, changeSetId, data, csids); } catch (Throwable t) { - throw new IllegalStateException(t); + throw new IllegalAcornStateException(t); } finally { csLRU.releaseMutex(); } } - public byte[] getMetadata(long changeSetId) { + public byte[] getMetadata(long changeSetId) throws AcornAccessVerificationException, IllegalAcornStateException { ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId); if (info == null) return null; - info.acquireMutex(); - try { - return info.getMetadataBytes(); - } catch (Throwable t) { - throw new IllegalStateException(t); + info.acquireMutex(); + try { + return info.getMetadataBytes(); + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + throw e; + } catch (Throwable t) { + throw new IllegalAcornStateException(t); } finally { info.releaseMutex(); } - } - public byte[] getResourceFile(final byte[] clusterUID, - final int resourceIndex) throws ProCoreException { + public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws AcornAccessVerificationException, IllegalAcornStateException { ClusterUID uid = ClusterUID.make(clusterUID, 0); String key = uid.toString() + "_" + resourceIndex; @@ -502,18 +637,16 @@ public class ClusterManager { info.acquireMutex(); try { return info.getResourceFile(); + } catch (IllegalAcornStateException | AcornAccessVerificationException e) { + throw e; } catch (Throwable t) { - throw new IllegalStateException(t); + throw new IllegalAcornStateException(t); } finally { info.releaseMutex(); } - } - public ResourceSegment getResourceSegment(final byte[] clusterUID, - final int resourceIndex, final long segmentOffset, short segmentSize) - throws ProCoreException { - + public ResourceSegment getResourceSegment(final byte[] clusterUID, final int resourceIndex, final long segmentOffset, short segmentSize) throws AcornAccessVerificationException, IllegalAcornStateException { ClusterUID uid = ClusterUID.make(clusterUID, 0); String key = uid.toString() + "_" + resourceIndex; @@ -523,55 +656,40 @@ public class ClusterManager { try { return info.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize); } catch (Throwable t) { - throw new IllegalStateException(t); + throw new IllegalAcornStateException(t); } finally { info.releaseMutex(); } - } - public void modiFileEx(ClusterUID uid, int resourceKey, long offset, - long size, byte[] bytes, long pos, ClusterSupport support) { - + public void modiFileEx(ClusterUID uid, int resourceKey, long offset, long size, byte[] bytes, long pos, ClusterSupport support) throws IllegalAcornStateException { try { - - String key = uid.toString() - + "_" - + ClusterTraits - .getResourceIndexFromResourceKey(resourceKey); + String key = uid.toString() + "_" + ClusterTraits.getResourceIndexFromResourceKey(resourceKey); FileInfo info = null; - fileLRU.acquireMutex(); - try { - info = fileLRU.get(key); - if (info == null) + if (info == null) { info = new FileInfo(fileLRU, key, (int) (offset + size)); - - + } } catch (Throwable t) { - throw new IllegalStateException(t); + throw new 
IllegalAcornStateException(t); } finally { - fileLRU.releaseMutex(); - } info.acquireMutex(); try { info.updateData(bytes, offset, pos, size); } catch (Throwable t) { - throw new IllegalStateException(t); + throw new IllegalAcornStateException(t); } finally { info.releaseMutex(); } - } catch (DatabaseException e) { e.printStackTrace(); } - } public void shutdown() { @@ -581,4 +699,13 @@ public class ClusterManager { csLRU.shutdown(); } + public void notSafeToMakeSnapshot(IllegalAcornStateException t) { + this.safeToMakeSnapshot.compareAndSet(true, false); + this.cause = t; + } + + public long getTailChangeSetId() { + return state.tailChangeSetId; + } + }
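
Note on the revised error handling above (not part of the commit itself): getChanges, makeSnapshot, purge and most other ClusterManager entry points now declare the checked exceptions AcornAccessVerificationException and IllegalAcornStateException instead of wrapping failures in an unchecked IllegalStateException, and a failed snapshot marks the manager as unsafe via notSafeToMakeSnapshot so that later snapshot attempts rethrow the stored cause. The following is a minimal caller-side sketch under those assumptions; the SnapshotDriver class and its field names are illustrative and do not exist in the Simantics code base.

    // Illustrative sketch only -- assumes the ClusterManager API introduced in this diff.
    import org.simantics.acorn.ClusterManager;
    import org.simantics.acorn.exception.IllegalAcornStateException;
    import org.simantics.db.ServiceLocator;

    class SnapshotDriver {
        private final ClusterManager manager;
        private final ServiceLocator locator;

        SnapshotDriver(ClusterManager manager, ServiceLocator locator) {
            this.manager = manager;
            this.locator = locator;
        }

        // Attempts a routine (non-forced) snapshot. makeSnapshot(locator, false) returns false
        // when it is called again within the minimum autosave interval or when nothing has been
        // written to the working directory since the last snapshot.
        boolean trySnapshot() {
            try {
                return manager.makeSnapshot(locator, false);
            } catch (IllegalAcornStateException e) {
                // makeSnapshot has already called notSafeToMakeSnapshot(e) before rethrowing,
                // so every subsequent snapshot attempt will fail with the same stored cause;
                // no retry is attempted here.
                return false;
            }
        }
    }

The same pattern applies to purge(ServiceLocator), which also declares IllegalAcornStateException and poisons further snapshots on failure.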