X-Git-Url: https://gerrit.simantics.org/r/gitweb?p=simantics%2Fplatform.git;a=blobdiff_plain;f=bundles%2Forg.simantics.acorn%2Fsrc%2Forg%2Fsimantics%2Facorn%2FClusterManager.java;h=cb9ac578e1aade416162beffde3ad27b2051d4f9;hp=4c8df933777742532fcbf598f386362b81adafb5;hb=e5c006a3e29dcb1f29ae5bcc21ac28573bd37648;hpb=2aa77c56ec691aad39d1909bfbe4aff101e988fb

diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
index 4c8df9337..cb9ac578e 100644
--- a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
@@ -47,8 +47,9 @@ public class ClusterManager {
     private ArrayList<String> currentChanges = new ArrayList<String>();
 
     public final Path dbFolder;
-    public Path lastSessionDirectory;
-    public Path workingDirectory;
+    private FileCache fileCache;
+    Path lastSessionDirectory;
+    Path workingDirectory;
 
     public LRU<String, ClusterStreamChunk> streamLRU;
     public LRU<Long, ChangeSetInfo> csLRU;
@@ -67,8 +68,9 @@ public class ClusterManager {
      *
      */
 
-    public ClusterManager(Path dbFolder) {
+    public ClusterManager(Path dbFolder, FileCache fileCache) {
         this.dbFolder = dbFolder;
+        this.fileCache = fileCache;
     }
 
     public ArrayList<String> getChanges(long changeSetId) throws AcornAccessVerificationException, IllegalAcornStateException {
@@ -158,7 +160,7 @@ public class ClusterManager {
             String fileName = parts[0] + "." + parts[1] + ".cluster";
             Path from = dbFolder.resolve(readDirName).resolve(fileName);
             Path to = baseline.resolve(fileName);
-            System.err.println("purge copies " + from + " => " + to);
+            LOGGER.info("purge copies " + from + " => " + to);
             Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
             long first = new BigInteger(parts[0], 16).longValue();
             long second = new BigInteger(parts[1], 16).longValue();
@@ -175,7 +177,7 @@ public class ClusterManager {
             String fileName = parts[0] + ".extFile";
             Path from = dbFolder.resolve(readDirName).resolve(fileName);
             Path to = baseline.resolve(fileName);
-            System.err.println("purge copies " + from + " => " + to);
+            LOGGER.info("purge copies " + from + " => " + to);
             Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
             FileInfo info = fileLRU.getWithoutMutex(parts[0]);
             info.moveTo(baseline);
@@ -187,7 +189,7 @@ public class ClusterManager {
             String readDirName = parts[1];
             if(!readDirName.equals(currentDir)) {
                 ClusterStreamChunk chunk = streamLRU.purge(parts[0]);
-                System.err.println("purge removes " + chunk);
+                LOGGER.info("purge removes " + chunk);
             }
         }
 
@@ -198,7 +200,7 @@ public class ClusterManager {
             if(!readDirName.equals(currentDir)) {
                 Long revisionId = Long.parseLong(parts[0]);
                 ChangeSetInfo info = csLRU.purge(revisionId);
-                System.err.println("purge removes " + info);
+                LOGGER.info("purge removes " + info);
             }
 //            Path readDir = dbFolder.resolve(parts[1]);
 //            Long revisionId = Long.parseLong(parts[0]);
@@ -232,7 +234,7 @@ public class ClusterManager {
 
     void tryPurgeDirectory(Path f) {
 
-        System.err.println("purge deletes " + f);
+        LOGGER.info("purge deletes " + f);
 
         String currentDir = f.getFileName().toString();
         if(currentDir.endsWith("db"))
@@ -243,7 +245,7 @@ public class ClusterManager {
 
         int ordinal = Integer.parseInt(currentDir);
         if(ordinal < mainState.headDir - 1) {
-            System.err.println("purge deletes " + f);
+            LOGGER.info("purge deletes " + f);
             FileUtils.deleteDir(f.toFile());
         }
 
@@ -255,7 +257,7 @@ public class ClusterManager {
             throw cause;
         // Maximum autosave frequency is once per 10 seconds
         if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) {
-//            System.err.println("lastSnapshot too early");
+//            LOGGER.info("lastSnapshot too early");
             return false;
         }
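Note: the hunks above move purge and snapshot tracing from System.err to calls on LOGGER, but the declaration of that field is not visible in this diff's hunk context. A minimal sketch of what the calls presuppose, assuming an SLF4J logger (the facade used elsewhere in the platform); the exact declaration is an assumption:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ClusterManager {
        // Assumed declaration; not shown in this change's visible hunks.
        private static final Logger LOGGER = LoggerFactory.getLogger(ClusterManager.class);
    }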
@@ -293,9 +295,7 @@ public class ClusterManager {
         }
 
         workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
-        if (!Files.exists(workingDirectory)) {
-            Files.createDirectories(workingDirectory);
-        }
+        Files.createDirectories(workingDirectory);
 
         cssi.updateWriteDirectory(workingDirectory);
 
@@ -370,10 +370,10 @@ public class ClusterManager {
 //            e.printStackTrace();
 //        }
 
-//        System.err.println("-- load statistics --");
+//        LOGGER.info("-- load statistics --");
 //        for(Pair entry :
 //                CollectionUtils.valueSortedEntries(histogram)) {
-//            System.err.println(" " + entry.second + " " + entry.first);
+//            LOGGER.info(" " + entry.second + " " + entry.first);
 //        }
 //    }
 
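Note: dropping the Files.exists() guard in the first hunk above is behavior-preserving, because java.nio.file.Files.createDirectories is specified to succeed without error when the directory already exists; it fails only if the path exists as a non-directory. The unconditional call is simpler and avoids a check-then-act race. A minimal standalone illustration (the path below is hypothetical):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class CreateDirectoriesDemo {
        public static void main(String[] args) throws Exception {
            Path dir = Paths.get("demo-db", "7");   // hypothetical directory
            Files.createDirectories(dir);           // creates demo-db/7
            Files.createDirectories(dir);           // already exists: silently succeeds
        }
    }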
@@ -406,35 +406,39 @@ public class ClusterManager {
         lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
 
         // Head State
-        try {
-            state = HeadState.load(lastSessionDirectory);
-        } catch (InvalidHeadStateException e) {
-            // For backwards compatibility only!
-            Throwable cause = e.getCause();
-            if (cause instanceof Throwable) {
-                try {
-                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
-
-                    HeadState newState = new HeadState();
-                    newState.clusters = oldState.clusters;
-                    newState.cs = oldState.cs;
-                    newState.files = oldState.files;
-                    newState.stream = oldState.stream;
-                    newState.headChangeSetId = oldState.headChangeSetId;
-                    newState.reservedIds = oldState.reservedIds;
-                    newState.transactionId = oldState.transactionId;
-                    state = newState;
-                } catch (InvalidHeadStateException e1) {
-                    throw new IOException("Could not load HeadState due to corruption", e1);
-                }
-            } else {
-                // This should never happen as MainState.load() checks the integrity
-                // of head.state files and rolls back in cases of corruption until a
-                // consistent state is found (could be case 0 - initial db state)
-                // IF this does happen something is completely wrong
-                throw new IOException("Could not load HeadState due to corruption", e);
-            }
-        }
+        if (mainState.isInitial()) {
+            state = new HeadState();
+        } else {
+            try {
+                state = HeadState.load(lastSessionDirectory);
+            } catch (InvalidHeadStateException e) {
+                // For backwards compatibility only!
+                Throwable cause = e.getCause();
+                if (cause instanceof Throwable) {
+                    try {
+                        org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+
+                        HeadState newState = new HeadState();
+                        newState.clusters = oldState.clusters;
+                        newState.cs = oldState.cs;
+                        newState.files = oldState.files;
+                        newState.stream = oldState.stream;
+                        newState.headChangeSetId = oldState.headChangeSetId;
+                        newState.reservedIds = oldState.reservedIds;
+                        newState.transactionId = oldState.transactionId;
+                        state = newState;
+                    } catch (InvalidHeadStateException e1) {
+                        throw new IOException("Could not load HeadState due to corruption", e1);
+                    }
+                } else {
+                    // This should never happen as MainState.load() checks the integrity
+                    // of head.state files and rolls back in cases of corruption until a
+                    // consistent state is found (could be case 0 - initial db state)
+                    // IF this does happen something is completely wrong
+                    throw new IOException("Could not load HeadState due to corruption", e);
+                }
+            }
+        }
         try {
             workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
             Files.createDirectories(workingDirectory);
@@ -460,17 +464,17 @@ public class ClusterManager {
             }
             // Files
             for (String fileKey : state.files) {
-                // System.err.println("loadFile: " + fileKey);
+                // LOGGER.info("loadFile: " + fileKey);
                 String[] parts = fileKey.split("#");
                 Path readDir = dbFolder.resolve(parts[1]);
                 int offset = Integer.parseInt(parts[2]);
                 int length = Integer.parseInt(parts[3]);
-                FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length);
+                FileInfo info = new FileInfo(fileLRU, fileCache, readDir, parts[0], offset, length);
                 fileLRU.map(info);
             }
             // Update chunks
             for (String fileKey : state.stream) {
-                // System.err.println("loadStream: " + fileKey);
+                // LOGGER.info("loadStream: " + fileKey);
                 String[] parts = fileKey.split("#");
                 Path readDir = dbFolder.resolve(parts[1]);
                 int offset = Integer.parseInt(parts[2]);
@@ -486,7 +490,7 @@ public class ClusterManager {
                 Long revisionId = Long.parseLong(parts[0]);
                 int offset = Integer.parseInt(parts[2]);
                 int length = Integer.parseInt(parts[3]);
-                ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length);
+                ChangeSetInfo info = new ChangeSetInfo(csLRU, fileCache, readDir, revisionId, offset, length);
                 csLRU.map(info);
             }
 
@@ -600,7 +604,7 @@ public class ClusterManager {
         try {
             ArrayList<String> csids = new ArrayList<String>(currentChanges);
             currentChanges = new ArrayList<String>();
-            new ChangeSetInfo(csLRU, changeSetId, data, csids);
+            new ChangeSetInfo(csLRU, fileCache, changeSetId, data, csids);
         } catch (Throwable t) {
             throw new IllegalAcornStateException(t);
         } finally {
@@ -667,7 +671,7 @@ public class ClusterManager {
         try {
             info = fileLRU.get(key);
             if (info == null) {
-                info = new FileInfo(fileLRU, key, (int) (offset + size));
+                info = new FileInfo(fileLRU, fileCache, key, (int) (offset + size));
             }
         } catch (Throwable t) {
             throw new IllegalAcornStateException(t);
@@ -703,5 +707,9 @@ public class ClusterManager {
     public long getTailChangeSetId() {
         return state.tailChangeSetId;
     }
-
+
+    public FileCache getFileCache() {
+        return fileCache;
+    }
+
 }
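Note: since the constructor now requires a FileCache, every call site must supply one; the same cache instance is then threaded into the FileInfo and ChangeSetInfo objects created above and exposed through the new getFileCache() accessor. A hypothetical call-site sketch; FileCache's own API is not part of this diff, so its construction below is assumed for illustration only:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Sketch assumed to live in org.simantics.acorn, so ClusterManager and
    // FileCache resolve without imports; the no-arg constructor is a guess.
    public class AcornStartupSketch {
        public static ClusterManager open() {
            Path dbFolder = Paths.get("acorn-db");       // hypothetical db location
            FileCache fileCache = new FileCache();       // assumed no-arg constructor
            ClusterManager manager = new ClusterManager(dbFolder, fileCache);
            assert manager.getFileCache() == fileCache;  // accessor added in this change
            return manager;
        }
    }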