X-Git-Url: https://gerrit.simantics.org/r/gitweb?a=blobdiff_plain;f=bundles%2Forg.simantics.acorn%2Fsrc%2Forg%2Fsimantics%2Facorn%2FClusterManager.java;h=85daddd22af6d178154c3fcbaa7d3b756b61d894;hb=1dfeb7d5c49b1391cd9d877e1eddab18995cb151;hp=4c8df933777742532fcbf598f386362b81adafb5;hpb=04a7cb7f4ec34ddd4747723fd51ea22edd5ede0b;p=simantics%2Fplatform.git

diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
index 4c8df9337..85daddd22 100644
--- a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
@@ -158,7 +158,7 @@ public class ClusterManager {
             String fileName = parts[0] + "." + parts[1] + ".cluster";
             Path from = dbFolder.resolve(readDirName).resolve(fileName);
             Path to = baseline.resolve(fileName);
-            System.err.println("purge copies " + from + " => " + to);
+            LOGGER.info("purge copies " + from + " => " + to);
             Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
             long first = new BigInteger(parts[0], 16).longValue();
             long second = new BigInteger(parts[1], 16).longValue();
@@ -175,7 +175,7 @@ public class ClusterManager {
             String fileName = parts[0] + ".extFile";
             Path from = dbFolder.resolve(readDirName).resolve(fileName);
             Path to = baseline.resolve(fileName);
-            System.err.println("purge copies " + from + " => " + to);
+            LOGGER.info("purge copies " + from + " => " + to);
             Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
             FileInfo info = fileLRU.getWithoutMutex(parts[0]);
             info.moveTo(baseline);
@@ -187,7 +187,7 @@ public class ClusterManager {
             String readDirName = parts[1];
             if(!readDirName.equals(currentDir)) {
                 ClusterStreamChunk chunk = streamLRU.purge(parts[0]);
-                System.err.println("purge removes " + chunk);
+                LOGGER.info("purge removes " + chunk);
             }
         }
 
@@ -198,7 +198,7 @@ public class ClusterManager {
             if(!readDirName.equals(currentDir)) {
                 Long revisionId = Long.parseLong(parts[0]);
                 ChangeSetInfo info = csLRU.purge(revisionId);
-                System.err.println("purge removes " + info);
+                LOGGER.info("purge removes " + info);
             }
             // Path readDir = dbFolder.resolve(parts[1]);
             // Long revisionId = Long.parseLong(parts[0]);
@@ -232,7 +232,7 @@ public class ClusterManager {
 
     void tryPurgeDirectory(Path f) {
 
-        System.err.println("purge deletes " + f);
+        LOGGER.info("purge deletes " + f);
 
         String currentDir = f.getFileName().toString();
         if(currentDir.endsWith("db"))
@@ -243,7 +243,7 @@ public class ClusterManager {
 
         int ordinal = Integer.parseInt(currentDir);
         if(ordinal < mainState.headDir - 1) {
-            System.err.println("purge deletes " + f);
+            LOGGER.info("purge deletes " + f);
             FileUtils.deleteDir(f.toFile());
         }
 
@@ -255,7 +255,7 @@ public class ClusterManager {
                 throw cause;
         // Maximum autosave frequency is per 60s
         if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) {
-            // System.err.println("lastSnapshot too early");
+            // LOGGER.info("lastSnapshot too early");
             return false;
         }
 
@@ -370,10 +370,10 @@ public class ClusterManager {
         // e.printStackTrace();
         // }
 
-        // System.err.println("-- load statistics --");
+        // LOGGER.info("-- load statistics --");
         // for(Pair entry :
         // CollectionUtils.valueSortedEntries(histogram)) {
-        // System.err.println(" " + entry.second + " " + entry.first);
+        // LOGGER.info(" " + entry.second + " " + entry.first);
         // }
 
         // }
@@ -406,35 +406,39 @@ public class ClusterManager {
 
         lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
         // Head State
-        try {
-            state = HeadState.load(lastSessionDirectory);
-        } catch (InvalidHeadStateException e) {
-            // For backwards compatibility only!
-            Throwable cause = e.getCause();
-            if (cause instanceof Throwable) {
-                try {
-                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
-
-                    HeadState newState = new HeadState();
-                    newState.clusters = oldState.clusters;
-                    newState.cs = oldState.cs;
-                    newState.files = oldState.files;
-                    newState.stream = oldState.stream;
-                    newState.headChangeSetId = oldState.headChangeSetId;
-                    newState.reservedIds = oldState.reservedIds;
-                    newState.transactionId = oldState.transactionId;
-                    state = newState;
-                } catch (InvalidHeadStateException e1) {
-                    throw new IOException("Could not load HeadState due to corruption", e1);
-                }
-            } else {
-                // This should never happen as MainState.load() checks the integrity
-                // of head.state files and rolls back in cases of corruption until a
-                // consistent state is found (could be case 0 - initial db state)
-                // IF this does happen something is completely wrong
-                throw new IOException("Could not load HeadState due to corruption", e);
-            }
-        }
+        if (mainState.isInitial()) {
+            state = new HeadState();
+        } else {
+            try {
+                state = HeadState.load(lastSessionDirectory);
+            } catch (InvalidHeadStateException e) {
+                // For backwards compatibility only!
+                Throwable cause = e.getCause();
+                if (cause instanceof Throwable) {
+                    try {
+                        org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+
+                        HeadState newState = new HeadState();
+                        newState.clusters = oldState.clusters;
+                        newState.cs = oldState.cs;
+                        newState.files = oldState.files;
+                        newState.stream = oldState.stream;
+                        newState.headChangeSetId = oldState.headChangeSetId;
+                        newState.reservedIds = oldState.reservedIds;
+                        newState.transactionId = oldState.transactionId;
+                        state = newState;
+                    } catch (InvalidHeadStateException e1) {
+                        throw new IOException("Could not load HeadState due to corruption", e1);
+                    }
+                } else {
+                    // This should never happen as MainState.load() checks the integrity
+                    // of head.state files and rolls back in cases of corruption until a
+                    // consistent state is found (could be case 0 - initial db state)
+                    // IF this does happen something is completely wrong
+                    throw new IOException("Could not load HeadState due to corruption", e);
+                }
+            }
+        }
         try {
             workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
             Files.createDirectories(workingDirectory);
@@ -460,7 +464,7 @@ public class ClusterManager {
             }
             // Files
             for (String fileKey : state.files) {
-                // System.err.println("loadFile: " + fileKey);
+                // LOGGER.info("loadFile: " + fileKey);
                 String[] parts = fileKey.split("#");
                 Path readDir = dbFolder.resolve(parts[1]);
                 int offset = Integer.parseInt(parts[2]);
@@ -470,7 +474,7 @@ public class ClusterManager {
             }
             // Update chunks
             for (String fileKey : state.stream) {
-                // System.err.println("loadStream: " + fileKey);
+                // LOGGER.info("loadStream: " + fileKey);
                 String[] parts = fileKey.split("#");
                 Path readDir = dbFolder.resolve(parts[1]);
                 int offset = Integer.parseInt(parts[2]);
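Note on the logging change: the hunks above replace System.err.println diagnostics with LOGGER.info calls, but the logger field itself lies outside the diff context and is not shown. Below is a minimal sketch of the kind of SLF4J declaration such calls typically rely on; the class name, field, and helper method here are illustrative assumptions, not part of this commit.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ClusterManagerLoggingSketch {

        // Assumed SLF4J logger field; the real ClusterManager is expected to declare
        // an equivalent for the LOGGER.info(...) calls introduced in the diff.
        private static final Logger LOGGER = LoggerFactory.getLogger(ClusterManagerLoggingSketch.class);

        void logPurge(java.nio.file.Path f) {
            // Concatenation style, matching the calls in the diff.
            LOGGER.info("purge deletes " + f);
            // SLF4J parameterized form; the message is only formatted if INFO is enabled.
            LOGGER.info("purge deletes {}", f);
        }
    }

If purge logging proves noisy, the parameterized form (or an isInfoEnabled() guard) avoids building the message string when the INFO level is off; the diff itself keeps the original concatenation style.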