X-Git-Url: https://gerrit.simantics.org/r/gitweb?a=blobdiff_plain;f=bundles%2Forg.simantics.acorn%2Fsrc%2Forg%2Fsimantics%2Facorn%2FClusterManager.java;h=a2cb2fa864c0cb6b0155a87442cd147e8e527eb6;hb=refs%2Fchanges%2F66%2F1666%2F1;hp=e0bcbebf7421906a61cd13ff626719aac0f00ef0;hpb=20dfd0ba5e518a3706cd749c645a0a79480ea36f;p=simantics%2Fplatform.git

diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
index e0bcbebf7..a2cb2fa86 100644
--- a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
@@ -158,7 +158,7 @@ public class ClusterManager {
             String fileName = parts[0] + "." + parts[1] + ".cluster";
             Path from = dbFolder.resolve(readDirName).resolve(fileName);
             Path to = baseline.resolve(fileName);
-            System.err.println("purge copies " + from + " => " + to);
+            LOGGER.info("purge copies " + from + " => " + to);
             Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
             long first = new BigInteger(parts[0], 16).longValue();
             long second = new BigInteger(parts[1], 16).longValue();
@@ -175,7 +175,7 @@ public class ClusterManager {
             String fileName = parts[0] + ".extFile";
             Path from = dbFolder.resolve(readDirName).resolve(fileName);
             Path to = baseline.resolve(fileName);
-            System.err.println("purge copies " + from + " => " + to);
+            LOGGER.info("purge copies " + from + " => " + to);
             Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
             FileInfo info = fileLRU.getWithoutMutex(parts[0]);
             info.moveTo(baseline);
@@ -187,7 +187,7 @@ public class ClusterManager {
             String readDirName = parts[1];
             if(!readDirName.equals(currentDir)) {
                 ClusterStreamChunk chunk = streamLRU.purge(parts[0]);
-                System.err.println("purge removes " + chunk);
+                LOGGER.info("purge removes " + chunk);
             }
         }
@@ -198,7 +198,7 @@ public class ClusterManager {
             if(!readDirName.equals(currentDir)) {
                 Long revisionId = Long.parseLong(parts[0]);
                 ChangeSetInfo info = csLRU.purge(revisionId);
-                System.err.println("purge removes " + info);
+                LOGGER.info("purge removes " + info);
             }
 //            Path readDir = dbFolder.resolve(parts[1]);
 //            Long revisionId = Long.parseLong(parts[0]);
@@ -232,7 +232,7 @@ public class ClusterManager {
     void tryPurgeDirectory(Path f) {

-        System.err.println("purge deletes " + f);
+        LOGGER.info("purge deletes " + f);

         String currentDir = f.getFileName().toString();
         if(currentDir.endsWith("db"))
@@ -243,7 +243,7 @@ public class ClusterManager {
         int ordinal = Integer.parseInt(currentDir);
         if(ordinal < mainState.headDir - 1) {
-            System.err.println("purge deletes " + f);
+            LOGGER.info("purge deletes " + f);
             FileUtils.deleteDir(f.toFile());
         }
@@ -255,7 +255,7 @@ public class ClusterManager {
             throw cause;
         // Maximum autosave frequency is per 60s
         if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) {
-            // System.err.println("lastSnapshot too early");
+            // LOGGER.info("lastSnapshot too early");
             return false;
         }
@@ -263,11 +263,11 @@ public class ClusterManager {
         // Nothing has been written => no need to do anything
         long amountOfFiles = countFiles(workingDirectory);
         if(!fullSave && amountOfFiles == 0) {
-            // System.err.println("amountOfFiles < 3");
+            //LOGGER.info("makeSnapshot: " + amountOfFiles + " files, skipping snapshot");
             return false;
         }

-        LOGGER.info("makeSnapshot");
+        LOGGER.info("makeSnapshot: start with " + amountOfFiles + " files");

         // Schedule writing of all data to disk
         refreshHeadState();
@@ -287,17 +287,13 @@ public class ClusterManager {
         persistHeadState();

-        if (fullSave)
-            mainState.save(dbFolder);
-
-        amountOfFiles = countFiles(workingDirectory);
-
-        LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
-
-        workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
-        if (!Files.exists(workingDirectory)) {
-            Files.createDirectories(workingDirectory);
+        if (LOGGER.isInfoEnabled()) {
+            amountOfFiles = countFiles(workingDirectory);
+            LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
         }
+
+        workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+        Files.createDirectories(workingDirectory);

         cssi.updateWriteDirectory(workingDirectory);
@@ -372,10 +368,10 @@ public class ClusterManager {
 //            e.printStackTrace();
 //        }

-//        System.err.println("-- load statistics --");
+//        LOGGER.info("-- load statistics --");
 //        for(Pair entry :
 //        CollectionUtils.valueSortedEntries(histogram)) {
-//            System.err.println(" " + entry.second + " " + entry.first);
+//            LOGGER.info(" " + entry.second + " " + entry.first);
 //        }

 //    }
@@ -408,35 +404,39 @@ public class ClusterManager {
         lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));

         // Head State
-        try {
-            state = HeadState.load(lastSessionDirectory);
-        } catch (InvalidHeadStateException e) {
-            // For backwards compatibility only!
-            Throwable cause = e.getCause();
-            if (cause instanceof Throwable) {
-                try {
-                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
-
-                    HeadState newState = new HeadState();
-                    newState.clusters = oldState.clusters;
-                    newState.cs = oldState.cs;
-                    newState.files = oldState.files;
-                    newState.stream = oldState.stream;
-                    newState.headChangeSetId = oldState.headChangeSetId;
-                    newState.reservedIds = oldState.reservedIds;
-                    newState.transactionId = oldState.transactionId;
-                    state = newState;
-                } catch (InvalidHeadStateException e1) {
-                    throw new IOException("Could not load HeadState due to corruption", e1);
-                }
-            } else {
-                // This should never happen as MainState.load() checks the integrity
-                // of head.state files and rolls back in cases of corruption until a
-                // consistent state is found (could be case 0 - initial db state)
-                // IF this does happen something is completely wrong
-                throw new IOException("Could not load HeadState due to corruption", e);
-            }
-        }
+        if (mainState.isInitial()) {
+            state = new HeadState();
+        } else {
+            try {
+                state = HeadState.load(lastSessionDirectory);
+            } catch (InvalidHeadStateException e) {
+                // For backwards compatibility only!
+                Throwable cause = e.getCause();
+                if (cause instanceof Throwable) {
+                    try {
+                        org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+
+                        HeadState newState = new HeadState();
+                        newState.clusters = oldState.clusters;
+                        newState.cs = oldState.cs;
+                        newState.files = oldState.files;
+                        newState.stream = oldState.stream;
+                        newState.headChangeSetId = oldState.headChangeSetId;
+                        newState.reservedIds = oldState.reservedIds;
+                        newState.transactionId = oldState.transactionId;
+                        state = newState;
+                    } catch (InvalidHeadStateException e1) {
+                        throw new IOException("Could not load HeadState due to corruption", e1);
+                    }
+                } else {
+                    // This should never happen as MainState.load() checks the integrity
+                    // of head.state files and rolls back in cases of corruption until a
+                    // consistent state is found (could be case 0 - initial db state)
+                    // IF this does happen something is completely wrong
+                    throw new IOException("Could not load HeadState due to corruption", e);
+                }
+            }
+        }
         try {
             workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
             Files.createDirectories(workingDirectory);
@@ -462,7 +462,7 @@ public class ClusterManager {
         }
         // Files
         for (String fileKey : state.files) {
-            // System.err.println("loadFile: " + fileKey);
+            // LOGGER.info("loadFile: " + fileKey);
             String[] parts = fileKey.split("#");
             Path readDir = dbFolder.resolve(parts[1]);
             int offset = Integer.parseInt(parts[2]);
@@ -472,7 +472,7 @@ public class ClusterManager {
         }
         // Update chunks
         for (String fileKey : state.stream) {
-            // System.err.println("loadStream: " + fileKey);
+            // LOGGER.info("loadStream: " + fileKey);
             String[] parts = fileKey.split("#");
             Path readDir = dbFolder.resolve(parts[1]);
             int offset = Integer.parseInt(parts[2]);
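
Note: the change above routes diagnostic output through LOGGER instead of System.err.println and guards the post-snapshot file recount behind LOGGER.isInfoEnabled(). A minimal sketch of that idiom, assuming LOGGER is an SLF4J Logger (the class name and the countFilesSlowly helper below are illustrative, not part of the commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical stand-in for ClusterManager, for illustration only.
public class SnapshotLoggingSketch {

    private static final Logger LOGGER = LoggerFactory.getLogger(SnapshotLoggingSketch.class);

    // Stand-in for the countFiles(workingDirectory) call in the diff.
    private long countFilesSlowly() {
        return 42L; // placeholder value
    }

    void afterSnapshot() {
        // Guard the expensive recount so it only runs when INFO is enabled,
        // mirroring the if (LOGGER.isInfoEnabled()) block in the diff.
        if (LOGGER.isInfoEnabled()) {
            long amountOfFiles = countFilesSlowly();
            // Parameterized {} message: no string concatenation when disabled.
            LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
        }
    }
}

The parameterized {} form defers message formatting until the level check passes, while the explicit isInfoEnabled() guard also skips computing the argument itself, which matters when the argument, like countFiles(workingDirectory), is expensive to produce.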