NPE fix for Acorn cluster stream undo handling
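This page shows the ClusterManager.java portion of the change: raw
System.err.println diagnostics are replaced with LOGGER calls, the
post-snapshot file count is guarded behind LOGGER.isInfoEnabled() so the
directory walk is only paid when INFO logging is enabled, and load() gets an
explicit initial-database branch so that HeadState.load() is never attempted
against a session directory that does not exist yet.

A minimal sketch of the guarded-logging pattern adopted in makeSnapshot(),
assuming the SLF4J facade (which the {} placeholders suggest); the class name
and the countFiles() stand-in below are illustrative, not part of this change:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingSketch {
        private static final Logger LOGGER = LoggerFactory.getLogger(GuardedLoggingSketch.class);

        // Stand-in for ClusterManager.countFiles(): an expensive directory walk.
        private static long countFiles() {
            return 42L;
        }

        public static void main(String[] args) {
            // Compute the expensive argument only when INFO is actually enabled,
            // mirroring the isInfoEnabled() guard introduced in makeSnapshot().
            if (LOGGER.isInfoEnabled()) {
                long amountOfFiles = countFiles();
                LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
            }
        }
    }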
[simantics/platform.git] / bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
index 40c5de37e8c1991e6e5ce846e2b90e7b651b6c5e..85daddd22af6d178154c3fcbaa7d3b756b61d894 100644
--- a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
@@ -2,7 +2,6 @@ package org.simantics.acorn;
 
 import java.io.IOException;
 import java.math.BigInteger;
-import java.nio.file.CopyOption;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -24,7 +23,6 @@ import org.simantics.acorn.lru.ClusterLRU;
 import org.simantics.acorn.lru.ClusterStreamChunk;
 import org.simantics.acorn.lru.FileInfo;
 import org.simantics.acorn.lru.LRU;
-import org.simantics.databoard.file.RuntimeIOException;
 import org.simantics.db.ClusterCreator;
 import org.simantics.db.Database.Session.ClusterIds;
 import org.simantics.db.Database.Session.ResourceSegment;
@@ -160,7 +158,7 @@ public class ClusterManager {
                                String fileName = parts[0] + "." + parts[1] + ".cluster";
                                Path from = dbFolder.resolve(readDirName).resolve(fileName);
                                Path to = baseline.resolve(fileName);
-                               System.err.println("purge copies " + from + "  => " + to);
+				LOGGER.info("purge copies {} => {}", from, to);
                                Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
                                long first = new BigInteger(parts[0], 16).longValue();
                                long second = new BigInteger(parts[1], 16).longValue();
@@ -177,7 +175,7 @@ public class ClusterManager {
                                String fileName = parts[0] + ".extFile";
                                Path from = dbFolder.resolve(readDirName).resolve(fileName);
                                Path to = baseline.resolve(fileName);
-                               System.err.println("purge copies " + from + "  => " + to);
+				LOGGER.info("purge copies {} => {}", from, to);
                                Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
                                FileInfo info = fileLRU.getWithoutMutex(parts[0]);
                                info.moveTo(baseline);
@@ -189,7 +187,7 @@ public class ClusterManager {
                        String readDirName = parts[1];
                        if(!readDirName.equals(currentDir)) {
                                ClusterStreamChunk chunk = streamLRU.purge(parts[0]);
-                               System.err.println("purge removes " + chunk);
+				LOGGER.info("purge removes {}", chunk);
                        }
                }
                
@@ -200,7 +198,7 @@ public class ClusterManager {
                        if(!readDirName.equals(currentDir)) {
                                Long revisionId = Long.parseLong(parts[0]);
                                ChangeSetInfo info = csLRU.purge(revisionId);
-                               System.err.println("purge removes " + info);
+				LOGGER.info("purge removes {}", info);
                        }
 //                     Path readDir = dbFolder.resolve(parts[1]);
 //                     Long revisionId = Long.parseLong(parts[0]);
@@ -234,7 +232,7 @@ public class ClusterManager {
        void tryPurgeDirectory(Path f) {
                
                
-               System.err.println("purge deletes " + f);
+		LOGGER.info("purge deletes {}", f);
 
                String currentDir = f.getFileName().toString();
                if(currentDir.endsWith("db"))
@@ -245,7 +243,7 @@ public class ClusterManager {
 
                int ordinal = Integer.parseInt(currentDir);
                if(ordinal < mainState.headDir - 1) {
-                       System.err.println("purge deletes " + f);
+			LOGGER.info("purge deletes {}", f);
                        FileUtils.deleteDir(f.toFile());
                }
                
@@ -257,7 +255,7 @@ public class ClusterManager {
                throw cause;
 		// Maximum autosave frequency is once per 10 s
                if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) {
-    //             System.err.println("lastSnapshot too early");
+    //             LOGGER.info("lastSnapshot too early");
                    return false;
                }
     
@@ -265,11 +263,11 @@ public class ClusterManager {
                // Nothing has been written => no need to do anything
                long amountOfFiles = countFiles(workingDirectory);
                if(!fullSave && amountOfFiles == 0) {
-    //             System.err.println("amountOfFiles < 3");
+			// LOGGER.info("makeSnapshot: " + amountOfFiles + " files, skipping snapshot");
                    return false;
                }
     
-               LOGGER.info("makeSnapshot");
+		LOGGER.info("makeSnapshot: start with {} files", amountOfFiles);
     
                // Schedule writing of all data to disk
             refreshHeadState();
@@ -289,13 +287,11 @@ public class ClusterManager {
 
                persistHeadState();
 
-               if (fullSave)
-                   mainState.save(dbFolder);
-    
-               amountOfFiles = countFiles(workingDirectory);
-               
-               LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
-    
+               if (LOGGER.isInfoEnabled()) {
+                       amountOfFiles = countFiles(workingDirectory);
+                       LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
+               }
+
                workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
                if (!Files.exists(workingDirectory)) {
                    Files.createDirectories(workingDirectory);
@@ -374,10 +370,10 @@ public class ClusterManager {
 //                     e.printStackTrace();
 //             }
 
-               // System.err.println("-- load statistics --");
+               // LOGGER.info("-- load statistics --");
                // for(Pair<ClusterUID, Integer> entry :
                // CollectionUtils.valueSortedEntries(histogram)) {
-               // System.err.println(" " + entry.second + " " + entry.first);
+               // LOGGER.info(" " + entry.second + " " + entry.first);
                // }
 
 //     }
@@ -405,40 +401,44 @@ public class ClusterManager {
        public void load() throws IOException {
 
                // Main state
-               mainState = MainState.load(dbFolder, t -> rollback.set(true));
+               mainState = MainState.load(dbFolder, () -> rollback.set(true));
 
                lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
                
                // Head State
-               try {
-            state = HeadState.load(lastSessionDirectory);
-        } catch (InvalidHeadStateException e) {
-            // For backwards compatibility only!
-            Throwable cause = e.getCause();
-            if (cause instanceof Throwable) {
-                try {
-                    org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
-                    
-                    HeadState newState = new HeadState();
-                    newState.clusters = oldState.clusters;
-                    newState.cs = oldState.cs;
-                    newState.files = oldState.files;
-                    newState.stream = oldState.stream;
-                    newState.headChangeSetId = oldState.headChangeSetId;
-                    newState.reservedIds = oldState.reservedIds;
-                    newState.transactionId = oldState.transactionId;
-                    state = newState;
-                } catch (InvalidHeadStateException e1) {
-                    throw new IOException("Could not load HeadState due to corruption", e1);
-                }
-            } else {
-                // This should never happen as MainState.load() checks the integrity
-                // of head.state files and rolls back in cases of corruption until a
-                // consistent state is found (could be case 0 - initial db state)
-                // IF this does happen something is completely wrong
-                throw new IOException("Could not load HeadState due to corruption", e);
-            }
-        }
+		if (mainState.isInitial()) {
+			state = new HeadState();
+		} else {
+			try {
+				state = HeadState.load(lastSessionDirectory);
+			} catch (InvalidHeadStateException e) {
+				// For backwards compatibility only!
+				Throwable cause = e.getCause();
+				if (cause instanceof Throwable) {
+					try {
+						org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+
+						HeadState newState = new HeadState();
+						newState.clusters = oldState.clusters;
+						newState.cs = oldState.cs;
+						newState.files = oldState.files;
+						newState.stream = oldState.stream;
+						newState.headChangeSetId = oldState.headChangeSetId;
+						newState.reservedIds = oldState.reservedIds;
+						newState.transactionId = oldState.transactionId;
+						state = newState;
+					} catch (InvalidHeadStateException e1) {
+						throw new IOException("Could not load HeadState due to corruption", e1);
+					}
+				} else {
+					// This should never happen as MainState.load() checks the integrity
+					// of head.state files and rolls back in cases of corruption until a
+					// consistent state is found (could be case 0 - initial db state)
+					// IF this does happen something is completely wrong
+					throw new IOException("Could not load HeadState due to corruption", e);
+				}
+			}
+		}
                try {
                workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
                Files.createDirectories(workingDirectory);
@@ -464,7 +464,7 @@ public class ClusterManager {
                }
                // Files
                for (String fileKey : state.files) {
-    //                 System.err.println("loadFile: " + fileKey);
+    //                 LOGGER.info("loadFile: " + fileKey);
                        String[] parts = fileKey.split("#");
                        Path readDir = dbFolder.resolve(parts[1]);
                        int offset = Integer.parseInt(parts[2]);
@@ -474,7 +474,7 @@ public class ClusterManager {
                }
                // Update chunks
                for (String fileKey : state.stream) {
-    //                 System.err.println("loadStream: " + fileKey);
+    //                 LOGGER.info("loadStream: " + fileKey);
                        String[] parts = fileKey.split("#");
                        Path readDir = dbFolder.resolve(parts[1]);
                        int offset = Integer.parseInt(parts[2]);
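A self-contained sketch of the control flow that the new initial-database
branch in load() implements; every type below is a hypothetical stand-in for
the real Acorn classes, and only the branch structure matches this change:

    import java.io.IOException;

    public class LoadFlowSketch {

        // Stand-in for org.simantics.acorn.HeadState; empty on a fresh database.
        static class HeadState {
        }

        // Stand-in for MainState: reports whether any previous session
        // directory exists at all.
        static class MainState {
            private final boolean initial;

            MainState(boolean initial) {
                this.initial = initial;
            }

            boolean isInitial() {
                return initial;
            }
        }

        static HeadState load(MainState mainState) throws IOException {
            if (mainState.isInitial()) {
                // Fresh database: there is no lastSessionDirectory to read,
                // so start from an empty HeadState instead of attempting
                // HeadState.load() on a directory that does not exist.
                return new HeadState();
            }
            // Existing database: load the persisted head state, falling back
            // to the legacy format on InvalidHeadStateException as in the diff.
            return loadFromLastSession();
        }

        // Placeholder for HeadState.load(lastSessionDirectory).
        static HeadState loadFromLastSession() throws IOException {
            return new HeadState();
        }

        public static void main(String[] args) throws IOException {
            HeadState state = load(new MainState(true)); // fresh-database path
            System.out.println("fresh database -> empty head state: " + (state != null));
        }
    }

The design point is that a fresh database is represented by an explicitly
constructed empty HeadState rather than by a missing file or a null state
that every caller would have to check for.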