import java.io.IOException;
import java.math.BigInteger;
-import java.nio.file.CopyOption;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import org.simantics.acorn.lru.ClusterStreamChunk;
import org.simantics.acorn.lru.FileInfo;
import org.simantics.acorn.lru.LRU;
-import org.simantics.databoard.file.RuntimeIOException;
import org.simantics.db.ClusterCreator;
import org.simantics.db.Database.Session.ClusterIds;
import org.simantics.db.Database.Session.ResourceSegment;
String fileName = parts[0] + "." + parts[1] + ".cluster";
Path from = dbFolder.resolve(readDirName).resolve(fileName);
Path to = baseline.resolve(fileName);
- System.err.println("purge copies " + from + " => " + to);
+ LOGGER.info("purge copies " + from + " => " + to);
Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
long first = new BigInteger(parts[0], 16).longValue();
long second = new BigInteger(parts[1], 16).longValue();
String fileName = parts[0] + ".extFile";
Path from = dbFolder.resolve(readDirName).resolve(fileName);
Path to = baseline.resolve(fileName);
- System.err.println("purge copies " + from + " => " + to);
+ LOGGER.info("purge copies " + from + " => " + to);
Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
FileInfo info = fileLRU.getWithoutMutex(parts[0]);
info.moveTo(baseline);
String readDirName = parts[1];
if(!readDirName.equals(currentDir)) {
ClusterStreamChunk chunk = streamLRU.purge(parts[0]);
- System.err.println("purge removes " + chunk);
+ LOGGER.info("purge removes " + chunk);
}
}
if(!readDirName.equals(currentDir)) {
Long revisionId = Long.parseLong(parts[0]);
ChangeSetInfo info = csLRU.purge(revisionId);
- System.err.println("purge removes " + info);
+ LOGGER.info("purge removes " + info);
}
// Path readDir = dbFolder.resolve(parts[1]);
// Long revisionId = Long.parseLong(parts[0]);
void tryPurgeDirectory(Path f) {
- System.err.println("purge deletes " + f);
+ LOGGER.info("purge deletes " + f);
String currentDir = f.getFileName().toString();
if(currentDir.endsWith("db"))
int ordinal = Integer.parseInt(currentDir);
if(ordinal < mainState.headDir - 1) {
- System.err.println("purge deletes " + f);
+ LOGGER.info("purge deletes " + f);
FileUtils.deleteDir(f.toFile());
}
throw cause;
// Maximum autosave frequency is once per 10 s (the check below uses 10*1000000000 ns; an older comment said 60 s — confirm intended interval)
if(!fullSave && System.nanoTime() - lastSnapshot < 10*1000000000L) {
- // System.err.println("lastSnapshot too early");
+ // LOGGER.info("lastSnapshot too early");
return false;
}
// Nothing has been written => no need to do anything
long amountOfFiles = countFiles(workingDirectory);
if(!fullSave && amountOfFiles == 0) {
- // System.err.println("amountOfFiles < 3");
+ // LOGGER.info("makeSnapshot: " + amountOfFiles + " files, skipping snapshot");
return false;
}
- LOGGER.info("makeSnapshot");
+ LOGGER.info("makeSnapshot: start with " + amountOfFiles + " files");
// Schedule writing of all data to disk
refreshHeadState();
persistHeadState();
- if (fullSave)
- mainState.save(dbFolder);
-
- amountOfFiles = countFiles(workingDirectory);
-
- LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
-
- workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
- if (!Files.exists(workingDirectory)) {
- Files.createDirectories(workingDirectory);
+ if (LOGGER.isInfoEnabled()) {
+ amountOfFiles = countFiles(workingDirectory);
+ LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
}
+
+ workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+ Files.createDirectories(workingDirectory);
cssi.updateWriteDirectory(workingDirectory);
// e.printStackTrace();
// }
- // System.err.println("-- load statistics --");
+ // LOGGER.info("-- load statistics --");
// for(Pair<ClusterUID, Integer> entry :
// CollectionUtils.valueSortedEntries(histogram)) {
- // System.err.println(" " + entry.second + " " + entry.first);
+ // LOGGER.info(" " + entry.second + " " + entry.first);
// }
// }
public void load() throws IOException {
// Main state
- mainState = MainState.load(dbFolder, t -> rollback.set(true));
+ mainState = MainState.load(dbFolder, () -> rollback.set(true));
lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
// Head State
- try {
- state = HeadState.load(lastSessionDirectory);
- } catch (InvalidHeadStateException e) {
- // For backwards compatibility only!
- Throwable cause = e.getCause();
- if (cause instanceof Throwable) {
- try {
- org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
-
- HeadState newState = new HeadState();
- newState.clusters = oldState.clusters;
- newState.cs = oldState.cs;
- newState.files = oldState.files;
- newState.stream = oldState.stream;
- newState.headChangeSetId = oldState.headChangeSetId;
- newState.reservedIds = oldState.reservedIds;
- newState.transactionId = oldState.transactionId;
- state = newState;
- } catch (InvalidHeadStateException e1) {
- throw new IOException("Could not load HeadState due to corruption", e1);
- }
- } else {
- // This should never happen as MainState.load() checks the integrity
- // of head.state files and rolls back in cases of corruption until a
- // consistent state is found (could be case 0 - initial db state)
- // IF this does happen something is completely wrong
- throw new IOException("Could not load HeadState due to corruption", e);
- }
- }
+ if (mainState.isInitial()) {
+ state = new HeadState();
+ } else {
+ try {
+ state = HeadState.load(lastSessionDirectory);
+ } catch (InvalidHeadStateException e) {
+ // For backwards compatibility only!
+ Throwable cause = e.getCause();
+ if (cause instanceof Throwable) {
+ try {
+ org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+
+ HeadState newState = new HeadState();
+ newState.clusters = oldState.clusters;
+ newState.cs = oldState.cs;
+ newState.files = oldState.files;
+ newState.stream = oldState.stream;
+ newState.headChangeSetId = oldState.headChangeSetId;
+ newState.reservedIds = oldState.reservedIds;
+ newState.transactionId = oldState.transactionId;
+ state = newState;
+ } catch (InvalidHeadStateException e1) {
+ throw new IOException("Could not load HeadState due to corruption", e1);
+ }
+ } else {
+ // This should never happen as MainState.load() checks the integrity
+ // of head.state files and rolls back in cases of corruption until a
+ // consistent state is found (could be case 0 - initial db state)
+ // IF this does happen something is completely wrong
+ throw new IOException("Could not load HeadState due to corruption", e);
+ }
+ }
+ }
try {
workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
Files.createDirectories(workingDirectory);
}
// Files
for (String fileKey : state.files) {
- // System.err.println("loadFile: " + fileKey);
+ // LOGGER.info("loadFile: " + fileKey);
String[] parts = fileKey.split("#");
Path readDir = dbFolder.resolve(parts[1]);
int offset = Integer.parseInt(parts[2]);
}
// Update chunks
for (String fileKey : state.stream) {
- // System.err.println("loadStream: " + fileKey);
+ // LOGGER.info("loadStream: " + fileKey);
String[] parts = fileKey.split("#");
Path readDir = dbFolder.resolve(parts[1]);
int offset = Integer.parseInt(parts[2]);