import java.io.IOException;
import java.math.BigInteger;
+import java.nio.file.CopyOption;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import org.simantics.acorn.lru.ClusterStreamChunk;
import org.simantics.acorn.lru.FileInfo;
import org.simantics.acorn.lru.LRU;
+import org.simantics.databoard.file.RuntimeIOException;
import org.simantics.db.ClusterCreator;
import org.simantics.db.Database.Session.ClusterIds;
import org.simantics.db.Database.Session.ResourceSegment;
import org.simantics.db.procore.cluster.ClusterTraits;
import org.simantics.db.service.ClusterSetsSupport;
import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.FileUtils;
import org.simantics.utils.threads.logger.ITask;
import org.simantics.utils.threads.logger.ThreadLogger;
import org.slf4j.Logger;
// Cleared (set to false) once a failure has corrupted in-memory bookkeeping,
// after which makeSnapshot refuses to run; see notSafeToMakeSnapshot(..) and makeSnapshot(..).
private AtomicBoolean safeToMakeSnapshot = new AtomicBoolean(true);
// The failure that made snapshotting unsafe; rethrown by makeSnapshot when the flag is cleared.
private IllegalAcornStateException cause;
+ public synchronized void purge(ServiceLocator locator) throws IllegalAcornStateException {
+
+ try {
+
+ // Schedule writing of all data to disk
+ refreshHeadState();
+ // Wait for files to be written
+ synchronizeWorkingDirectory();
+
+ String currentDir = workingDirectory.getFileName().toString();
+ Path baseline = workingDirectory.resolveSibling(currentDir + "_baseline");
+
+ Files.createDirectories(baseline);
+
+ for(String clusterKey : state.clusters) {
+ String[] parts1 = clusterKey.split("#");
+ String[] parts = parts1[0].split("\\.");
+ String readDirName = parts1[1];
+ if(!readDirName.equals(currentDir)) {
+ String fileName = parts[0] + "." + parts[1] + ".cluster";
+ Path from = dbFolder.resolve(readDirName).resolve(fileName);
+ Path to = baseline.resolve(fileName);
+ System.err.println("purge copies " + from + " => " + to);
+ Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
+ long first = new BigInteger(parts[0], 16).longValue();
+ long second = new BigInteger(parts[1], 16).longValue();
+ ClusterUID uuid = ClusterUID.make(first, second);
+ ClusterInfo info = clusterLRU.getWithoutMutex(uuid);
+ info.moveTo(baseline);
+ }
+ }
+
+ for (String fileKey : state.files) {
+ String[] parts = fileKey.split("#");
+ String readDirName = parts[1];
+ if(!readDirName.equals(currentDir)) {
+ String fileName = parts[0] + ".extFile";
+ Path from = dbFolder.resolve(readDirName).resolve(fileName);
+ Path to = baseline.resolve(fileName);
+ System.err.println("purge copies " + from + " => " + to);
+ Files.copy(from, to, StandardCopyOption.COPY_ATTRIBUTES);
+ FileInfo info = fileLRU.getWithoutMutex(parts[0]);
+ info.moveTo(baseline);
+ }
+ }
+
+ for (String fileKey : state.stream) {
+ String[] parts = fileKey.split("#");
+ String readDirName = parts[1];
+ if(!readDirName.equals(currentDir)) {
+ ClusterStreamChunk chunk = streamLRU.purge(parts[0]);
+ System.err.println("purge removes " + chunk);
+ }
+ }
+
+ // Change sets
+ for (String fileKey : state.cs) {
+ String[] parts = fileKey.split("#");
+ String readDirName = parts[1];
+ if(!readDirName.equals(currentDir)) {
+ Long revisionId = Long.parseLong(parts[0]);
+ ChangeSetInfo info = csLRU.purge(revisionId);
+ System.err.println("purge removes " + info);
+ }
+// Path readDir = dbFolder.resolve(parts[1]);
+// Long revisionId = Long.parseLong(parts[0]);
+// int offset = Integer.parseInt(parts[2]);
+// int length = Integer.parseInt(parts[3]);
+// ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length);
+// csLRU.map(info);
+ }
+
+ state.tailChangeSetId = state.headChangeSetId;
+
+ makeSnapshot(locator, true);
+
+ Files.walk(dbFolder, 1).filter(Files::isDirectory).forEach(f -> tryPurgeDirectory(f));
+
+ } catch (IllegalAcornStateException e) {
+ notSafeToMakeSnapshot(e);
+ throw e;
+ } catch (IOException e) {
+ IllegalAcornStateException e1 = new IllegalAcornStateException(e);
+ notSafeToMakeSnapshot(e1);
+ throw e1;
+ } catch (AcornAccessVerificationException e) {
+ IllegalAcornStateException e1 = new IllegalAcornStateException(e);
+ notSafeToMakeSnapshot(e1);
+ throw e1;
+ }
+
+ }
+
+ void tryPurgeDirectory(Path f) {
+
+
+ System.err.println("purge deletes " + f);
+
+ String currentDir = f.getFileName().toString();
+ if(currentDir.endsWith("db"))
+ return;
+
+ if(currentDir.endsWith("_baseline"))
+ currentDir = currentDir.replace("_baseline", "");
+
+ int ordinal = Integer.parseInt(currentDir);
+ if(ordinal < mainState.headDir - 1) {
+ System.err.println("purge deletes " + f);
+ FileUtils.deleteDir(f.toFile());
+ }
+
+ }
+
// NOTE(review): this region is a unified-diff fragment with context lines elided
// (e.g. the body of the first `if` is not visible), so the code is kept verbatim.
// Intent as far as visible: refuse to snapshot after a prior failure, skip
// autosave when nothing was written, persist head state (and MainState on a
// full save), then persist the change-set LRU.
public synchronized boolean makeSnapshot(ServiceLocator locator, boolean fullSave) throws IllegalAcornStateException {
try {
if (!safeToMakeSnapshot.get())
// Cluster files are always there
// Nothing has been written => no need to do anything
long amountOfFiles = countFiles(workingDirectory);
-        if(!fullSave && amountOfFiles < 3) {
+        if(!fullSave && amountOfFiles == 0) {
// System.err.println("amountOfFiles < 3");
return false;
}
if (!safeToMakeSnapshot.get())
throw cause;
+        ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
+        cssi.save();
+
// presumably cluster sets must be saved before the head state — TODO confirm
persistHeadState();
-
+
if (fullSave)
mainState.save(dbFolder);
-        ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
-        cssi.save();
-
amountOfFiles = countFiles(workingDirectory);
LOGGER.info(" -finished: amount of files is {}", amountOfFiles);
csLRU.persist(state.cs);
}
- private void persistHeadState() throws IOException {
+ private void synchronizeWorkingDirectory() throws IOException {
// Sync current working directory
Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath);
+ }
+
+ private void persistHeadState() throws IOException {
+ synchronizeWorkingDirectory();
state.save(workingDirectory);
mainState.headDir++;
}
this.cause = t;
}
/**
 * @return the tail change set id from the current head state, i.e. the oldest
 *         change set still retained (advanced to the head id by purge)
 */
public long getTailChangeSetId() {
    return state.tailChangeSetId;
}
+
}