--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="src" path="src"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
+ <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+ <classpathentry kind="output" path="bin"/>
+</classpath>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>org.simantics.acorn</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.ManifestBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.SchemaBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.ds.core.builder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.pde.PluginNature</nature>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
--- /dev/null
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
+org.eclipse.jdt.core.compiler.compliance=1.8
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.8
--- /dev/null
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Acorn Database for Simantics
+Bundle-SymbolicName: org.simantics.acorn
+Bundle-Version: 1.1.2.qualifier
+Bundle-Vendor: Semantum Oy
+Require-Bundle: gnu.trove3;bundle-version="3.0.0",
+ gnu.trove2;bundle-version="2.0.4",
+ org.simantics.db.impl;bundle-version="0.8.0",
+ org.simantics.db.server;bundle-version="1.0.0",
+ org.simantics.compressions;bundle-version="1.0.0",
+ org.simantics.backup,
+ org.eclipse.core.runtime;bundle-version="3.11.1",
+ org.simantics.db.procore
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Bundle-ActivationPolicy: lazy
+Bundle-Activator: org.simantics.acorn.internal.Activator
+Service-Component: OSGI-INF/component.xml, OSGI-INF/org.simantics.acorn.AcornDriver.xml
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="org.simantics.acorn.backupProvider">
+ <implementation class="org.simantics.acorn.backup.AcornBackupProvider"/>
+ <service>
+ <provide interface="org.simantics.backup.IBackupProvider"/>
+ </service>
+</scr:component>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="AcornDriver">
+ <implementation class="org.simantics.acorn.AcornDriver"/>
+ <service>
+ <provide interface="org.simantics.db.Driver"/>
+ </service>
+</scr:component>
--- /dev/null
+###############################################################################
+# Copyright (c) 2007, 2010 Association for Decentralized Information Management
+# in Industry THTH ry.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Contributors:
+# VTT Technical Research Centre of Finland - initial API and implementation
+###############################################################################
+output.. = bin/
+bin.includes = META-INF/,\
+ .,\
+ log4j.properties,\
+ OSGI-INF/
+source.. = src/
--- /dev/null
+###############################################################################
+# Copyright (c) 2007, 2010 Association for Decentralized Information Management
+# in Industry THTH ry.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Contributors:
+# VTT Technical Research Centre of Finland - initial API and implementation
+###############################################################################
+# For the general syntax of property based configuration files see the
+# documentation of org.apache.log4j.PropertyConfigurator.
+
+# The root category uses the appender called stdout. If no priority is
+# specified, the root category assumes the default priority for root
+# which is DEBUG in log4j. The root category is the only category that
+# has a default priority. All other categories need not be assigned a
+# priority in which case they inherit their priority from the
+# hierarchy.
+
+# This will provide console output on log4j configuration loading
+#log4j.debug=true
+
+log4j.rootCategory=warn, stdout
+#log4j.rootCategory=warn
+
+# BEGIN APPENDER: CONSOLE APPENDER (stdout)
+# first: type of appender (fully qualified class name)
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+
+# second: Any configuration information needed for that appender.
+# Many appenders require a layout.
+log4j.appender.stdout.layout=org.apache.log4j.TTCCLayout
+# log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
+
+# Possible information overload?
+# log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+# additionally, some layouts can take additional information --
+# like the ConversionPattern for the PatternLayout.
+# log4j.appender.stdout.layout.ConversionPattern=%d %-5p %-17c{2} (%30F:%L) %3x - %m%n
+# END APPENDER: CONSOLE APPENDER (stdout)
+
+# BEGIN APPENDER: ROLLING FILE APPENDER (rolling)
+#log4j.appender.rolling=com.tools.logging.PluginFileAppender
+#log4j.appender.rolling=org.apache.log4j.FileAppender
+log4j.appender.rolling=org.apache.log4j.RollingFileAppender
+log4j.appender.rolling.File=procore.log
+log4j.appender.rolling.append=true
+log4j.appender.rolling.MaxFileSize=8192KB
+# Keep one backup file
+log4j.appender.rolling.MaxBackupIndex=1
+log4j.appender.rolling.layout=org.apache.log4j.PatternLayout
+#log4j.appender.rolling.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.rolling.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: ROLLING FILE APPENDER (rolling)
+
+# BEGIN APPENDER: PLUG-IN LOG APPENDER (plugin)
+log4j.appender.plugin=com.tools.logging.PluginLogAppender
+log4j.appender.plugin.layout=org.apache.log4j.PatternLayout
+#log4j.appender.plugin.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.plugin.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: PLUG-IN LOG APPENDER (plugin)
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.internal.AcornDatabase;
+import org.simantics.db.Database;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabaseManager {
+
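+ // One Database instance is cached per canonical database folder path.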
+ private static Map<String, Database> dbs = new HashMap<String, Database>();
+
+ public static synchronized Database getDatabase(Path folder) throws ProCoreException {
+ Path canonical;
+ try {
+ if (!Files.exists(folder))
+ Files.createDirectories(folder);
+ canonical = folder.toRealPath();
+ } catch (IOException e) {
+ throw new ProCoreException("Could not get canonical path.", e);
+ }
+
+ String canonicalPath = canonical.toString();
+ Database db = dbs.get(canonicalPath);
+ if (null != db)
+ return db;
+
+ db = new AcornDatabase(canonical);
+ dbs.put(canonicalPath, db);
+ return db;
+ }
+
+}
\ No newline at end of file
--- /dev/null
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.Driver;
+import org.simantics.db.ServerI;
+import org.simantics.db.ServerReference;
+import org.simantics.db.Session;
+import org.simantics.db.SessionReference;
+import org.simantics.db.exception.DatabaseException;
+
+public class AcornDriver implements Driver {
+
+ public static final String AcornDriverName = "acorn";
+
+ @Override
+ public String getName() {
+ return AcornDriverName;
+ }
+
+ @Override
+ public DatabaseUserAgent getDatabaseUserAgent(String address) throws DatabaseException {
+ Path dbFolder = Paths.get(address);
+ return AcornDatabaseManager.getDatabase(dbFolder).getUserAgent();
+ }
+
+ @Override
+ public void setDatabaseUserAgent(String address, DatabaseUserAgent dbUserAgent) throws DatabaseException {
+ Path dbFolder = Paths.get(address);
+ AcornDatabaseManager.getDatabase(dbFolder).setUserAgent(dbUserAgent);
+ }
+
+ @Override
+ public Session getSession(String address, Properties properties) throws DatabaseException {
+ Path dbFolder = Paths.get(address);
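+ // Acorn runs in-process: the SessionReference only needs to carry the database folder.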
+ Session session = AcornSessionManagerImpl.getInstance().createSession(new SessionReference() {
+
+ @Override
+ public ServerReference getServerReference() {
+ return new ServerReference() {
+
+ @Override
+ public Path getDBFolder() {
+ return dbFolder;
+ }
+ };
+ }
+
+ @Override
+ public long getSessionId() {
+ return 0L;
+ }
+ }, null);
+ if (!properties.containsKey("clientId"))
+ properties.put("clientId", dbFolder.toFile().getAbsolutePath());
+ session.registerService(Properties.class, properties);
+ Session s = session.peekService(Session.class);
+ if (null == s)
+ session.registerService(Session.class, session);
+ return session;
+ }
+
+ @Override
+ public ServerI getServer(String address, Properties properties) throws DatabaseException {
+ return new AcornServerI(address);
+ }
+
+ @Override
+ public Management getManagement(String address, Properties properties) throws DatabaseException {
+ Path dbFolder = Paths.get(address);
+ return new AcornManagement(dbFolder, properties);
+ }
+
+ private static class AcornServerI implements ServerI {
+
+ private String address;
+
+ public AcornServerI(String address) {
+ this.address = address;
+ }
+
+ @Override
+ public void stop() throws DatabaseException {
+ AcornDatabaseManager.getDatabase(Paths.get(address)).tryToStop();
+ }
+
+ @Override
+ public void start() throws DatabaseException {
+ AcornDatabaseManager.getDatabase(Paths.get(address)).start();
+ }
+
+ @Override
+ public boolean isActive() throws DatabaseException {
+ return AcornDatabaseManager.getDatabase(Paths.get(address)).isRunning();
+ }
+
+ @Override
+ public String getAddress() throws DatabaseException {
+ return address;
+ }
+
+ @Override
+ public String executeAndDisconnect(String command) throws DatabaseException {
+ return "";
+ }
+
+ @Override
+ public String execute(String command) throws DatabaseException {
+ return "";
+ }
+ }
+
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.Properties;
+
+import org.simantics.db.Database;
+import org.simantics.db.Driver.Management;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.server.ProCoreException;
+
+public class AcornManagement implements Management {
+
+ private final Database db;
+ private final Properties properties;
+
+ AcornManagement(Path dbFolder, Properties properties) throws ProCoreException {
+ db = AcornDatabaseManager.getDatabase(dbFolder);
+ this.properties = properties;
+ }
+
+ @Override
+ public boolean exist() throws DatabaseException {
+ return db.isFolderOk();
+ }
+
+ @Override
+ public void delete() throws DatabaseException {
+ db.deleteFiles();
+ if (exist())
+ throw new DatabaseException("Failed to delete database. folder=" + db.getFolder());
+ }
+
+ @Override
+ public void create() throws DatabaseException {
+ db.initFolder(properties);
+ if (!exist())
+ throw new DatabaseException("Failed to create Acorn database. folder=" + db.getFolder());
+ }
+
+ @Override
+ public void purge() throws DatabaseException {
+ db.purgeDatabase();
+ }
+
+ @Override
+ public void shutdown() throws DatabaseException {
+ db.tryToStop();
+ db.disconnect();
+ }
+
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.simantics.db.Database;
+import org.simantics.db.Session;
+import org.simantics.db.SessionErrorHandler;
+import org.simantics.db.SessionManager;
+import org.simantics.db.SessionReference;
+import org.simantics.db.authentication.UserAuthenticationAgent;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.event.SessionEvent;
+import org.simantics.db.event.SessionListener;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.LifecycleSupport;
+import org.simantics.utils.datastructures.ListenerList;
+
+import fi.vtt.simantics.procore.internal.SessionImplDb;
+import fi.vtt.simantics.procore.internal.SessionImplSocket;
+
+public class AcornSessionManagerImpl implements SessionManager {
+
+ private static AcornSessionManagerImpl INSTANCE;
+
+ private ConcurrentHashMap<SessionImplSocket, SessionImplSocket> sessionMap = new ConcurrentHashMap<SessionImplSocket, SessionImplSocket>();
+ private ListenerList<SessionListener> sessionListeners = new ListenerList<SessionListener>(SessionListener.class);
+ private SessionErrorHandler errorHandler;
+
+ private Database database;
+
+ private AcornSessionManagerImpl() {}
+
+ void finish() {
+ sessionMap = null;
+ sessionListeners = null;
+ }
+
+ @Override
+ public void addSessionListener(SessionListener listener) {
+ sessionListeners.add(listener);
+ }
+
+ @Override
+ public Session createSession(SessionReference sessionReference, UserAuthenticationAgent authAgent)
+ throws DatabaseException {
+ SessionImplDb sessionImpl = new SessionImplDb(this, authAgent);
+ boolean ok = false;
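+ // On failure, listeners are notified and the half-open session is closed via LifecycleSupport in the finally block.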
+ try {
+ Path dbFolder = sessionReference.getServerReference().getDBFolder();
+ database = AcornDatabaseManager.getDatabase(dbFolder);
+ Database.Session dbSession = database.newSession(sessionImpl);
+ sessionImpl.connect(sessionReference, dbSession);
+ sessionMap.put(sessionImpl, sessionImpl);
+ fireSessionOpened(sessionImpl);
+ ok = true;
+ } catch (Throwable e) {
+ Logger.defaultLogError("Connection failed. See exception for details.", e);
+ try {
+ fireSessionClosed(sessionImpl, e);
+ sessionMap.remove(sessionImpl);
+ sessionImpl = null;
+ } catch (Throwable t) {
+ // Best-effort cleanup; the original failure is rethrown below.
+ }
+ throw new DatabaseException(e);
+ } finally {
+ if (!ok && null != sessionImpl)
+ sessionImpl.getService(LifecycleSupport.class).close();
+ }
+ return sessionImpl;
+ }
+
+ @Override
+ public void removeSessionListener(SessionListener listener) {
+ sessionListeners.remove(listener);
+ }
+
+ private void fireSessionOpened(SessionImplSocket session) {
+ SessionEvent se = new SessionEvent(session, null);
+ for (SessionListener listener : sessionListeners.getListeners()) {
+ listener.sessionOpened(se);
+ }
+ }
+
+ private void fireSessionClosed(SessionImplSocket session, Throwable cause) {
+ SessionEvent se = new SessionEvent(session, cause);
+ for (SessionListener listener : sessionListeners.getListeners()) {
+ listener.sessionClosed(se);
+ }
+ }
+
+ @Override
+ public void shutdown(Session s, Throwable cause) {
+ SessionImplSocket sis = sessionMap.get(s);
+ if (null == sis)
+ return;
+ try {
+ fireSessionClosed(sis, cause);
+ } finally {
+ sessionMap.remove(s);
+ }
+ }
+
+ @Override
+ public SessionErrorHandler getErrorHandler() {
+ return errorHandler;
+ }
+
+ @Override
+ public void setErrorHandler(SessionErrorHandler errorHandler) {
+ this.errorHandler = errorHandler;
+ }
+
+ public synchronized static AcornSessionManagerImpl getInstance() {
+ if (INSTANCE == null)
+ INSTANCE = new AcornSessionManagerImpl();
+ return INSTANCE;
+ }
+
+ @Override
+ public Database getDatabase() {
+ return database;
+ }
+
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.lru.ChangeSetInfo;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterLRU;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.FileInfo;
+import org.simantics.acorn.lru.LRU;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.Database.Session.ClusterIds;
+import org.simantics.db.Database.Session.ResourceSegment;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.threads.logger.ITask;
+import org.simantics.utils.threads.logger.ThreadLogger;
+
+public class ClusterManager {
+
+ private ArrayList<String> currentChanges = new ArrayList<String>();
+
+ public final Path dbFolder;
+ public Path lastSessionDirectory;
+ public Path workingDirectory;
+
+ public LRU<String, ClusterStreamChunk> streamLRU;
+ public LRU<Long, ChangeSetInfo> csLRU;
+ public ClusterLRU clusterLRU;
+ public LRU<String, FileInfo> fileLRU;
+
+ public MainState mainState;
+ public HeadState state;
+
+ private long lastSnapshot = System.nanoTime();
+
+ final public ClusterSupport2 support = new ClusterSupport2(this);
+
+ /*
+ * Public interface
+ *
+ */
+
+ public ClusterManager(Path dbFolder) {
+ this.dbFolder = dbFolder;
+ }
+
+ public ArrayList<String> getChanges(long changeSetId) {
+ ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+ info.acquireMutex();
+ try {
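+ // The entry may be paged out; makeResident() reloads its data before reading.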
+ info.makeResident();
+ return info.getCSSIds();
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+ }
+
+ public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException {
+ return clusterLRU.getClusterByClusterKey(clusterKey);
+ }
+
+ public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException {
+ return clusterLRU.getClusterByClusterUIDOrMake(clusterUID);
+ }
+
+ public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException {
+ return clusterLRU.getClusterByClusterUIDOrMakeProxy(clusterUID);
+ }
+
+ public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+ return clusterLRU.getClusterKeyByClusterUIDOrMake(clusterUID);
+ }
+
+ public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) {
+ return clusterLRU.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID);
+ }
+
+ public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+ return clusterLRU.getClusterKeyByUIDWithoutMutex(id1, id2);
+ }
+
+ public <T extends ClusterI> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException {
+ return clusterLRU.getClusterProxyByResourceKey(resourceKey);
+ }
+
+ public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException {
+ return clusterLRU.getClusterUIDByResourceKey(resourceKey);
+ }
+
+ public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException {
+ return clusterLRU.getClusterUIDByResourceKeyWithoutMutex(resourceKey);
+ }
+
+ /*
+ * Private implementation
+ *
+ */
+
+ private static long countFiles(Path directory) throws IOException {
+ try (DirectoryStream<Path> ds = Files.newDirectoryStream(directory)) {
+ int count = 0;
+ for (@SuppressWarnings("unused") Path p : ds)
+ ++count;
+ return count;
+ }
+ }
+
+ public synchronized boolean makeSnapshot(ServiceLocator locator, boolean force) throws IOException {
+
+ // Unless forced, take at most one snapshot per 10 seconds
+ if(!force && System.nanoTime() - lastSnapshot < 10*1000000000L) {
+// System.err.println("lastSnapshot too early");
+ return false;
+ }
+
+ // The working directory always contains the cluster files;
+ // fewer than three files means nothing has been written, so there is nothing to snapshot.
+ long amountOfFiles = countFiles(workingDirectory);
+ if(!force && amountOfFiles < 3) {
+// System.err.println("amountOfFiles < 3");
+ return false;
+ }
+
+ System.err.println("makeSnapshot");
+
+ // Schedule writing of all data to disk
+ refreshHeadState();
+
+ // Wait for all files to be written
+ clusterLRU.shutdown();
+ fileLRU.shutdown();
+ streamLRU.shutdown();
+ csLRU.shutdown();
+
+ persistHeadState();
+
+ mainState.save(dbFolder);
+
+ ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
+ cssi.save();
+
+ amountOfFiles = countFiles(workingDirectory);
+
+ System.err.println(" -finished: amount of files is " + amountOfFiles);
+
+ workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+ if (!Files.exists(workingDirectory)) {
+ Files.createDirectories(workingDirectory);
+ }
+
+ cssi.updateReadAndWriteDirectories(lastSessionDirectory, workingDirectory);
+
+ clusterLRU.setWriteDir(workingDirectory);
+ fileLRU.setWriteDir(workingDirectory);
+ streamLRU.setWriteDir(workingDirectory);
+ csLRU.setWriteDir(workingDirectory);
+
+ clusterLRU.resume();
+ fileLRU.resume();
+ streamLRU.resume();
+ csLRU.resume();
+
+ lastSnapshot = System.nanoTime();
+
+ return true;
+
+ }
+
+ public void refreshHeadState() throws IOException {
+
+ state.clusters.clear();
+ state.files.clear();
+ state.stream.clear();
+ state.cs.clear();
+
+ clusterLRU.persist(state.clusters);
+ fileLRU.persist(state.files);
+ streamLRU.persist(state.stream);
+ csLRU.persist(state.cs);
+
+ }
+
+ public void persistHeadState() throws IOException {
+
+ // Sync current working directory
+ Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath);
+ state.save(workingDirectory);
+ mainState.headDir++;
+ }
+
+
+// public void save() throws IOException {
+//
+// refreshHeadState();
+//
+// clusterLRU.shutdown();
+// fileLRU.shutdown();
+// streamLRU.shutdown();
+// csLRU.shutdown();
+//
+// persistHeadState();
+//
+// mainState.save(getBaseDirectory());
+
+// try {
+// ThreadLogVisualizer visualizer = new ThreadLogVisualizer();
+// visualizer.read(new DataInputStream(new FileInputStream(
+// ThreadLogger.LOG_FILE)));
+// visualizer.visualize3(new PrintStream(ThreadLogger.LOG_FILE
+// + ".svg"));
+// } catch (FileNotFoundException e) {
+// // TODO Auto-generated catch block
+// e.printStackTrace();
+// }
+
+ // System.err.println("-- load statistics --");
+ // for(Pair<ClusterUID, Integer> entry :
+ // CollectionUtils.valueSortedEntries(histogram)) {
+ // System.err.println(" " + entry.second + " " + entry.first);
+ // }
+
+// }
+
+ private void acquireAll() {
+ clusterLRU.acquireMutex();
+ fileLRU.acquireMutex();
+ streamLRU.acquireMutex();
+ csLRU.acquireMutex();
+ }
+
+ private void releaseAll() {
+ csLRU.releaseMutex();
+ streamLRU.releaseMutex();
+ fileLRU.releaseMutex();
+ clusterLRU.releaseMutex();
+ }
+
+ public void load() throws IOException {
+
+ // Main state
+ mainState = MainState.load(dbFolder);
+
+ lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1));
+
+ // Head State
+ try {
+ state = HeadState.load(lastSessionDirectory);
+ } catch (InvalidHeadStateException e) {
+ // For backwards compatibility only!
+ Throwable cause = e.getCause();
+ if (cause != null) {
+ try {
+ org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory);
+
+ HeadState newState = new HeadState();
+ newState.clusters = oldState.clusters;
+ newState.cs = oldState.cs;
+ newState.files = oldState.files;
+ newState.stream = oldState.stream;
+ newState.headChangeSetId = oldState.headChangeSetId;
+ newState.reservedIds = oldState.reservedIds;
+ newState.transactionId = oldState.transactionId;
+ state = newState;
+ } catch (InvalidHeadStateException e1) {
+ throw new IOException("Could not load HeadState due to corruption", e1);
+ }
+ } else {
+ // This should never happen as MainState.load() checks the integrity
+ // of head.state files and rolls back in cases of corruption until a
+ // consistent state is found (could be case 0 - initial db state)
+ // IF this does happen something is completely wrong
+ throw new IOException("Could not load HeadState due to corruption", e);
+ }
+ }
+
+ workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+ Files.createDirectories(workingDirectory);
+
+ csLRU = new LRU<Long, ChangeSetInfo>("Change Set", workingDirectory);
+ streamLRU = new LRU<String, ClusterStreamChunk>("Cluster Stream", workingDirectory);
+ clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory);
+ fileLRU = new LRU<String, FileInfo>("External Value", workingDirectory);
+
+ acquireAll();
+
+ // Clusters
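+ // Persisted keys have the form <uid1-hex>.<uid2-hex>#<directory>#<offset>#<length>,
+ // e.g. "1a2b.3c4d#12#0#4096" (hypothetical example).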
+ for (String clusterKey : state.clusters) {
+ String[] parts1 = clusterKey.split("#");
+ String[] parts = parts1[0].split("\\.");
+ long first = new BigInteger(parts[0], 16).longValue();
+ long second = new BigInteger(parts[1], 16).longValue();
+ ClusterUID uuid = ClusterUID.make(first, second);
+ Path readDir = dbFolder.resolve(parts1[1]);
+ int offset = Integer.parseInt(parts1[2]);
+ int length = Integer.parseInt(parts1[3]);
+ clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length));
+ }
+ // Files
+ for (String fileKey : state.files) {
+// System.err.println("loadFile: " + fileKey);
+ String[] parts = fileKey.split("#");
+ Path readDir = dbFolder.resolve(parts[1]);
+ int offset = Integer.parseInt(parts[2]);
+ int length = Integer.parseInt(parts[3]);
+ FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length);
+ fileLRU.map(info);
+ }
+ // Update chunks
+ for (String fileKey : state.stream) {
+// System.err.println("loadStream: " + fileKey);
+ String[] parts = fileKey.split("#");
+ Path readDir = dbFolder.resolve(parts[1]);
+ int offset = Integer.parseInt(parts[2]);
+ int length = Integer.parseInt(parts[3]);
+ ClusterStreamChunk info = new ClusterStreamChunk(this,
+ streamLRU, readDir, parts[0], offset, length);
+ streamLRU.map(info);
+ }
+ // Change sets
+ for (String fileKey : state.cs) {
+ String[] parts = fileKey.split("#");
+ Path readDir = dbFolder.resolve(parts[1]);
+ Long revisionId = Long.parseLong(parts[0]);
+ int offset = Integer.parseInt(parts[2]);
+ int length = Integer.parseInt(parts[3]);
+ ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length);
+ csLRU.map(info);
+ }
+
+ releaseAll();
+
+ }
+
+ public <T> T clone(ClusterUID uid, ClusterCreator creator)
+ throws DatabaseException {
+
+ clusterLRU.ensureUpdates(uid);
+
+ ClusterInfo info = clusterLRU.getWithoutMutex(uid);
+ return info.clone(uid, creator);
+
+ }
+
+ //private int loadCounter = 0;
+
+ public static void startLog(String msg) {
+ tasks.put(msg, ThreadLogger.getInstance().begin(msg));
+ }
+
+ public static void endLog(String msg) {
+ ITask task = tasks.get(msg);
+ if (task != null)
+ task.finish();
+ }
+
+ static Map<String, ITask> tasks = new HashMap<String, ITask>();
+
+ public void update(ClusterUID uid, ClusterImpl clu) {
+
+ ClusterInfo info = clusterLRU.getWithoutMutex(uid);
+ info.acquireMutex();
+ try {
+ info.update(clu);
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+
+ }
+
+ public long getClusterIdOrCreate(ClusterUID clusterUID) {
+ return 1;
+ }
+
+ public int getResourceKey(ClusterUID uid, int index) {
+ return clusterLRU.getResourceKey(uid, index);
+ }
+
+ public int getResourceKeyWitoutMutex(ClusterUID uid, int index) {
+ return clusterLRU.getResourceKeyWithoutMutex(uid, index);
+ }
+
+ public ClusterIds getClusterIds() throws ProCoreException {
+
+ clusterLRU.acquireMutex();
+
+ try {
+
+ Collection<ClusterInfo> infos = clusterLRU.values();
+ final int status = infos.size();
+ final long[] firsts = new long[status];
+ final long[] seconds = new long[status];
+
+ int index = 0;
+ for (ClusterInfo info : infos) {
+ firsts[index] = 0;
+ seconds[index] = info.getKey().second;
+ index++;
+ }
+
+ return new ClusterIds() {
+
+ @Override
+ public int getStatus() {
+ return status;
+ }
+
+ @Override
+ public long[] getFirst() {
+ return firsts;
+ }
+
+ @Override
+ public long[] getSecond() {
+ return seconds;
+ }
+
+ };
+
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ clusterLRU.releaseMutex();
+ }
+
+ }
+
+ public void addIntoCurrentChangeSet(String ccs) {
+
+ csLRU.acquireMutex();
+
+ try {
+
+ currentChanges.add(ccs);
+
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+
+ csLRU.releaseMutex();
+
+ }
+
+ }
+
+ public void commitChangeSet(long changeSetId, byte[] data) {
+ csLRU.acquireMutex();
+ try {
+ ArrayList<String> csids = new ArrayList<String>(currentChanges);
+ currentChanges = new ArrayList<String>();
+ new ChangeSetInfo(csLRU, changeSetId, data, csids);
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ csLRU.releaseMutex();
+ }
+ }
+
+ public byte[] getMetadata(long changeSetId) {
+
+ ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+ if (info == null) return null;
+ info.acquireMutex();
+ try {
+ return info.getMetadataBytes();
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+
+ }
+
+ public byte[] getResourceFile(final byte[] clusterUID,
+ final int resourceIndex) throws ProCoreException {
+
+ ClusterUID uid = ClusterUID.make(clusterUID, 0);
+ String key = uid.toString() + "_" + resourceIndex;
+ FileInfo info = fileLRU.getWithoutMutex(key);
+ if(info == null) return null;
+ info.acquireMutex();
+ try {
+ return info.getResourceFile();
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+
+ }
+
+ public ResourceSegment getResourceSegment(final byte[] clusterUID,
+ final int resourceIndex, final long segmentOffset, short segmentSize)
+ throws ProCoreException {
+
+ ClusterUID uid = ClusterUID.make(clusterUID, 0);
+
+ String key = uid.toString() + "_" + resourceIndex;
+ FileInfo info = fileLRU.getWithoutMutex(key);
+ if(info == null) return null;
+ info.acquireMutex();
+ try {
+ return info.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+
+ }
+
+ public void modiFileEx(ClusterUID uid, int resourceKey, long offset,
+ long size, byte[] bytes, long pos, ClusterSupport support) {
+
+ try {
+
+ String key = uid.toString()
+ + "_"
+ + ClusterTraits
+ .getResourceIndexFromResourceKey(resourceKey);
+
+ FileInfo info = null;
+
+ fileLRU.acquireMutex();
+
+ try {
+
+ info = fileLRU.get(key);
+ if (info == null)
+ info = new FileInfo(fileLRU, key, (int) (offset + size));
+
+
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+
+ fileLRU.releaseMutex();
+
+ }
+
+ info.acquireMutex();
+ try {
+ info.updateData(bytes, offset, pos, size);
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ }
+
+ }
+
+ public void shutdown() {
+ clusterLRU.shutdown();
+ fileLRU.shutdown();
+ streamLRU.shutdown();
+ csLRU.shutdown();
+ }
+
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+
+public class ExternalizableExample implements Externalizable {
+
+ public int first;
+ private long second;
+
+ // Externalizable deserialization requires a public no-arg constructor.
+ public ExternalizableExample() {
+ }
+
+ public ExternalizableExample(int first, long second) {
+ this.first = first;
+ this.second = second;
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ out.writeInt(first);
+ out.writeLong(second);
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ first = in.readInt();
+ second = in.readLong();
+ }
+
+
+ public static void main(String[] args) {
+ Externalizable test = new ExternalizableExample(123, 3456);
+
+ try (ObjectOutputStream stream = new ObjectOutputStream(Files.newOutputStream(Paths.get("C:/Users/Jani Simomaa/Desktop/test"), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) {
+ stream.writeObject(test);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.Files;
+import java.nio.file.OpenOption;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.FileAttribute;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.simantics.databoard.file.RuntimeIOException;
+
+public class FileIO {
+
+ private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0];
+
+ private static final Set<OpenOption> CREATE_OPTIONS = new HashSet<>(2);
+ private static final Set<OpenOption> APPEND_OPTIONS = new HashSet<>(1);
+
+ static {
+ CREATE_OPTIONS.add(StandardOpenOption.WRITE);
+ CREATE_OPTIONS.add(StandardOpenOption.CREATE);
+
+ APPEND_OPTIONS.add(StandardOpenOption.APPEND);
+ }
+
+ private Path path;
+ private int writePosition = 0;
+
+ private FileIO(Path path) {
+ this.path = path;
+ }
+
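+ // One FileIO instance per path so that writePosition stays consistent across callers.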
+ private static Map<Path, FileIO> map = new HashMap<Path, FileIO>();
+
+ public static FileIO get(Path path) {
+ synchronized(map) {
+ FileIO existing = map.get(path);
+ if(existing == null) {
+ existing = new FileIO(path);
+ map.put(path, existing);
+ }
+ return existing;
+ }
+ }
+
+ //private static final boolean TRACE_SWAP = false;
+ private static final boolean TRACE_PERF = false;
+
+ public synchronized int saveBytes(byte[] bytes, int length, boolean overwrite) throws IOException {
+ if(overwrite) writePosition = 0;
+ int result = writePosition;
+ long start = System.nanoTime();
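+ // writePosition == 0 opens the file for writing from the start (creating it if missing); otherwise bytes are appended.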
+ Set<OpenOption> options = writePosition == 0 ? CREATE_OPTIONS : APPEND_OPTIONS;
+
+ ByteBuffer bb = ByteBuffer.wrap(bytes, 0, length);
+ try (FileChannel fc = FileChannel.open(path, options, NO_ATTRIBUTES)) {
+ fc.write(bb);
+ }
+
+ writePosition += length;
+ if(TRACE_PERF) {
+ long duration = System.nanoTime()-start;
+ double ds = 1e-9*duration;
+ System.err.println("Wrote " + bytes.length + " bytes @ " + 1e-6*bytes.length / ds + "MB/s");
+ }
+ return result;
+ }
+
+ public synchronized byte[] readBytes(int offset, int length) throws IOException {
+ long start = System.nanoTime();
+ try (SeekableByteChannel channel = Files.newByteChannel(path)) {
+ channel.position(offset);
+ ByteBuffer buf = ByteBuffer.allocate(length);
+ int read = 0;
+ while (read < length) {
+ read += channel.read(buf);
+ }
+ byte[] result = buf.array();
+ if (result.length != length)
+ System.err.println("faa");
+ if (TRACE_PERF) {
+ long duration = System.nanoTime() - start;
+ double ds = 1e-9 * duration;
+ System.err.println("Read " + result.length + " bytes @ " + 1e-6 * result.length / ds + "MB/s");
+ }
+ return result;
+ }
+ }
+
+ public static void syncPath(Path f) throws IOException {
+ // Mode "rw" plus an explicit FileDescriptor.sync() suffices; "rws" was not needed in a Windows unit test.
+ try (RandomAccessFile raf = new RandomAccessFile(f.toFile(), "rw")) {
+ raf.getFD().sync();
+ }
+ }
+
+ static void uncheckedSyncPath(Path f) {
+ try {
+ syncPath(f);
+ } catch (IOException e) {
+ throw new RuntimeIOException(e);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+
+ byte[] buf = new byte[1024*1024];
+
+ long s = System.nanoTime();
+
+ Path test = Paths.get("e:/work/test.dat");
+ OutputStream fs = Files.newOutputStream(test);
+ OutputStream os = new BufferedOutputStream(fs, 128*1024);
+
+ for(int i=0;i<40;i++) {
+ os.write(buf);
+ }
+
+ os.flush();
+ //fs.getFD().sync();
+ os.close();
+
+ syncPath(test);
+
+ long duration = System.nanoTime()-s;
+ System.err.println("Took " + 1e-6*duration + "ms.");
+
+
+ }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterUpdateProcessorBase;
+import org.simantics.acorn.internal.UndoClusterUpdateProcessor;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.acorn.lru.ClusterChangeSet.Entry;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.Database;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.SDBException;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+import org.simantics.utils.logging.TimeLogger;
+
+import gnu.trove.map.hash.TLongObjectHashMap;
+
+public class GraphClientImpl2 implements Database.Session {
+
+ public static final boolean DEBUG = false;
+
+ public final ClusterManager clusters;
+
+ private TransactionManager transactionManager = new TransactionManager();
+ private ExecutorService executor = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Main Program", false));
+ private ExecutorService saver = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Snapshot Saver", true));
+
+ private static GraphClientImpl2 INSTANCE;
+ private Path dbFolder;
+ private final Database database;
+ private ServiceLocator locator;
+ private MainProgram mainProgram;
+
+ static class ClientThreadFactory implements ThreadFactory {
+
+ final String name;
+ final boolean daemon;
+
+ public ClientThreadFactory(String name, boolean daemon) {
+ this.name = name;
+ this.daemon = daemon;
+ }
+
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread thread = new Thread(r, name);
+ thread.setDaemon(daemon);
+ return thread;
+ }
+ }
+
+ public GraphClientImpl2(Database database, Path dbFolder, ServiceLocator locator) throws IOException {
+ this.database = database;
+ this.dbFolder = dbFolder;
+ this.locator = locator;
+ this.clusters = new ClusterManager(dbFolder);
+ load();
+ ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
+ cssi.updateReadAndWriteDirectories(clusters.lastSessionDirectory, clusters.workingDirectory);
+ mainProgram = new MainProgram(this, clusters);
+ executor.execute(mainProgram);
+ INSTANCE = this;
+ }
+
+ public Path getDbFolder() {
+ return dbFolder;
+ }
+
+ public void tryMakeSnapshot() throws IOException {
+
+ if (isClosing)
+ return;
+
+ saver.execute(new Runnable() {
+
+ @Override
+ public void run() {
+ Transaction tr = null;
+ try {
+ // First take a write transaction
+ tr = askWriteTransaction(-1);
+ // Then make sure that MainProgram is idling
+ mainProgram.mutex.acquire();
+ try {
+ synchronized(mainProgram) {
+ if(mainProgram.operations.isEmpty()) {
+ makeSnapshot(false);
+ } else {
+ // MainProgram is becoming busy again - delay snapshotting
+ return;
+ }
+ }
+ } finally {
+ mainProgram.mutex.release();
+ }
+ } catch (IOException e) {
+ Logger.defaultLogError(e);
+ } catch (ProCoreException e) {
+ Logger.defaultLogError(e);
+ } catch (InterruptedException e) {
+ Logger.defaultLogError(e);
+ } finally {
+ try {
+ if(tr != null)
+ endTransaction(tr.getTransactionId());
+ } catch (ProCoreException e) {
+ Logger.defaultLogError(e);
+ }
+ }
+ }
+
+ });
+ }
+
+ public void makeSnapshot(boolean force) throws IOException {
+ if (safeToMakeSnapshot)
+ clusters.makeSnapshot(locator, force);
+ }
+
+ public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException {
+ return clusters.clone(uid, creator);
+ }
+
+// private void save() throws IOException {
+// clusters.save();
+// }
+
+ public void load() throws IOException {
+ clusters.load();
+ }
+
+// public void modiFileEx(ClusterUID uid, int resourceKey, long offset, long size, byte[] bytes, long pos, ClusterSupport support) {
+// clusters.modiFileEx(uid, resourceKey, offset, size, bytes, pos, support);
+// }
+
+ @Override
+ public Database getDatabase() {
+ return database;
+ }
+
+ private boolean closed = false;
+ private boolean isClosing = false;
+ // Snapshot guard: cancelCommit() is unsupported and may corrupt head.state writing, so it disables further snapshots.
+ private boolean safeToMakeSnapshot = true;
+
+ @Override
+ public void close() throws ProCoreException {
+ System.err.println("Closing " + this + " and mainProgram " + mainProgram);
+ if(!closed && !isClosing) {
+ isClosing = true;
+ try {
+ makeSnapshot(true);
+
+ mainProgram.close();
+ clusters.shutdown();
+ executor.shutdown();
+ saver.shutdown();
+ boolean executorTerminated = executor.awaitTermination(500, TimeUnit.MILLISECONDS);
+ boolean saverTerminated = saver.awaitTermination(500, TimeUnit.MILLISECONDS);
+
+ System.err.println("executorTerminated=" + executorTerminated + ", saverTerminated=" + saverTerminated);
+
+ INSTANCE = null;
+ mainProgram = null;
+ executor = null;
+ saver = null;
+
+ } catch (IOException | InterruptedException e) {
+ throw new ProCoreException(e);
+ }
+ }
+ closed = true;
+ //impl.close();
+ }
+
+ @Override
+ public void open() throws ProCoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean isClosed() throws ProCoreException {
+ return closed;
+ }
+
+ @Override
+ public void acceptCommit(long transactionId, long changeSetId, byte[] metadata) throws ProCoreException {
+
+ clusters.state.headChangeSetId++;
+
+ long committedChangeSetId = changeSetId + 1;
+
+ clusters.commitChangeSet(committedChangeSetId, metadata);
+
+ clusters.state.transactionId = transactionId;
+
+ mainProgram.committed();
+
+ TimeLogger.log("Accepted commit");
+
+ }
+
+ @Override
+ public long cancelCommit(long transactionId, long changeSetId,
+ byte[] metadata, OnChangeSetUpdate onChangeSetUpdate)
+ throws ProCoreException {
+ safeToMakeSnapshot = false;
+ throw new UnsupportedOperationException("org.simantics.acorn.GraphClientImpl2.cancelCommit() is not supported operation! Closing down to prevent further havoc");
+// System.err.println("GraphClientImpl2.cancelCommit() called!! this is experimental and might cause havoc!");
+// try {
+// undo(new long[] {changeSetId}, onChangeSetUpdate);
+// } catch (SDBException e) {
+// e.printStackTrace();
+// throw new ProCoreException(e);
+// }
+// clusters.state.headChangeSetId++;
+// return clusters.state.headChangeSetId;
+ }
+
+ @Override
+ public Transaction askReadTransaction() throws ProCoreException {
+ return transactionManager.askReadTransaction();
+ }
+
+ enum TransactionState {
+ IDLE, WRITE, READ
+ }
+
+ class TransactionRequest {
+ public TransactionState state;
+ public Semaphore semaphore;
+ public TransactionRequest(TransactionState state, Semaphore semaphore) {
+ this.state = state;
+ this.semaphore = semaphore;
+ }
+ }
+
+ class TransactionManager {
+
+ private TransactionState currentTransactionState = TransactionState.IDLE;
+
+ private int reads = 0;
+
+ LinkedList<TransactionRequest> requests = new LinkedList<TransactionRequest>();
+
+ TLongObjectHashMap<TransactionRequest> requestMap = new TLongObjectHashMap<TransactionRequest>();
+
+ private synchronized Transaction makeTransaction(TransactionRequest req) {
+
+ final int csId = clusters.state.headChangeSetId;
+ final long trId = clusters.state.transactionId+1;
+ requestMap.put(trId, req);
+ return new Transaction() {
+
+ @Override
+ public long getTransactionId() {
+ return trId;
+ }
+
+ @Override
+ public long getHeadChangeSetId() {
+ return csId;
+ }
+ };
+ }
+
+ /*
+ * This method cannot be synchronized since it waits and must support multiple entries
+ * by query thread(s) and internal transactions such as snapshot saver
+ */
+ public Transaction askReadTransaction() throws ProCoreException {
+
+ Semaphore semaphore = new Semaphore(0);
+
+ TransactionRequest req = queue(TransactionState.READ, semaphore);
+
+ try {
+ semaphore.acquire();
+ } catch (InterruptedException e) {
+ throw new ProCoreException(e);
+ }
+
+ return makeTransaction(req);
+
+ }
+
+ private synchronized void dispatch() {
+ TransactionRequest r = requests.removeFirst();
+ if(r.state == TransactionState.READ) reads++;
+ r.semaphore.release();
+ }
+
+ private synchronized void processRequests() {
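+ // Admission policy: IDLE accepts any request; READ admits further reads only;
+ // WRITE admits nothing until endTransaction() returns the state to IDLE.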
+
+ while(true) {
+
+ if(requests.isEmpty()) return;
+ TransactionRequest req = requests.peek();
+
+ if(currentTransactionState == TransactionState.IDLE) {
+
+ // Accept anything while IDLE
+ currentTransactionState = req.state;
+ dispatch();
+
+ } else if (currentTransactionState == TransactionState.READ) {
+
+ if(req.state == currentTransactionState) {
+
+ // Allow other reads
+ dispatch();
+
+ } else {
+
+ // Wait
+ return;
+
+ }
+
+ } else if (currentTransactionState == TransactionState.WRITE) {
+
+ // Wait
+ return;
+
+ }
+
+ }
+
+ }
+
+ private synchronized TransactionRequest queue(TransactionState state, Semaphore semaphore) {
+ TransactionRequest req = new TransactionRequest(state, semaphore);
+ requests.addLast(req);
+ processRequests();
+ return req;
+ }
+
+ /*
+ * This method cannot be synchronized since it waits and must support multiple entries
+ * by query thread(s) and internal transactions such as snapshot saver
+ */
+ public Transaction askWriteTransaction()
+ throws ProCoreException {
+
+ Semaphore semaphore = new Semaphore(0);
+
+ TransactionRequest req = queue(TransactionState.WRITE, semaphore);
+
+ try {
+ semaphore.acquire();
+ } catch (InterruptedException e) {
+ throw new ProCoreException(e);
+ }
+
+ mainProgram.startTransaction(clusters.state.headChangeSetId+1);
+
+ return makeTransaction(req);
+
+ }
+
+ public synchronized long endTransaction(long transactionId) throws ProCoreException {
+
+ TransactionRequest req = requestMap.remove(transactionId);
+ if(req.state == TransactionState.WRITE) {
+ currentTransactionState = TransactionState.IDLE;
+ processRequests();
+ } else {
+ reads--;
+ if(reads == 0) {
+ currentTransactionState = TransactionState.IDLE;
+ processRequests();
+ }
+ }
+ return clusters.state.transactionId;
+ }
+
+ }
+
+ @Override
+ public Transaction askWriteTransaction(final long transactionId)
+ throws ProCoreException {
+ return transactionManager.askWriteTransaction();
+ }
+
+ @Override
+ public long endTransaction(long transactionId) throws ProCoreException {
+ return transactionManager.endTransaction(transactionId);
+ }
+
+ @Override
+ public String execute(String command) throws ProCoreException {
+ // This is called only by WriteGraphImpl.commitAccessorChanges
+ // We can ignore this in Acorn
+ return "";
+ }
+
+ @Override
+ public byte[] getChangeSetMetadata(long changeSetId)
+ throws ProCoreException {
+ return clusters.getMetadata(changeSetId);
+ }
+
+ @Override
+ public ChangeSetData getChangeSetData(long minChangeSetId,
+ long maxChangeSetId, OnChangeSetUpdate onChangeSetupate)
+ throws ProCoreException {
+
+ new Exception("GetChangeSetDataFunction " + minChangeSetId + " " + maxChangeSetId).printStackTrace();;
+ return null;
+
+ }
+
+ @Override
+ public ChangeSetIds getChangeSetIds() throws ProCoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Cluster getCluster(byte[] clusterId) throws ProCoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterChanges getClusterChanges(long changeSetId, byte[] clusterId)
+ throws ProCoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterIds getClusterIds() throws ProCoreException {
+ return clusters.getClusterIds();
+ }
+
+ @Override
+ public Information getInformation() throws ProCoreException {
+ return new Information() {
+
+ @Override
+ public String getServerId() {
+ return "server";
+ }
+
+ @Override
+ public String getProtocolId() {
+ return "";
+ }
+
+ @Override
+ public String getDatabaseId() {
+ return "database";
+ }
+
+ @Override
+ public long getFirstChangeSetId() {
+ return 0;
+ }
+
+ };
+ }
+
+ @Override
+ public Refresh getRefresh(long changeSetId) throws ProCoreException {
+
+ final ClusterIds ids = getClusterIds();
+
+ return new Refresh() {
+
+ @Override
+ public long getHeadChangeSetId() {
+ return clusters.state.headChangeSetId;
+ }
+
+ @Override
+ public long[] getFirst() {
+ return ids.getFirst();
+ }
+
+ @Override
+ public long[] getSecond() {
+ return ids.getSecond();
+ }
+
+ };
+
+ }
+
+ public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws ProCoreException {
+ return clusters.getResourceFile(clusterUID, resourceIndex);
+ }
+
+ @Override
+ public ResourceSegment getResourceSegment(final byte[] clusterUID,
+ final int resourceIndex, final long segmentOffset, short segmentSize) throws ProCoreException {
+
+ return clusters.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize);
+
+ }
+
+ @Override
+ public long reserveIds(int count) throws ProCoreException {
+ return clusters.state.reservedIds++;
+ }
+
+ @Override
+ public void updateCluster(byte[] operations) throws ProCoreException {
+
+ ClusterUpdateOperation operation = new ClusterUpdateOperation(clusters, operations);
+ ClusterInfo info = clusters.clusterLRU.getOrCreate(operation.uid, true);
+ if(info == null) throw new IllegalStateException();
+ info.acquireMutex();
+ try {
+ info.scheduleUpdate();
+ mainProgram.schedule(operation);
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ info.releaseMutex();
+ }
+
+ }
+
+ private UndoClusterUpdateProcessor getUndoCSS(String ccsId) throws DatabaseException {
+
+ String[] ss = ccsId.split("\\.");
+ String chunkKey = ss[0];
+ int chunkOffset = Integer.parseInt(ss[1]);
+ ClusterStreamChunk chunk = clusters.streamLRU.getWithoutMutex(chunkKey);
+ if(chunk == null) throw new IllegalStateException("Cluster Stream Chunk " + chunkKey + " was not found.");
+ chunk.acquireMutex();
+ try {
+ return chunk.getUndoProcessor(clusters, chunkOffset, ccsId);
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ chunk.releaseMutex();
+ }
+
+ }
+
+ private void performUndo(String ccsId, ArrayList<Pair<ClusterUID, byte[]>> clusterChanges, UndoClusterSupport support) throws ProCoreException, DatabaseException {
+
+ UndoClusterUpdateProcessor proc = getUndoCSS(ccsId);
+
+ int clusterKey = clusters.getClusterKeyByClusterUIDOrMakeWithoutMutex(proc.getClusterUID());
+
+ clusters.clusterLRU.acquireMutex();
+ try {
+
+ ClusterChange cs = new ClusterChange(clusterChanges, proc.getClusterUID());
+ for(int i=0;i<proc.entries.size();i++) {
+
+ Entry e = proc.entries.get(proc.entries.size() - 1 - i);
+ e.process(clusters, cs, clusterKey);
+
+ }
+
+ cs.flush();
+
+ } finally {
+ clusters.clusterLRU.releaseMutex();
+ }
+
+ }
+
+ @Override
+ public boolean undo(long[] changeSetIds, OnChangeSetUpdate onChangeSetUpdate) throws SDBException {
+
+ final ArrayList<Pair<ClusterUID, byte[]>> clusterChanges = new ArrayList<Pair<ClusterUID, byte[]>>();
+
+ UndoClusterSupport support = new UndoClusterSupport(clusters);
+
+ final int changeSetId = clusters.state.headChangeSetId;
+
+ if(ClusterUpdateProcessorBase.DEBUG)
+ System.err.println(" === BEGIN UNDO ===");
+
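+ // Change sets are undone newest-first, and each change set's cluster change sets in reverse order.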
+ for(int i=0;i<changeSetIds.length;i++) {
+ final long id = changeSetIds[changeSetIds.length-1-i];
+ ArrayList<String> ccss = clusters.getChanges(id);
+ for(int j=0;j<ccss.size();j++) {
+ try {
+ if(ClusterUpdateProcessorBase.DEBUG)
+ System.err.println("performUndo " + ccss.get(ccss.size()-j-1));
+ performUndo(ccss.get(ccss.size()-j-1), clusterChanges, support);
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ if(ClusterUpdateProcessorBase.DEBUG)
+ System.err.println(" === END UNDO ===");
+
+ for(int i=0;i<clusterChanges.size();i++) {
+
+ final int changeSetIndex = i;
+
+ final Pair<ClusterUID, byte[]> pair = clusterChanges.get(i);
+
+ final ClusterUID cuid = pair.first;
+ final byte[] data = pair.second;
+
+ onChangeSetUpdate.onChangeSetUpdate(new ChangeSetUpdate() {
+
+ @Override
+ public long getChangeSetId() {
+ return changeSetId;
+ }
+
+ @Override
+ public int getChangeSetIndex() {
+ return 0;
+ }
+
+ @Override
+ public int getNumberOfClusterChangeSets() {
+ return clusterChanges.size();
+ }
+
+ @Override
+ public int getIndexOfClusterChangeSet() {
+ return changeSetIndex;
+ }
+
+ @Override
+ public byte[] getClusterId() {
+ return cuid.asBytes();
+ }
+
+ @Override
+ public boolean getNewCluster() {
+ return false;
+ }
+
+ @Override
+ public byte[] getData() {
+ return data;
+ }
+
+ });
+
+ }
+
+
+ return false;
+
+ }
+
+ public static GraphClientImpl2 getInstance() {
+ return INSTANCE;
+ }
+
+ public ServiceLocator getServiceLocator() {
+ return locator;
+ }
+
+ @Override
+ public boolean refreshEnabled() {
+ return false;
+ }
+
+}
+
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.simantics.databoard.Bindings;
+import org.simantics.databoard.binding.mutable.MutableVariant;
+import org.simantics.databoard.serialization.Serializer;
+import org.simantics.databoard.util.binary.BinaryMemory;
+
+public class HeadState {
+
+ public int headChangeSetId = 0;
+ public long transactionId = 1;
+ public long reservedIds = 3;
+
+ public ArrayList<String> clusters = new ArrayList<>();
+ public ArrayList<String> files = new ArrayList<>();
+ public ArrayList<String> stream = new ArrayList<>();
+ public ArrayList<String> cs = new ArrayList<>();
+// public ArrayList<String> ccs = new ArrayList<String>();
+
+ public static HeadState load(Path directory) throws InvalidHeadStateException {
+ Path f = directory.resolve("head.state");
+
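+ // File layout: SHA-1 digest of the payload first, followed by the databoard-serialized HeadState.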
+ try {
+ byte[] bytes = Files.readAllBytes(f);
+ MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+ int digestLength = sha1.getDigestLength();
+
+ sha1.update(bytes, digestLength, bytes.length - digestLength);
+ byte[] newChecksum = sha1.digest();
+ if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+ throw new InvalidHeadStateException(
+ "Checksum " + Arrays.toString(newChecksum) + " does not match excpected "
+ + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath());
+ }
+
+ HeadState object = (HeadState) org.simantics.databoard.Files.readFile(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength), Bindings.getBindingUnchecked(HeadState.class));
+ return object;
+
+ } catch (IOException i) {
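+ // head.state could not be read: fall back to a default initial state (fresh database).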
+ return new HeadState();
+// throw new InvalidHeadStateException(i);
+ } catch (NoSuchAlgorithmException e) {
+ throw new Error("SHA-1 Algorithm not found", e);
+ } catch (Throwable t) {
+ throw new InvalidHeadStateException(t);
+ }
+ }
+
+ public void save(Path directory) throws IOException {
+ Path f = directory.resolve("head.state");
+ try {
+ BinaryMemory rf = new BinaryMemory(4096);
+ try {
+ MutableVariant v = new MutableVariant(Bindings.getBindingUnchecked(HeadState.class), this);
+ Serializer s = Bindings.getSerializerUnchecked( Bindings.VARIANT );
+ s.serialize(rf, v);
+ } finally {
+ rf.close();
+ }
+
+ byte[] bytes = rf.toByteBuffer().array();
+
+ MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+ sha1.update(bytes);
+ byte[] checksum = sha1.digest();
+
+ try (OutputStream out = Files.newOutputStream(f)) {
+ out.write(checksum);
+ out.write(bytes);
+ }
+ FileIO.syncPath(f);
+ } catch (NoSuchAlgorithmException e) {
+ throw new Error("SHA-1 digest not found, should not happen", e);
+ }
+ }
+
+ public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException {
+ try {
+ byte[] bytes = Files.readAllBytes(headState);
+ MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+ int digestLength = sha1.getDigestLength();
+ sha1.update(bytes, digestLength, bytes.length - digestLength);
+ byte[] newChecksum = sha1.digest();
+ if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) {
+ throw new InvalidHeadStateException(
+ "Checksum " + Arrays.toString(newChecksum) + " does not match excpected "
+ + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath());
+ }
+ } catch (NoSuchAlgorithmException e) {
+ throw new Error("SHA-1 digest not found, should not happen", e);
+ }
+ }
+}
--- /dev/null
+package org.simantics.acorn;
+
+public class InvalidHeadStateException extends Exception {
+
+ private static final long serialVersionUID = -7291859180968235955L;
+
+ public InvalidHeadStateException() {
+ super();
+ }
+
+ public InvalidHeadStateException(String message, Throwable cause, boolean enableSuppression,
+ boolean writableStackTrace) {
+ super(message, cause, enableSuppression, writableStackTrace);
+ }
+
+ public InvalidHeadStateException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public InvalidHeadStateException(String message) {
+ super(message);
+ }
+
+ public InvalidHeadStateException(Throwable cause) {
+ super(cause);
+ }
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.ClusterUpdateOperation;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.logging.TimeLogger;
+
+public class MainProgram implements Runnable, Closeable {
+
+ private static final int CLUSTER_THREADS = 4;
+ private static final int CHUNK_CACHE_SIZE = 100;
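+ // CLUSTER_THREADS must remain a power of two: run() distributes work with
+ // key.hashCode() & (CLUSTER_THREADS - 1), which only acts as a modulo for
+ // power-of-two sizes.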
+
+ private final GraphClientImpl2 client;
+ private final ClusterManager clusters;
+ private final ExecutorService[] clusterUpdateThreads;
+ private final List<ClusterUpdateOperation>[] updateSchedules;
+
+ private int residentOperationBytes = 0;
+ private long currentChangeSetId = -1;
+ private int nextChunkId = 0;
+ private boolean alive = true;
+ private Semaphore deathBarrier = new Semaphore(0);
+
+ final Semaphore mutex = new Semaphore(1);
+ final LinkedList<ClusterStreamChunk> operations = new LinkedList<>();
+
+ static class ClusterThreadFactory implements ThreadFactory {
+
+ final String name;
+ final boolean daemon;
+
+ public ClusterThreadFactory(String name, boolean daemon) {
+ this.name = name;
+ this.daemon = daemon;
+ }
+
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread thread = new Thread(r, name);
+ thread.setDaemon(daemon);
+ return thread;
+ }
+ }
+
+ public MainProgram(GraphClientImpl2 client, ClusterManager clusters) {
+
+ this.client = client;
+ this.clusters = clusters;
+ this.clusterUpdateThreads = new ExecutorService[CLUSTER_THREADS];
+ this.updateSchedules = new ArrayList[CLUSTER_THREADS];
+ for(int i=0;i<clusterUpdateThreads.length;i++) {
+ clusterUpdateThreads[i] = Executors.newSingleThreadExecutor(new ClusterThreadFactory("Cluster Updater " + (i+1), false));
+ updateSchedules[i] = new ArrayList<ClusterUpdateOperation>();
+ }
+ }
+
+ public void startTransaction(long id) {
+ currentChangeSetId = id;
+ nextChunkId = 0;
+ }
+
+ // Clusters are ordered by the second (id) component of their UID.
+ private static final Comparator<ClusterUID> clusterComparator = (o1, o2) -> Long.compare(o1.second, o2.second);
+
+ @Override
+ public void run() {
+ try {
+
+ mutex.acquire();
+ main:
+ while(alive) {
+
+ TreeMap<ClusterUID, List<ClusterUpdateOperation>> updates = new TreeMap<ClusterUID, List<ClusterUpdateOperation>>(clusterComparator);
+
+ synchronized(MainProgram.this) {
+
+ while(!operations.isEmpty() && updates.size() < 100) {
+
+ ClusterStreamChunk chunk = operations.pollFirst();
+
+ for(int i=chunk.nextToProcess;i<chunk.operations.size();i++) {
+ ClusterUpdateOperation o = chunk.operations.get(i);
+ ClusterUID uid = o.uid;
+ List<ClusterUpdateOperation> ops = updates.get(uid);
+ if(ops == null) {
+ ops = new ArrayList<ClusterUpdateOperation>();
+ updates.put(uid, ops);
+ }
+ ops.add(o);
+ }
+
+ chunk.nextToProcess = chunk.operations.size();
+
+ if(!chunk.isCommitted()) {
+ assert(operations.isEmpty());
+ operations.add(chunk);
+ break;
+ }
+
+ }
+
+ if(updates.isEmpty()) {
+ try {
+ long start = System.nanoTime();
+ mutex.release();
+ MainProgram.this.wait(5000);
+ mutex.acquire();
+ if (!alive)
+ break main;
+ long duration = System.nanoTime()-start;
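+ // 4000000000L ns == 4 s: a wait this long is treated as an idle
+ // timeout rather than a wake-up for new work.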
+ if(duration > 4000000000L) {
+
+ // Was this a time-out or a new stream request?
+ if(operations.isEmpty()) {
+
+ /*
+ * We are idling here.
+ * Flush all caches gradually
+ */
+
+ // Write pending cs to disk
+ boolean written = clusters.csLRU.swapForced();
+ while(written) {
+ if(!updates.isEmpty()) break;
+ written = clusters.csLRU.swapForced();
+ }
+ // Write pending chunks to disk
+ written = clusters.streamLRU.swapForced();
+ while(written) {
+ if(!updates.isEmpty()) break;
+ written = clusters.streamLRU.swapForced();
+ }
+ // Write pending files to disk
+ written = clusters.fileLRU.swapForced();
+ while(written) {
+ if(!updates.isEmpty()) break;
+ written = clusters.fileLRU.swapForced();
+ }
+ // Write pending clusters to disk
+ written = clusters.clusterLRU.swapForced();
+ while(written) {
+ if(!updates.isEmpty()) break;
+ written = clusters.clusterLRU.swapForced();
+ }
+
+ client.tryMakeSnapshot();
+ }
+ }
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ }
+
+ }
+
+// long sss = System.nanoTime();
+
+ for(int i=0;i<CLUSTER_THREADS;i++)
+ updateSchedules[i].clear();
+
+ final Semaphore s = new Semaphore(0);
+
+ for(Map.Entry<ClusterUID, List<ClusterUpdateOperation>> entry : updates.entrySet()) {
+ ClusterUID key = entry.getKey();
+ int hash = key.hashCode() & (clusterUpdateThreads.length-1);
+ updateSchedules[hash].addAll(entry.getValue());
+ }
+
+ // final AtomicLong elapsed = new AtomicLong(0);
+ int acquireAmount = 0;
+ for(int i=0;i<CLUSTER_THREADS;i++) {
+ final List<ClusterUpdateOperation> ops = updateSchedules[i];
+ if (!ops.isEmpty()) {
+ acquireAmount++;
+ clusterUpdateThreads[i].execute(() -> {
+
+ //long st = System.nanoTime();
+ for(ClusterUpdateOperation op : ops) {
+ op.run();
+ }
+ s.release();
+ // long duration = System.nanoTime()-st;
+ // elapsed.addAndGet(duration);
+ // double dur = 1e-9*duration;
+ // if(dur > 0.05)
+ // System.err.println("duration=" + dur + "s. " + ops.size());
+ });
+ }
+ }
+
+ s.acquire(acquireAmount);
+
+ /*
+ * Here we are actively processing updates from client.
+ * Maintain necessary caching here.
+ */
+
+ clusters.streamLRU.acquireMutex();
+ try {
+ swapChunks();
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ clusters.streamLRU.releaseMutex();
+ }
+ clusters.csLRU.acquireMutex();
+ try {
+ swapCS();
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+ clusters.csLRU.releaseMutex();
+ }
+
+ TimeLogger.log("Performed updates");
+
+ }
+
+ } catch (Throwable t) {
+ t.printStackTrace();
+ } finally {
+ deathBarrier.release();
+ }
+
+ }
+
+ /*
+ * The caller is assumed to hold the streamLRU mutex.
+ */
+ private void swapChunks() {
+
+ // Cache chunks during update operations
+ boolean written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+ while(written) {
+ written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+ }
+
+ }
+
+ private void swapCS() {
+
+ // Cache change sets during update operations
+ boolean written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+ while(written) {
+ written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE);
+ }
+
+ }
+
+ public synchronized void committed() {
+
+ ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast();
+ if (!alive) {
+ System.err.println("Trying to commit operation after MainProgram is closed! Operation is " + last);
+// return;
+ }
+ if(last != null) last.commit();
+
+ }
+
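+ // Each scheduled operation is tagged with an address of the form
+ // "<changeSetId>-<chunkIndex>.<offsetInChunk>", built from the chunk id
+ // and offset below.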
+ public synchronized void schedule(ClusterUpdateOperation operation) {
+ if (!alive) {
+ System.err.println("Trying to schedule operation after MainProgram is closed! Operation is " + operation);
+// return;
+ }
+ clusters.streamLRU.acquireMutex();
+
+ try {
+
+ ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast();
+ if(last == null || last.isCommitted()) {
+ String id = "" + currentChangeSetId + "-" + nextChunkId++;
+ last = new ClusterStreamChunk(clusters, clusters.streamLRU, id);
+ operations.add(last);
+ }
+
+ String chunkId = last.getKey();
+ int chunkOffset = last.operations.size();
+ operation.scheduled(chunkId + "." + chunkOffset);
+
+ last.addOperation(operation);
+
+ swapChunks();
+
+ notifyAll();
+
+ } catch (Throwable t) {
+ throw new IllegalStateException(t);
+ } finally {
+
+ clusters.streamLRU.releaseMutex();
+
+ }
+
+ }
+
+ @Override
+ public void close() {
+ alive = false;
+ synchronized (this) {
+ notifyAll();
+ }
+ try {
+ deathBarrier.acquire();
+ } catch (InterruptedException e) {
+ // Best-effort close: restore the interrupt status and continue shutdown.
+ Thread.currentThread().interrupt();
+ }
+
+ for (ExecutorService executor : clusterUpdateThreads)
+ executor.shutdown();
+
+ for (int i = 0; i < clusterUpdateThreads.length; i++) {
+ try {
+ ExecutorService executor = clusterUpdateThreads[i];
+ executor.awaitTermination(500, TimeUnit.MILLISECONDS);
+ clusterUpdateThreads[i] = null;
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.simantics.databoard.file.RuntimeIOException;
+import org.simantics.utils.FileUtils;
+
+public class MainState implements Serializable {
+
+ private static final long serialVersionUID = 6237383147637270225L;
+
+ public int headDir = 0;
+
+ public MainState() {
+ }
+
+ public MainState(int headDir) {
+ this.headDir = headDir;
+ }
+
+ public static MainState load(Path directory) throws IOException {
+ Files.createDirectories(directory);
+ Path f = directory.resolve("main.state");
+ try {
+ MainState state = null;
+ try (ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(Files.newInputStream(f)))) {
+ state = (MainState) in.readObject();
+ }
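+ // Walk backwards from the recorded head directory until one with a
+ // valid head.state is found, deleting corrupted head directories on
+ // the way.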
+ while (true) {
+ Path last = directory.resolve(Integer.toString(state.headDir - 1));
+ try {
+ Path headState = last.resolve("head.state");
+ HeadState.validateHeadStateIntegrity(headState);
+ break;
+ } catch (InvalidHeadStateException e) {
+ e.printStackTrace();
+ state.headDir--;
+ uncheckedDeleteAll(last);
+ }
+ }
+ return state;
+ } catch(IOException i) {
+ return new MainState( findNewHeadState(directory) );
+ } catch(ClassNotFoundException c) {
+ throw new Error("MainState class not found", c);
+ } finally {
+ if (Files.exists(f)) {
+ Files.delete(f);
+ }
+ }
+ }
+
+ public void save(Path directory) throws IOException {
+ Path f = directory.resolve("main.state");
+ try (ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(Files.newOutputStream(f)))) {
+ out.writeObject(this);
+ }
+ FileIO.syncPath(f);
+ }
+
+ private static boolean isInteger(Path p) {
+ try {
+ Integer.parseInt(p.getFileName().toString());
+ return true;
+ } catch (NumberFormatException e) {
+ return false;
+ }
+ }
+
+ /**
+ * TODO: this method does two things at once: it both searches for the newest valid head.state and cleans up invalid head directories. Consider splitting these responsibilities.
+ *
+ * @param directory
+ * @return
+ * @throws IOException
+ */
+ private static int findNewHeadState(Path directory) throws IOException {
+ try (Stream<Path> s = Files.walk(directory, 1)) {
+ List<Path> reverseSortedPaths = s
+ .filter(p -> !p.equals(directory) && isInteger(p) && Files.isDirectory(p))
+ .sorted((p1, p2) -> {
+ int p1Name = Integer.parseInt(p1.getFileName().toString());
+ int p2Name = Integer.parseInt(p2.getFileName().toString());
+ return Integer.compare(p2Name, p1Name);
+ }).collect(Collectors.toList());
+
+ int largest = -1;
+ for (Path last : reverseSortedPaths) {
+ Path headState = last.resolve("head.state");
+ if (Files.exists(headState)) {
+ try {
+ HeadState.validateHeadStateIntegrity(headState);
+ largest = safeParseInt(-1, last.getFileName().toString());
+ break;
+ } catch (IOException | InvalidHeadStateException e) {
+ e.printStackTrace();
+ uncheckedDeleteAll(last);
+ }
+ } else {
+ uncheckedDeleteAll(last);
+ }
+ }
+ // +1 because we want to return the next head version to use,
+ // not the latest existing version.
+ return largest + 1;
+ }
+ }
+
+ private static int safeParseInt(int defaultValue, String s) {
+ try {
+ return Integer.parseInt(s);
+ } catch (NumberFormatException e) {
+ return defaultValue;
+ }
+ }
+
+ private static void uncheckedDeleteAll(Path path) {
+ try {
+ FileUtils.deleteAll(path.toFile());
+ } catch (IOException e) {
+ throw new RuntimeIOException(e);
+ }
+ }
+
+}
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
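+ /**
+ * Minimal persistence contract: implementations write themselves to a
+ * file and can be re-initialized from a raw byte array.
+ */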
+public interface Persistable {
+
+ void toFile(Path path) throws IOException;
+ void fromFile(byte[] data);
+
+}
\ No newline at end of file
--- /dev/null
+package org.simantics.acorn;
+
+import java.io.InputStream;
+
+import org.simantics.db.Session;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.service.ClusterUID;
+
+public class UndoClusterSupport implements ClusterSupport {
+
+ final ClusterManager impl;
+
+ public UndoClusterSupport(ClusterManager impl) {
+ this.impl = impl;
+ }
+
+ @Override
+ public int createClusterKeyByClusterUID(ClusterUID clusterUID,
+ long clusterId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterBase getClusterByClusterId(long clusterId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterBase getClusterByClusterKey(int clusterKey) {
+ try {
+ return impl.getClusterByClusterKey(clusterKey);
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ return null;
+ }
+ }
+
+ @Override
+ public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterBase getClusterByResourceKey(int resourceKey) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getClusterIdOrCreate(ClusterUID clusterUID) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void addStatement(Object cluster) {
+ }
+
+ @Override
+ public void cancelStatement(Object cluster) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void removeStatement(Object cluster) {
+ }
+
+ @Override
+ public void cancelValue(Object cluster) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void removeValue(Object cluster) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setValue(Object cluster, long clusterId, byte[] bytes,
+ int length) {
+ }
+
+ @Override
+ public void modiValue(Object cluster, long clusterId, long voffset,
+ int length, byte[] bytes, int offset) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setImmutable(Object cluster, boolean immutable) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setDeleted(Object cluster, boolean deleted) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void createResource(Object cluster, short resourceIndex,
+ long clusterId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void addStatementIndex(Object cluster, int resourceKey,
+ ClusterUID clusterUID, byte op) {
+ }
+
+ @Override
+ public void setStreamOff(boolean setOff) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean getStreamOff() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public InputStream getValueStreamEx(int resourceIndex, long clusterId)
+ throws DatabaseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte[] getValueEx(int resourceIndex, long clusterId)
+ throws DatabaseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte[] getValueEx(int resourceIndex, long clusterId, long voffset,
+ int length) throws DatabaseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getValueSizeEx(int resourceIndex, long clusterId)
+ throws DatabaseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int wait4RequestsLess(int limit) throws DatabaseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Session getSession() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public IClusterTable getClusterTable() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int getClusterKeyByClusterUIDOrMake(long id1, long id2) {
+ throw new UnsupportedOperationException();
+ }
+
+}
--- /dev/null
+package org.simantics.acorn.backup;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.LinkOption;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.backup.BackupException;
+import org.simantics.backup.IBackupProvider;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.utils.FileUtils;
+
+/**
+ * @author Jani
+ *
+ * TODO: get rid of {@link GraphClientImpl2#getInstance()} invocations somehow in a cleaner way
+ */
+public class AcornBackupProvider implements IBackupProvider {
+
+ private static final String IDENTIFIER = "AcornBackupProvider";
+ private long trId = -1;
+ private final Semaphore lock = new Semaphore(1);
+
+ private static Path getAcornMetadataFile(Path dbFolder) {
+ return dbFolder.getParent().resolve(IDENTIFIER);
+ }
+
+ @Override
+ public void lock() throws BackupException {
+ try {
+ if (trId != -1)
+ throw new IllegalStateException(this + " backup provider is already locked");
+ trId = GraphClientImpl2.getInstance().askWriteTransaction(-1)
+ .getTransactionId();
+ } catch (ProCoreException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
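+ // The Acorn metadata file records the newest database folder included in
+ // the previous backup; backup() then copies only folders in the range
+ // (latestFolder, newestFolder], making successive backups incremental.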
+ public Future<BackupException> backup(Path targetPath, int revision) throws BackupException {
+ boolean releaseLock = true;
+ try {
+ lock.acquire();
+
+ GraphClientImpl2 client = GraphClientImpl2.getInstance();
+ client.makeSnapshot(true);
+
+ Path dbDir = client.getDbFolder();
+ int newestFolder = client.clusters.mainState.headDir - 1;
+ int latestFolder = -2;
+ Path acornMetadataFile = getAcornMetadataFile(dbDir);
+ if (Files.exists(acornMetadataFile)) {
+ try (BufferedReader br = Files.newBufferedReader(acornMetadataFile)) {
+ latestFolder = Integer.parseInt( br.readLine() );
+ }
+ }
+
+ AcornBackupRunnable r = new AcornBackupRunnable(
+ lock, targetPath, revision, dbDir, latestFolder, newestFolder);
+ new Thread(r, "Acorn backup thread").start();
+
+ releaseLock = false;
+ return r;
+ } catch (InterruptedException e) {
+ releaseLock = false;
+ throw new BackupException("Failed to lock Acorn for backup.", e);
+ } catch (NumberFormatException e) {
+ throw new BackupException("Failed to read Acorn head state file.", e);
+ } catch (IOException e) {
+ throw new BackupException("I/O problem during Acorn backup.", e);
+ } finally {
+ if (releaseLock)
+ lock.release();
+ }
+ }
+
+ @Override
+ public void unlock() throws BackupException {
+ try {
+ if (trId == -1)
+ throw new BackupException(this + " backup provider is not locked");
+ GraphClientImpl2.getInstance().endTransaction(trId);
+ trId = -1;
+ } catch (ProCoreException e) {
+ throw new BackupException(e);
+ }
+ }
+
+ @Override
+ public void restore(Path fromPath, int revision) {
+ try {
+ // 1. Resolve initial backup restore target.
+ // This can be DB directory directly or a temporary directory that
+ // will replace the DB directory.
+ Path dbRoot = GraphClientImpl2.getInstance().getDbFolder();
+ Path restorePath = dbRoot;
+ if (!Files.exists(dbRoot, LinkOption.NOFOLLOW_LINKS)) {
+ Files.createDirectories(dbRoot);
+ } else {
+ Path dbRootParent = dbRoot.getParent();
+ restorePath = dbRootParent == null ? Files.createTempDirectory("restore")
+ : Files.createTempDirectory(dbRootParent, "restore");
+ }
+
+ // 2. Restore the backup.
+ Files.walkFileTree(fromPath, new RestoreCopyVisitor(restorePath, revision));
+
+ // 3. Override existing DB root with restored temporary copy if necessary.
+ if (dbRoot != restorePath) {
+ FileUtils.deleteAll(dbRoot.toFile());
+ Files.move(restorePath, dbRoot);
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private class RestoreCopyVisitor extends SimpleFileVisitor<Path> {
+
+ private final Path toPath;
+ private final int revision;
+ private Path currentSubFolder;
+
+ public RestoreCopyVisitor(Path toPath, int revision) {
+ this.toPath = toPath;
+ this.revision = revision;
+ }
+
+ @Override
+ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
+ Path dirName = dir.getFileName();
+ if (dirName.toString().equals(IDENTIFIER)) {
+ currentSubFolder = dir;
+ return FileVisitResult.CONTINUE;
+ } else if (dir.getParent().getFileName().toString().equals(IDENTIFIER)) {
+ Path targetPath = toPath.resolve(dirName);
+ if (!Files.exists(targetPath)) {
+ Files.createDirectory(targetPath);
+ }
+ return FileVisitResult.CONTINUE;
+ } else if (dirName.toString().length() == 1 && Character.isDigit(dirName.toString().charAt(0))) {
+ int dirNameInt = Integer.parseInt(dirName.toString());
+ if (dirNameInt <= revision) {
+ return FileVisitResult.CONTINUE;
+ } else {
+ return FileVisitResult.SKIP_SUBTREE;
+ }
+ } else {
+ return FileVisitResult.CONTINUE;
+ }
+ }
+
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ if (file.getFileName().toString().endsWith(".tar.gz"))
+ return FileVisitResult.CONTINUE;
+ System.out.println("Restore " + file + " to " + toPath.resolve(currentSubFolder.relativize(file)));
+ Files.copy(file, toPath.resolve(currentSubFolder.relativize(file)), StandardCopyOption.REPLACE_EXISTING);
+ return FileVisitResult.CONTINUE;
+ }
+ }
+
+ private static class AcornBackupRunnable implements Runnable, Future<BackupException> {
+
+ private final Semaphore lock;
+ private final Path targetPath;
+ private final int revision;
+ private final Path baseDir;
+ private final int latestFolder;
+ private final int newestFolder;
+
+ private boolean done = false;
+ private final Semaphore completion = new Semaphore(0);
+ private BackupException exception = null;
+
+ public AcornBackupRunnable(Semaphore lock, Path targetPath, int revision,
+ Path baseDir, int latestFolder, int newestFolder) {
+ this.lock = lock;
+ this.targetPath = targetPath;
+ this.revision = revision;
+ this.baseDir = baseDir;
+ this.latestFolder = latestFolder;
+ this.newestFolder = newestFolder;
+ }
+
+ @Override
+ public void run() {
+ try {
+ doBackup();
+ writeHeadstateFile();
+ } catch (IOException e) {
+ exception = new BackupException("Acorn backup failed", e);
+ rollback();
+ } finally {
+ done = true;
+ lock.release();
+ completion.release();
+ }
+ }
+
+ private void doBackup() throws IOException {
+ Path target = targetPath.resolve(String.valueOf(revision)).resolve(IDENTIFIER);
+ if (!Files.exists(target))
+ Files.createDirectories(target);
+ Files.walkFileTree(baseDir,
+ new BackupCopyVisitor(baseDir, target));
+ }
+
+ private void writeHeadstateFile() throws IOException {
+ Path acornMetadataFile = getAcornMetadataFile(baseDir);
+ if (!Files.exists(acornMetadataFile)) {
+ Files.createFile(acornMetadataFile);
+ }
+ Files.write(acornMetadataFile,
+ Arrays.asList(Integer.toString(newestFolder)),
+ StandardOpenOption.WRITE,
+ StandardOpenOption.TRUNCATE_EXISTING,
+ StandardOpenOption.CREATE);
+ }
+
+ private void rollback() {
+ // TODO
+ }
+
+ private class BackupCopyVisitor extends SimpleFileVisitor<Path> {
+
+ private Path fromPath;
+ private Path toPath;
+
+ public BackupCopyVisitor(Path fromPath, Path toPath) {
+ this.fromPath = fromPath;
+ this.toPath = toPath;
+ }
+
+ @Override
+ public FileVisitResult preVisitDirectory(Path dir,
+ BasicFileAttributes attrs) throws IOException {
+ Path dirName = dir.getFileName();
+ if (dir.equals(fromPath)) {
+ Path targetPath = toPath.resolve(fromPath.relativize(dir));
+ if (!Files.exists(targetPath)) {
+ Files.createDirectory(targetPath);
+ }
+ return FileVisitResult.CONTINUE;
+ } else {
+ int dirNameInt = Integer.parseInt(dirName.toString());
+ if (latestFolder < dirNameInt && dirNameInt <= newestFolder) {
+ Path targetPath = toPath.resolve(fromPath
+ .relativize(dir));
+ if (!Files.exists(targetPath)) {
+ Files.createDirectory(targetPath);
+ }
+ return FileVisitResult.CONTINUE;
+ }
+ return FileVisitResult.SKIP_SUBTREE;
+ }
+ }
+
+ @Override
+ public FileVisitResult visitFile(Path file,
+ BasicFileAttributes attrs) throws IOException {
+ System.out.println("Backup " + file + " to "
+ + toPath.resolve(fromPath.relativize(file)));
+ Files.copy(file, toPath.resolve(fromPath.relativize(file)),
+ StandardCopyOption.REPLACE_EXISTING);
+ return FileVisitResult.CONTINUE;
+ }
+ }
+
+ @Override
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return false;
+ }
+
+ @Override
+ public boolean isCancelled() {
+ return false;
+ }
+
+ @Override
+ public boolean isDone() {
+ return done;
+ }
+
+ @Override
+ public BackupException get() throws InterruptedException {
+ completion.acquire();
+ completion.release();
+ return exception;
+ }
+
+ @Override
+ public BackupException get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
+ if (completion.tryAcquire(timeout, unit))
+ completion.release();
+ else
+ throw new TimeoutException("Acorn backup completion waiting timed out.");
+ return exception;
+ }
+
+ }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterStream;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.internal.DebugPolicy;
+import org.simantics.db.Resource;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.ExternalValueException;
+import org.simantics.db.exception.ValidationException;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterI.PredicateProcedure;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.impl.ForEachObjectContextProcedure;
+import org.simantics.db.impl.ForEachObjectProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueProcedure;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Table;
+import org.simantics.db.impl.TableHeader;
+import org.simantics.db.impl.graph.ReadGraphImpl;
+import org.simantics.db.impl.query.QueryProcessor;
+import org.simantics.db.procedure.AsyncContextMultiProcedure;
+import org.simantics.db.procedure.AsyncMultiProcedure;
+import org.simantics.db.procore.cluster.ClusterMap;
+import org.simantics.db.procore.cluster.ClusterPrintDebugInfo;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.CompleteTable;
+import org.simantics.db.procore.cluster.FlatTable;
+import org.simantics.db.procore.cluster.ForeignTable;
+import org.simantics.db.procore.cluster.ObjectTable;
+import org.simantics.db.procore.cluster.PredicateTable;
+import org.simantics.db.procore.cluster.ResourceTable;
+import org.simantics.db.procore.cluster.ValueTable;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Callback;
+
+final public class ClusterBig extends ClusterImpl {
+ private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE;
+ private static final int RESOURCE_TABLE_OFFSET = 0;
+ private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE;
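+ // The seven table headers are laid out back to back in headerTable;
+ // each occupies TABLE_HEADER_SIZE ints, so INT_HEADER_SIZE is the total
+ // header length in ints.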
+ private final int clusterBits;
+ final private ResourceTable resourceTable;
+ //final private ResourceTable movedResourceTable;
+ final private PredicateTable predicateTable;
+ final private ObjectTable objectTable;
+ final private ValueTable valueTable;
+ final private FlatTable flatTable;
+ final private ForeignTable foreignTable;
+ final private CompleteTable completeTable;
+ final private ClusterMap clusterMap;
+ final private int[] headerTable;
+ final private ClusterSupport2 clusterSupport;
+
+ public ClusterBig(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+ super(clusterTable, clusterUID, clusterKey, support);
+ if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+ new Exception(getClusterUID().toString()).printStackTrace();
+ this.headerTable = new int[INT_HEADER_SIZE];
+ this.resourceTable = new ResourceTable(this, headerTable, RESOURCE_TABLE_OFFSET);
+ this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET);
+ this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET);
+ this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET);
+ this.valueTable = new ValueTable(this, headerTable, VALUE_TABLE_OFFSET);
+ this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET);
+ this.flatTable = null;
+ this.clusterMap = new ClusterMap(foreignTable, flatTable);
+ this.clusterSupport = support;
+ this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+ this.importance = 0;
+// clusterTable.setDirtySizeInBytes(true);
+ }
+ protected ClusterBig(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+ throws DatabaseException {
+ super(clusterTable, checkValidity(0, longs, ints, bytes), clusterKey, support);
+ if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+ new Exception(getClusterUID().toString()).printStackTrace();
+ if (ints.length < INT_HEADER_SIZE)
+ throw new IllegalArgumentException("Too small integer table for cluster.");
+ this.headerTable = ints;
+ this.resourceTable = new ResourceTable(this, ints, RESOURCE_TABLE_OFFSET, longs);
+ this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET, longs);
+ this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints);
+ this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints);
+ this.valueTable = new ValueTable(this, ints, VALUE_TABLE_OFFSET, bytes);
+ this.flatTable = null;
+ this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET, ints);
+ this.clusterMap = new ClusterMap(foreignTable, flatTable);
+ this.clusterSupport = support;
+ this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+ }
+ void analyse() {
+ System.out.println("Cluster " + clusterId);
+ System.out.println("-size:" + getUsedSpace());
+ System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8));
+ System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8);
+ System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4);
+ System.out.println(" -ot:" + objectTable.getTableCapacity() * 4);
+ System.out.println(" -ct:" + completeTable.getTableCapacity() * 4);
+ System.out.println(" -vt:" + valueTable.getTableCapacity());
+
+ System.out.println("-resourceTable:");
+ System.out.println(" -resourceCount=" + resourceTable.getResourceCount());
+ System.out.println(" -size=" + resourceTable.getTableSize());
+ System.out.println(" -capacity=" + resourceTable.getTableCapacity());
+ System.out.println(" -count=" + resourceTable.getTableCount());
+ System.out.println(" -size=" + resourceTable.getTableSize());
+ //resourceTable.analyse();
+ }
+ public void checkDirectReference(int dr)
+ throws DatabaseException {
+ if (!ClusterTraits.statementIndexIsDirect(dr))
+ throw new ValidationException("Reference is not direct. Reference=" + dr);
+ if (ClusterTraits.isFlat(dr))
+ throw new ValidationException("Reference is flat. Reference=" + dr);
+ if (ClusterTraits.isLocal(dr)) {
+ if (dr < 1 || dr > resourceTable.getUsedSize())
+ throw new ValidationException("Illegal local reference. Reference=" + dr);
+ } else {
+ int fi = ClusterTraits.getForeignIndexFromReference(dr);
+ int ri = ClusterTraits.getResourceIndexFromForeignReference(dr);
+ if (fi < 1 || fi > foreignTable.getUsedSize())
+ throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi);
+ if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources())
+ throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri);
+ }
+ }
+ public void checkPredicateIndex(int pi)
+ throws DatabaseException {
+ predicateTable.checkPredicateSetIndex(this, pi);
+ }
+ public void checkObjectSetReference(int or)
+ throws DatabaseException {
+ if (ClusterTraits.statementIndexIsDirect(or))
+ throw new ValidationException("Illegal object set reference. Reference=" + or);
+ int oi = ClusterTraits.statementIndexGet(or);
+ this.objectTable.checkObjectSetIndex(this, oi);
+ }
+
+ public void checkValueInit()
+ throws DatabaseException {
+ valueTable.checkValueInit();
+ }
+ public void checkValue(int capacity, int index)
+ throws DatabaseException {
+ valueTable.checkValue(capacity, index);
+ }
+ public void checkValueFini()
+ throws DatabaseException {
+ valueTable.checkValueFini();
+ }
+ public void checkForeingIndex(int fi)
+ throws DatabaseException {
+ if (fi<1 || fi > foreignTable.getUsedSize())
+ throw new ValidationException("Illegal foreign index=" + fi);
+ }
+ public void checkCompleteSetReference(int cr)
+ throws DatabaseException {
+ if (!ClusterTraits.completeReferenceIsMultiple(cr))
+ throw new ValidationException("Illegal complete set reference. Reference=" + cr);
+ int ci = cr;
+ this.completeTable.checkCompleteSetIndex(this, ci);
+ }
+ public void check()
+ throws DatabaseException {
+ this.completeTable.check(this);
+ this.objectTable.check(this);
+ // Must be after object table check.
+ this.predicateTable.check(this);
+ this.resourceTable.check(this);
+ }
+ @Override
+ public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ final int resourceRef = getLocalReference(resourceKey);
+ int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+ CompleteTypeEnum ct = ClusterTraits.completeReferenceGetType(completeRef);
+ if (DEBUG)
+ System.out.println("Cluster.getCompleteType rk=" + resourceKey + " ct=" + ct);
+ int i = ct.getValue();
+ switch (i) {
+ case 0: return CompleteTypeEnum.NotComplete;
+ case 1: return CompleteTypeEnum.InstanceOf;
+ case 2: return CompleteTypeEnum.Inherits;
+ case 3: return CompleteTypeEnum.SubrelationOf;
+ default: throw new DatabaseException("Illegal complete type enumeration.");
+ }
+ }
+
+ @Override
+ public int getCompleteObjectKey(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ final int resourceRef = getLocalReference(resourceKey);
+ int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+ int clusterIndex;
+ int resourceIndex = ClusterTraits.completeReferenceGetResourceIndex(completeRef);
+
+ ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef);
+ if (completeType == ClusterI.CompleteTypeEnum.NotComplete)
+ throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + ".");
+
+ if (ClusterTraits.completeReferenceIsLocal(completeRef)) {
+ clusterIndex = clusterKey;
+ } else {
+ int foreignIndex = ClusterTraits.completeReferenceGetForeignIndex(completeRef);
+// System.err.println("completeRef=" + completeRef + " foreignIndex=" + foreignIndex );
+ ClusterUID clusterUID = foreignTable.getResourceUID(foreignIndex).asCID();
+ ClusterI c = support.getClusterByClusterUIDOrMake(clusterUID);
+ clusterIndex = c.getClusterKey();
+ }
+ int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex);
+ if (DEBUG)
+ System.out.println("Cluster.complete object rk=" + resourceKey + " ck=" + key);
+ return key;
+ }
+
+ @Override
+ public boolean isComplete(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ final int resourceRef = getLocalReference(resourceKey);
+ int completeRef = resourceTable.getCompleteObjectRef(resourceRef);
+ ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef);
+ boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete;
+ if (DEBUG)
+ System.out.println("Cluster.key=" + resourceKey + " isComplete=" + complete);
+ return complete;
+ }
+
+ public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ }
+ return objectTable.getSingleObject(objectIndex, support, this);
+ }
+
+ public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, AsyncMultiProcedure<Resource> procedure,
+ ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+ return;
+ }
+ objectTable.foreachObject(graph, objectIndex, procedure, this);
+ }
+ public <C> void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, C context, AsyncContextMultiProcedure<C, Resource> procedure,
+ ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+ return;
+ }
+ objectTable.foreachObject(graph, objectIndex, context, procedure, this);
+ }
+ @Override
+ public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure,
+ Context context, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.forObjects2: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+ }
+ return objectTable.foreachObject(objectIndex, procedure, context, support, this);
+ }
+
+ @Override
+ public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+ return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+ }
+
+ @Override
+ public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException {
+ final int predicateKey = procedure.predicateKey;
+ if (DEBUG)
+ System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+ return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+ }
+
+ @Override
+ public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException {
+ final int predicateKey = procedure.predicateKey;
+ if (DEBUG)
+ System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+ return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+ }
+
+ @Override
+ public void forObjects(ReadGraphImpl graph, int resourceKey,
+ int predicateKey, AsyncMultiProcedure<Resource> procedure)
+ throws DatabaseException {
+
+ throw new UnsupportedOperationException();
+
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+//
+// if (DEBUG)
+// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+// final int resourceIndex = getLocalReference(resourceKey);
+// final int pRef = getInternalReferenceOrZero(predicateKey, support);
+// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+// if (0 == predicateIndex) {
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support);
+
+ }
+
+ @Override
+ public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException {
+
+ throw new UnsupportedOperationException();
+
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// final int predicateKey = procedure.predicateKey;
+// if (DEBUG)
+// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+// final int resourceIndex = getLocalReference(resourceKey);
+// final int pRef = getInternalReferenceOrZero(predicateKey, support);
+// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+// if (0 == predicateIndex) {
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support);
+
+ }
+ @Override
+ public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context,
+ ForEachObjectContextProcedure<C> procedure) throws DatabaseException {
+
+ throw new UnsupportedOperationException();
+
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+//
+// final int predicateKey = procedure.predicateKey;
+//
+// if (DEBUG)
+// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+// final int resourceIndex = getLocalReference(resourceKey);
+// final int pRef = getInternalReferenceOrZero(predicateKey, support);
+// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+// if (0 == predicateIndex) {
+// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, context, procedure, support);
+
+ }
+
+ @Override
+ public <Context> boolean forObjects(int resourceKey, int predicateKey,
+ ObjectProcedure<Context> procedure, Context context, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.forObjects4: rk=" + resourceKey + " pk=" + predicateKey);
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int pRef = getInternalReferenceOrZero(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType)
+ return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex)
+ return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef);
+ return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support);
+ }
+ @Override
+ public <Context> boolean forPredicates(int resourceKey,
+ PredicateProcedure<Context> procedure, Context context, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("Cluster.forPredicates: rk=" + resourceKey);
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex)
+ return resourceTable.foreachPredicate(resourceIndex,
+ procedure, context, support, this, completeTable);
+ else {
+ boolean broken = resourceTable.foreachPredicate(resourceIndex,
+ procedure, context, support, this, completeTable);
+ if (broken)
+ return true;
+ }
+ return predicateTable.foreachPredicate(predicateIndex, procedure, context, support, this);
+ }
+ @Override
+ public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+ int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+ int pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION);
+ int ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION);
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ boolean ret = addRelationInternal(sri, pri, ori, completeType);
+// check();
+ if (ret) {
+ support.addStatement(this);
+ return this;
+ } else {
+ support.cancelStatement(this);
+ return null;
+ }
+ }
+ @Override
+ public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+ int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+ int pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION);
+ int ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION);
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ boolean ret = addRelationInternal(sri, pri, ori, completeType);
+// check();
+ if (ret) {
+ support.addStatement(this);
+ return this;
+ } else {
+ support.cancelStatement(this);
+ return null;
+ }
+ }
+ @Override
+ public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+ throws DatabaseException {
+// check();
+ int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION);
+ int pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION);
+ int ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION);
+ boolean ret = false;
+ if (0 != pri && 0 != ori) {
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ ret = removeRelationInternal(sri, pri, ori, completeType, support);
+ }
+ if (ret)
+ support.removeStatement(this);
+ else
+ support.cancelStatement(this);
+// check();
+ return ret;
+ }
+ @Override
+ public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int sri = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support);
+ ResourceIndexAndId p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support);
+ ResourceIndexAndId o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support);
+ if (0 == sri || 0 == p.index || 0 == o.index)
+ return;
+// check();
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ boolean ret = removeRelationInternal(sri, p.reference, o.reference, completeType, support);
+ if (ret) {
+ support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION);
+ support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION);
+ support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION);
+ support.removeStatement(this);
+ }
+// check();
+ return;
+ }
+ @Override
+ public InputStream getValueStream(int rResourceId, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterBig.getValue " + rResourceId);
+ int resourceIndex = getLocalReference(rResourceId);
+ try {
+ byte[] buffer = resourceTable.getValue(valueTable, resourceIndex);
+ if(buffer == null) return null;
+ return new ByteArrayInputStream(buffer);
+ } catch (ExternalValueException e) {
+ return support.getValueStreamEx(resourceIndex, clusterUID.second);
+ }
+ }
+ @Override
+ public byte[] getValue(int rResourceId, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterBig.getValue " + rResourceId);
+ int resourceIndex = getLocalReference(rResourceId);
+ try {
+ return resourceTable.getValue(valueTable, resourceIndex);
+ } catch (ExternalValueException e) {
+ return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+// return support.getValueEx(resourceIndex, clusterUID.second);
+ }
+ }
+ @Override
+ public boolean hasValue(int rResourceId, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(rResourceId);
+ return resourceTable.hasValue(resourceIndex);
+ }
+ @Override
+ public boolean removeValue(int rResourceId, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterChange.DELETE_OPERATION);
+ support.removeValue(this);
+ return resourceTable.removeValue(valueTable, resourceIndex);
+ }
+
+ @Override
+ public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION);
+ support.setValue(this, getClusterId(), value, length);
+ resourceTable.setValue(valueTable, resourceIndex, value, length);
+ return this;
+ }
+ @Override
+ public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION);
+ support.modiValue(this, getClusterId(), voffset, length, value, offset);
+ resourceTable.setValueEx(valueTable, resourceIndex);
+ return this;
+ }
+ @Override
+ public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(rResourceId);
+ boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+ if (!isExternal)
+ throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId);
+ return support.getValueEx(resourceIndex, getClusterId(), voffset, length);
+ }
+ @Override
+ public long getValueSizeEx(int resourceKey, ClusterSupport support)
+ throws DatabaseException, ExternalValueException {
+ int resourceIndex = getLocalReference(resourceKey);
+ boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+ if (!isExternal)
+ throw new ExternalValueException("ClusterI.getSize supported only for external value. Resource key=" + resourceKey);
+ return support.getValueSizeEx(resourceIndex, getClusterId());
+ }
+ public boolean isValueEx(int resourceKey)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(resourceKey);
+ return resourceTable.isValueEx(valueTable, resourceIndex);
+ }
+ @Override
+ public void setValueEx(int resourceKey)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(resourceKey);
+ resourceTable.setValueEx(valueTable, resourceIndex);
+ }
+ @Override
+ public int createResource(ClusterSupport support)
+ throws DatabaseException {
+ short resourceIndex = resourceTable.createResource();
+
+ if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION)
+ System.out.println("[RID_ALLOCATION]: ClusterBig[" + clusterId + "] allocates " + resourceIndex);
+
+ support.createResource(this, resourceIndex, clusterId);
+ return ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+ }
+ @Override
+ public boolean hasResource(int resourceKey, ClusterSupport support) {
+ int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+ if (this.clusterKey != clusterKey) // foreign resource
+ return false;
+ int resourceIndex;
+ try {
+ resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ } catch (DatabaseException e) {
+ return false;
+ }
+ if (resourceIndex > 0 & resourceIndex <= resourceTable.getTableCount())
+ return true;
+ else
+ return false;
+ }
+ @Override
+ public int getNumberOfResources(ClusterSupport support) {
+ return resourceTable.getUsedSize();
+ }
+ @Override
+ public long getUsedSpace() {
+ long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id)
+ long ft = foreignTable.getTableCapacity() * 8;
+ long pt = predicateTable.getTableCapacity() * 4;
+ long ot = objectTable.getTableCapacity() * 4;
+ long ct = completeTable.getTableCapacity() * 4;
+ long vt = valueTable.getTableCapacity() * 1;
+ long cm = clusterMap.getUsedSpace();
+
+ return rt + ft + pt + ot + ct + vt + cm;
+// System.out.println("resource table " + rt);
+// System.out.println("foreign table (non flat cluster table) " + ft);
+// System.out.println("predicate table " + pt);
+// long pt2 = getRealSizeOfPredicateTable() * 4;
+// System.out.println("predicate table real size " + pt2);
+// System.out.println("object table " + ot);
+// long ot2 = getRealSizeOfObjectTable() * 4;
+// System.out.println("object table real size " + ot2);
+// System.out.println("value table " + vt);
+ }
+ int getRealSizeOfPredicateTable() throws DatabaseException {
+ SizeOfPredicateTable proc = new SizeOfPredicateTable(resourceTable, predicateTable);
+ resourceTable.foreachResource(proc, 0, null, null);
+ return proc.getSize();
+ }
+ int getRealSizeOfObjectTable() throws DatabaseException {
+ SizeOfObjectTable proc = new SizeOfObjectTable(resourceTable, predicateTable, objectTable);
+ resourceTable.foreachResource(proc, 0, null, null);
+ return proc.getSize();
+ }
+ @Override
+ public boolean isEmpty() {
+ return resourceTable.getTableCount() == 0;
+ }
+ @Override
+ public void printDebugInfo(String message, ClusterSupport support)
+ throws DatabaseException {
+ predicateTable.printDebugInfo();
+ objectTable.printDebugInfo();
+ ClusterPrintDebugInfo proc = new ClusterPrintDebugInfo(this
+ , resourceTable, predicateTable, support, objectTable);
+ resourceTable.foreachResource(proc, 0, null, null);
+ }
+ private int getInternalReferenceOrZero(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) { // foreign resource
+ ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+ ClusterUID clusterUID = foreignCluster.getClusterUID();
+ int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID);
+ return foreignResourceIndex;
+ }
+ return resourceIndex;
+ }
+ private int getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) { // foreign resource
+ ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+ int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID);
+ support.addStatementIndex(this, resourceKey, clusterUID, op);
+ return foreignResourceIndex;
+ }
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private short getLocalReference(int resourceKey) throws DatabaseException {
+ return ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+ }
+ private int getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(resourceKey);
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private int checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterShortId)
+ return 0;
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ return resourceIndex;
+ }
+ private int getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) {
+ support.addStatementIndex(this, resourceKey, clusterUID, op);
+ return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+ }
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private int getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) {
+ ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+ support.addStatementIndex(this, resourceKey, clusterUID, op);
+ return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+ }
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private class ResourceIndexAndId {
+ ResourceIndexAndId(int reference, int index, ClusterUID clusterUID) {
+ this.reference = reference;
+ this.index = index;
+ this.clusterUID = clusterUID;
+ }
+ public final int reference;
+ public final int index;
+ public final ClusterUID clusterUID;
+ }
+ private ResourceIndexAndId checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) { // foreign resource
+ ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+ ClusterUID clusterUID = foreignCluster.getClusterUID();
+ int ref = clusterMap.getForeignReferenceOrCreateByResourceIndex(resourceIndex, clusterUID);
+ return new ResourceIndexAndId(ref, resourceIndex, clusterUID);
+ }
+ return new ResourceIndexAndId(resourceIndex, resourceIndex, getClusterUID());
+ }
+
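+ // Resolves a resource index into a full resource key: a positive index is
+ // a resource local to this cluster, while a non-positive index denotes a
+ // foreign resource that is resolved through clusterMap.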
+ @Override
+ final public int execute(int resourceIndex) throws DatabaseException {
+ int key;
+ if(resourceIndex > 0) {
+ key = clusterBits | resourceIndex;
+ } else {
+ ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID();
+ ClusterI cluster = clusterSupport.getClusterByClusterUIDOrMake(clusterUID);
+ int foreignResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex);
+ key = ClusterTraits.createResourceKey(cluster.getClusterKey(), foreignResourceIndex);
+ }
+ if (DEBUG)
+ System.out.println("Cluster.execute key=" + key);
+ return key;
+ }
+
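+ // Adds the statement (s,p,o), given as cluster-internal references.
+ // resourceTable.addStatement returns 0 when the statement could be stored
+ // in the resource table itself, a negative value for an already existing
+ // complete statement, and otherwise a predicate table index; if growing
+ // the predicate set moved it, the new index is written back. Returns true
+ // exactly when a statement was actually added.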
+ private boolean addRelationInternal(int sReference, int pReference, int oReference, ClusterI.CompleteTypeEnum completeType)
+ throws DatabaseException {
+ int predicateIndex = resourceTable.addStatement(sReference, pReference,
+ oReference, predicateTable, objectTable, completeType, completeTable);
+ if (0 == predicateIndex)
+ return true; // added to resourceTable
+ else if (0 > predicateIndex)
+ return false; // old complete statement
+ int newPredicateIndex = predicateTable.addPredicate(predicateIndex,
+ pReference, oReference, objectTable);
+ if (0 == newPredicateIndex)
+ return false;
+ if (predicateIndex != newPredicateIndex)
+ resourceTable.setPredicateIndex(sReference, newPredicateIndex);
+ return true;
+ }
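+ // Removes the statement (s,p,o). Complete statements, and resources with
+ // no predicate table entry, are removed via the resource table cache;
+ // otherwise the predicate table is updated, and the resource's predicate
+ // index is cleared when its predicate set becomes empty.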
+ private boolean removeRelationInternal(int sResourceIndex, int pResourceIndex,
+ int oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support)
+ throws DatabaseException {
+ int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex);
+ if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType)
+ return resourceTable.removeStatementFromCache(sResourceIndex,
+ pResourceIndex, oResourceIndex, completeType, completeTable);
+ PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, pResourceIndex, oResourceIndex, objectTable);
+ switch (ret) {
+ case NothingRemoved:
+ return false;
+ case PredicateRemoved: {
+ if (0 == predicateTable.getPredicateSetSize(predicateIndex))
+ resourceTable.setPredicateIndex(sResourceIndex, 0);
+ // intentional fall-through to default
+ }
+ default:
+ break;
+ }
+ resourceTable.removeStatement(sResourceIndex,
+ pResourceIndex, oResourceIndex,
+ completeType, completeTable,
+ predicateTable, objectTable, this, support);
+ return true;
+ }
+ @Override
+ public void load() {
+ throw new Error("Not supported.");
+ }
+
+ @Override
+ public void load(Callback<DatabaseException> r) {
+ throw new Error("Not supported.");
+ }
+
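+ // Like execute above, builds a resource key from a resource index
+ // (positive = local, non-positive = foreign via clusterMap), but
+ // additionally treats a zero key as an error.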
+ public int makeResourceKey(int resourceIndex) throws DatabaseException {
+ int key = 0;
+ if (resourceIndex > 0) // local resource
+ key = ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+ else {
+ ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID();
+ int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(clusterUID);
+ int foreignResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex);
+ key = ClusterTraits.createResourceKey(clusterKey, foreignResourceIndex);
+ }
+ if (0 == key)
+ throw new DatabaseException("Failed to make resource key from " + resourceIndex);
+ return key;
+ }
+ @Override
+ public ClusterBig toBig(ClusterSupport support) throws DatabaseException {
+ throw new Error("Not implemented");
+ }
+ @Override
+ public void load(ClusterSupport session, Runnable callback) {
+ throw new Error("Not implemented");
+ }
+ @Override
+ public ClusterI getClusterByResourceKey(int resourceKey,
+ ClusterSupport support) {
+ throw new Error("Not implemented");
+ }
+ @Override
+ public void increaseReferenceCount(int amount) {
+ throw new Error("Not implemented");
+ }
+ @Override
+ public void decreaseReferenceCount(int amount) {
+ throw new Error("Not implemented");
+ }
+ @Override
+ public int getReferenceCount() {
+ throw new Error("Not implemented");
+ }
+ @Override
+ public void releaseMemory() {
+ }
+ @Override
+ public void compact() {
+ clusterMap.compact();
+ }
+ public boolean contains(int resourceKey) {
+ return ClusterTraitsBase.isCluster(clusterBits, resourceKey);
+ }
+ @Override
+ public ClusterTypeEnum getType() {
+ return ClusterTypeEnum.BIG;
+ }
+ @Override
+ public boolean getImmutable() {
+ int status = resourceTable.getClusterStatus();
+ return (status & ClusterStatus.ImmutableMaskSet) == 1;
+ }
+ @Override
+ public void setImmutable(boolean immutable, ClusterSupport support) {
+ int status = resourceTable.getClusterStatus();
+ if (immutable)
+ status |= ClusterStatus.ImmutableMaskSet;
+ else
+ status &= ClusterStatus.ImmutableMaskClear;
+ resourceTable.setClusterStatus(status);
+ support.setImmutable(this, immutable);
+ }
+
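+ // Serializes this cluster into the three parallel arrays of ClusterTables:
+ // the raw value bytes, a long array holding a fixed header plus the
+ // resource and foreign tables, and an int array holding the header table
+ // followed by the predicate, object and complete tables. The header table
+ // is snapshotted first and restored at the end, apparently so that offsets
+ // updated by the store calls do not remain in the in-memory header.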
+ @Override
+ public ClusterTables store() throws IOException {
+
+ ClusterTables result = new ClusterTables();
+
+ int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+ int byteSize = valueTable.getTableSize();
+ byte[] byteBytes = new byte[byteSize];
+ valueTable.store(byteBytes, 0);
+
+ //FileUtils.writeFile(bytes, valueTable.table);
+
+ result.bytes = byteBytes;
+
+ int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+ long[] longBytes = new long[longSize];
+
+ longBytes[0] = 0;
+ longBytes[1] = LONG_HEADER_VERSION;
+ longBytes[2] = 0;
+ longBytes[3] = clusterUID.second;
+
+// Bytes.writeLE8(longBytes, 0, 0);
+// Bytes.writeLE8(longBytes, 8, LONG_HEADER_VERSION);
+// Bytes.writeLE8(longBytes, 16, 0);
+// Bytes.writeLE8(longBytes, 24, clusterUID.second);
+
+ int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE);
+ foreignTable.store(longBytes, longPos);
+
+ result.longs = longBytes;
+
+// FileUtils.writeFile(longs, longBytes);
+
+ int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+ int[] intBytes = new int[intSize];
+ int intPos = INT_HEADER_SIZE;
+ intPos = predicateTable.store(intBytes, intPos);
+ intPos = objectTable.store(intBytes, intPos);
+ intPos = completeTable.store(intBytes, intPos);
+ // write header
+ for(int i=0;i<INT_HEADER_SIZE;i++) {
+ int v = headerTable[i];
+ intBytes[i] = v;
+// Bytes.writeLE(intBytes, i<<2, v);
+ }
+
+ result.ints = intBytes;
+
+// FileUtils.writeFile(ints, intBytes);
+
+ for(int i=0;i<INT_HEADER_SIZE;i++)
+ headerTable[i] = currentHeader[i];
+
+ return result;
+
+ }
+
+ @Override
+ protected int getResourceTableCount() {
+ return resourceTable.getTableCount();
+ }
+ @Override
+ public boolean getDeleted() {
+ int status = resourceTable.getClusterStatus();
+ return (status & ClusterStatus.DeletedMaskSet) == ClusterStatus.DeletedMaskSet;
+ }
+ @Override
+ public void setDeleted(boolean deleted, ClusterSupport support) {
+ int status = resourceTable.getClusterStatus();
+ if (deleted)
+ status |= ClusterStatus.DeletedMaskSet;
+ else
+ status &= ClusterStatus.DeletedMaskClear;
+ resourceTable.setClusterStatus(status);
+ support.setDeleted(this, deleted);
+ }
+ @Override
+ public Table<?> getPredicateTable() {
+ return predicateTable;
+ }
+ @Override
+ public Table<?> getForeignTable() {
+ return foreignTable;
+ }
+ @Override
+ public Table<?> getCompleteTable() {
+ return completeTable;
+ }
+ @Override
+ public Table<?> getValueTable() {
+ return valueTable;
+ }
+ @Override
+ public Table<?> getObjectTable() {
+ return objectTable;
+ }
+}
+
+class SizeOfPredicateTable implements ClusterI.ObjectProcedure<Integer> {
+ private final ResourceTable mrResourceTable;
+ private final PredicateTable mrPredicateTable;
+ private int size = 0;
+ SizeOfPredicateTable(ResourceTable resourceTable
+ , PredicateTable predicateTable) {
+ mrResourceTable = resourceTable;
+ mrPredicateTable = predicateTable;
+ }
+ @Override
+ public boolean execute(Integer i, int resourceRef) {
+ int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef);
+ if (0 == predicateIndex)
+ return false; // continue loop
+ size += mrPredicateTable.getPredicateSetSize(predicateIndex);
+ return false; // continue loop
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+}
+
+class SizeOfObjectTable implements ClusterI.ObjectProcedure<Integer> {
+ private final ResourceTable mrResourceTable;
+ private final PredicateTable mrPredicateTable;
+ private final ObjectTable mrObjectTable;
+ private int size = 0;
+ SizeOfObjectTable(ResourceTable resourceTable
+ , PredicateTable predicateTable, ObjectTable objectTable) {
+ mrResourceTable = resourceTable;
+ mrPredicateTable = predicateTable;
+ mrObjectTable = objectTable;
+ }
+
+ @Override
+ public boolean execute(Integer i, int resourceRef) {
+ int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef);
+ if (0 == predicateIndex)
+ return false; // continue loop
+ ClusterI.PredicateProcedure<Object> procedure = new PredicateProcedure<Object>() {
+ @Override
+ public boolean execute(Object context, int pRef, int oIndex) {
+ if (ClusterTraits.statementIndexIsDirect(oIndex))
+ return false; // no table space reserved, continue looping
+ int objectIndex;
+ try {
+ objectIndex = ClusterTraits.statementIndexGet(oIndex);
+ size += mrObjectTable.getObjectSetSize(objectIndex);
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ }
+ return false; // continue looping
+ }
+ };
+ try {
+ mrPredicateTable.foreachPredicate(predicateIndex, procedure, null, null, null);
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ }
+ return false; // continue loop
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.UUID;
+
+import org.simantics.acorn.internal.Change;
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.InvalidClusterException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Modifier;
+import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.db.service.ClusteringSupport.Id;
+import org.simantics.utils.strings.AlphanumComparator;
+
+public abstract class ClusterImpl extends ClusterBase implements Modifier, CollectorCluster {
+ protected static final int LONG_HEADER_SIZE = 7;
+ protected static final long LONG_HEADER_VERSION = 1;
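+ // A stored cluster's long array begins with a fixed header:
+ // longs[0] = cluster type, longs[1] = header version, and longs[2..3]
+ // hold the two halves of the cluster UID.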
+ protected static ClusterUID checkValidity(long type, long[] longs, int[] ints, byte[] bytes)
+ throws InvalidClusterException {
+ if (longs.length < LONG_HEADER_SIZE)
+ throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_SIZE + ", got=" + longs.length);
+ if (longs[0] != type)
+ throw new InvalidClusterException("Type mismatch. Expected=" + type + ", got=" + longs[0] + " " + ClusterUID.make(longs[2], longs[3]));
+ if (longs[1] != ClusterImpl.LONG_HEADER_VERSION)
+ throw new InvalidClusterException("Header version mismatch. Expected=" + ClusterImpl.LONG_HEADER_VERSION + ", got=" + longs[1]);
+ return ClusterUID.make(longs[2], longs[3]);
+ }
+ protected static Id getUniqueId(long[] longs) {
+ return new IdImpl(new UUID(longs[3], longs[4]));
+ }
+ static final boolean DEBUG = false;
+ final public IClusterTable clusterTable;
+ // This can be null iff the cluster has been converted to big
+ public Change change = new Change();
+ public ClusterChange cc;
+ public byte[] foreignLookup;
+
+ private boolean dirtySizeInBytes = true;
+ private long sizeInBytes = 0;
+
+ protected ClusterImpl() {
+ clusterTable = null;
+ }
+
+ public ClusterImpl(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport support) {
+ super(support, clusterUID, clusterKey);
+// SessionImplSocket session = (SessionImplSocket)support.getSession();
+// if(session != null)
+ this.clusterTable = clusterTable;
+// else
+ }
+
+ public static ClusterImpl dummy() {
+ return new ClusterSmall();
+ }
+
+ public static ClusterImpl make(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+ return new ClusterSmall(clusterUID, clusterKey, support, clusterTable);
+ }
+ public static ClusterSmall proxy(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, long clusterId, ClusterSupport2 support) {
+ if (DEBUG)
+ new Exception("Cluster proxy for " + clusterUID).printStackTrace();
+ return new ClusterSmall(null, clusterUID, clusterKey, support);
+ }
+ public static ClusterImpl make(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+ throws DatabaseException {
+ if (longs[0] == 0)
+ return new ClusterBig(clusterTable, longs, ints, bytes, support, clusterKey);
+ else
+ return new ClusterSmall(clusterTable, longs, ints, bytes, support, clusterKey);
+ }
+
+// public boolean virtual = false;
+
+ @Override
+ public boolean hasVirtual() {
+ return false;
+// return clusterTable.hasVirtual(clusterKey);
+ }
+
+ @Override
+ public void markVirtual() {
+// clusterTable.markVirtual(clusterKey);
+// virtual = true;
+ }
+
+ @Override
+ public boolean isWriteOnly() {
+ return false;
+ }
+ @Override
+ public boolean isLoaded() {
+ return true;
+ }
+
+ @Override
+ public void resized() {
+ dirtySizeInBytes = true;
+// if(clusterTable != null)
+// clusterTable.setDirtySizeInBytes(true);
+ }
+
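+ // Lazily cached byte size of the cluster: resized() merely marks the
+ // cached value dirty, and getUsedSpace() is re-evaluated only when the
+ // size is next requested.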
+ public long getCachedSize() {
+ if(dirtySizeInBytes) {
+ try {
+ sizeInBytes = getUsedSpace();
+ //System.err.println("recomputed size of cluster " + getClusterId() + " => " + sizeInBytes);
+ } catch (DatabaseException e) {
+ Logger.defaultLogError(e);
+ }
+ dirtySizeInBytes = false;
+ }
+ return sizeInBytes;
+ }
+
+ protected void calculateModifiedId() {
+// setModifiedId(new IdImpl(UUID.randomUUID()));
+ }
+
+ public static class ClusterTables {
+ public byte[] bytes;
+ public int[] ints;
+ public long[] longs;
+ }
+
+ public byte[] storeBytes() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ public ClusterTables store() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ abstract protected int getResourceTableCount();
+
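+ // Debug helper: renders every resource of the cluster with its value
+ // bytes and its statements (predicate and object cluster UIDs plus masked
+ // local indices), sorted alphanumerically for stable output.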
+ public String dump(final ClusterSupport support) {
+
+ StringBuilder sb = new StringBuilder();
+ for(int i=1;i<getResourceTableCount();i++) {
+ sb.append(""+i+"\n");
+ final int resourceKey = i;
+ final ArrayList<String> stms = new ArrayList<String>();
+ try {
+
+ byte[] value = getValue(i, support);
+ if(value != null)
+ sb.append(" bytes: " + Arrays.toString(value) + "\n");
+
+ forPredicates(i, new PredicateProcedure<Integer>() {
+
+ @Override
+ public boolean execute(Integer c, final int predicateKey, int objectIndex) {
+
+ try {
+
+ forObjects(resourceKey, predicateKey, objectIndex, new ObjectProcedure<Integer>() {
+
+ @Override
+ public boolean execute(Integer context, int objectKey) throws DatabaseException {
+
+ ClusterUID puid = support.getClusterByResourceKey(predicateKey).getClusterUID();
+ ClusterUID ouid = support.getClusterByResourceKey(objectKey).getClusterUID();
+
+ stms.add(" " + puid + " " + (predicateKey&0xFFF) + " " + ouid + " " + (objectKey&0xFFF));
+
+ return false;
+
+ }
+
+ }, 0, support);
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ }
+
+ return false;
+
+ }
+
+ },0,support);
+
+ Collections.sort(stms, AlphanumComparator.COMPARATOR);
+
+ for(String s : stms) {
+ sb.append(s);
+ sb.append("\n");
+ }
+
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ }
+ }
+
+ return sb.toString();
+
+ }
+
+ abstract public boolean isValueEx(int resourceIndex) throws DatabaseException;
+
+ abstract public ClusterI addRelation(int resourceKey, ClusterUID puid, int predicateKey, ClusterUID ouid, int objectKey, ClusterSupport support) throws DatabaseException;
+
+ @Override
+ public IClusterTable getClusterTable() {
+ return clusterTable;
+ }
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.cluster;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+import org.simantics.acorn.internal.ClusterChange;
+import org.simantics.acorn.internal.ClusterStream;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.internal.DebugPolicy;
+import org.simantics.db.Resource;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.exception.ExternalValueException;
+import org.simantics.db.exception.ValidationException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.impl.ForEachObjectContextProcedure;
+import org.simantics.db.impl.ForEachObjectProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure;
+import org.simantics.db.impl.ForPossibleRelatedValueProcedure;
+import org.simantics.db.impl.IClusterTable;
+import org.simantics.db.impl.Table;
+import org.simantics.db.impl.TableHeader;
+import org.simantics.db.impl.graph.ReadGraphImpl;
+import org.simantics.db.procedure.AsyncContextMultiProcedure;
+import org.simantics.db.procedure.AsyncMultiProcedure;
+import org.simantics.db.procore.cluster.ClusterMapSmall;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.ClusterTraitsSmall;
+import org.simantics.db.procore.cluster.CompleteTableSmall;
+import org.simantics.db.procore.cluster.ForeignTableSmall;
+import org.simantics.db.procore.cluster.ObjectTable;
+import org.simantics.db.procore.cluster.OutOfSpaceException;
+import org.simantics.db.procore.cluster.PredicateTable;
+import org.simantics.db.procore.cluster.ResourceTableSmall;
+import org.simantics.db.procore.cluster.ValueTableSmall;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.db.service.ResourceUID;
+import org.simantics.utils.datastructures.Callback;
+
+import gnu.trove.map.hash.TIntShortHashMap;
+import gnu.trove.procedure.TIntProcedure;
+import gnu.trove.set.hash.TIntHashSet;
+
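+// A ClusterSmall either holds actual data in a set of tables sharing one
+// int header array (the *_OFFSET constants give each table's slot in that
+// header), or acts as a proxy with no tables allocated until the real
+// cluster data is loaded.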
+final public class ClusterSmall extends ClusterImpl {
+ private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE;
+ private static final int RESOURCE_TABLE_OFFSET = 0;
+ private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE;
+ private final int clusterBits;
+ private final ResourceTableSmall resourceTable;
+ private final PredicateTable predicateTable;
+ private final ObjectTable objectTable;
+ private final ValueTableSmall valueTable;
+ private final ForeignTableSmall foreignTable;
+ private final CompleteTableSmall completeTable;
+ private final ClusterMapSmall clusterMap;
+ private final int[] headerTable;
+ public final ClusterSupport2 clusterSupport;
+ private boolean proxy;
+ private boolean deleted = false;
+
+ protected ClusterSmall() {
+ this.proxy = true;
+ this.headerTable = null;
+ this.resourceTable = null;
+ this.foreignTable = null;
+ this.predicateTable = null;
+ this.objectTable = null;
+ this.valueTable = null;
+ this.completeTable = null;
+ this.clusterMap = null;
+ this.clusterSupport = null;
+ this.clusterBits = 0;
+ this.importance = 0;
+ }
+
+ public ClusterSmall(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) {
+ super(clusterTable, clusterUID, clusterKey, support);
+ if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+ new Exception(clusterUID.toString()).printStackTrace();
+ this.proxy = true;
+ this.headerTable = null;
+ this.resourceTable = null;
+ this.foreignTable = null;
+ this.predicateTable = null;
+ this.objectTable = null;
+ this.valueTable = null;
+ this.completeTable = null;
+ this.clusterMap = null;
+ this.clusterSupport = support;
+ this.clusterBits = 0;
+ this.importance = 0;
+// new Exception("ClusterSmall " + clusterKey).printStackTrace();
+ }
+ ClusterSmall(ClusterUID clusterUID, int clusterKey, ClusterSupport2 support, IClusterTable clusterTable) {
+ super(clusterTable, clusterUID, clusterKey, support);
+ if(DebugPolicy.REPORT_CLUSTER_EVENTS)
+ new Exception(clusterUID.toString()).printStackTrace();
+ this.proxy = false;
+ this.clusterSupport = support;
+ this.headerTable = new int[INT_HEADER_SIZE];
+ this.resourceTable = new ResourceTableSmall(this, headerTable, RESOURCE_TABLE_OFFSET);
+ this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET);
+ this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET);
+ this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET);
+ this.valueTable = new ValueTableSmall(this, headerTable, VALUE_TABLE_OFFSET);
+ this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET);
+ this.clusterMap = new ClusterMapSmall(this, foreignTable);
+ this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+// if(clusterTable != null)
+// this.importance = -clusterTable.timeCounter();
+// else
+ this.importance = 0;
+// new Exception("ClusterSmall " + clusterKey).printStackTrace();
+ }
+ protected ClusterSmall(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey)
+ throws DatabaseException {
+ super(clusterTable, checkValidity(-1, longs, ints, bytes), clusterKey, support);
+ this.proxy = false;
+ this.clusterSupport = support;
+ if (ints.length < INT_HEADER_SIZE)
+ throw new IllegalArgumentException("Too small integer table for cluster.");
+ this.headerTable = ints;
+ if(DebugPolicy.REPORT_CLUSTER_EVENTS) new Exception(Long.toString(clusterId)).printStackTrace();
+ this.resourceTable = new ResourceTableSmall(this, ints, RESOURCE_TABLE_OFFSET, longs);
+ this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET, longs);
+ this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints);
+ this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints);
+ this.valueTable = new ValueTableSmall(this, ints, VALUE_TABLE_OFFSET, bytes);
+ this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET, ints);
+ this.clusterMap = new ClusterMapSmall(this, foreignTable);
+ this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey);
+// if(clusterTable != null) {
+// this.importance = clusterTable.timeCounter();
+// clusterTable.markImmutable(this, getImmutable());
+// }
+// new Exception("ClusterSmall " + clusterKey).printStackTrace();
+ }
+ void analyse() {
+ System.out.println("Cluster " + clusterId);
+ System.out.println("-size:" + getUsedSpace());
+ System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8));
+ System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8);
+ System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4);
+ System.out.println(" -ot:" + objectTable.getTableCapacity() * 4);
+ System.out.println(" -ct:" + completeTable.getTableCapacity() * 4);
+ System.out.println(" -vt:" + valueTable.getTableCapacity());
+
+ System.out.println("-resourceTable:");
+ System.out.println(" -resourceCount=" + resourceTable.getResourceCount());
+ System.out.println(" -size=" + resourceTable.getTableSize());
+ System.out.println(" -capacity=" + resourceTable.getTableCapacity());
+ System.out.println(" -count=" + resourceTable.getTableCount());
+ System.out.println(" -size=" + resourceTable.getTableSize());
+ //resourceTable.analyse();
+ }
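+ // Validation helper: a direct reference must be a non-flat statement
+ // index that is either a valid local resource index or a well-formed
+ // foreign reference (a foreign table index paired with a resource index).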
+ public void checkDirectReference(int dr)
+ throws DatabaseException {
+ if (!ClusterTraits.statementIndexIsDirect(dr))
+ throw new ValidationException("Reference is not direct. Reference=" + dr);
+ if (ClusterTraits.isFlat(dr))
+ throw new ValidationException("Reference is flat. Reference=" + dr);
+ if (ClusterTraits.isLocal(dr)) {
+ if (dr < 1 || dr > resourceTable.getUsedSize())
+ throw new ValidationException("Illegal local reference. Reference=" + dr);
+ } else {
+ int fi = ClusterTraits.getForeignIndexFromReference(dr);
+ int ri = ClusterTraits.getResourceIndexFromForeignReference(dr);
+ if (fi < 1 || fi > foreignTable.getUsedSize())
+ throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi);
+ if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources())
+ throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri);
+ }
+ }
+ public void checkPredicateIndex(int pi)
+ throws DatabaseException {
+ // predicateTable.checkPredicateSetIndex(this, pi);
+ }
+ public void checkObjectSetReference(int or)
+ throws DatabaseException {
+ if (ClusterTraits.statementIndexIsDirect(or))
+ throw new ValidationException("Illegal object set reference. Reference=" + or);
+ int oi = ClusterTraits.statementIndexGet(or);
+ this.objectTable.checkObjectSetIndex(this, oi);
+ }
+
+ public void checkValueInit()
+ throws DatabaseException {
+ valueTable.checkValueInit();
+ }
+ public void checkValue(int capacity, int index)
+ throws DatabaseException {
+ valueTable.checkValue(capacity, index);
+ }
+ public void checkValueFini()
+ throws DatabaseException {
+ valueTable.checkValueFini();
+ }
+ public void checkForeingIndex(int fi)
+ throws DatabaseException {
+ if (fi < 1 || fi > foreignTable.getUsedSize())
+ throw new ValidationException("Illegal foreign index=" + fi);
+ }
+ public void checkCompleteSetReference(int cr)
+ throws DatabaseException {
+ if (!ClusterTraits.completeReferenceIsMultiple(cr))
+ throw new ValidationException("Illegal complete set reference. Reference=" + cr);
+ int ci = cr;
+ this.completeTable.checkCompleteSetIndex(this, ci);
+ }
+ public void check()
+ throws DatabaseException {
+// this.completeTable.check(this);
+// this.objectTable.check(this);
+// // Must be after object table check.
+// this.predicateTable.check(this);
+// this.resourceTable.check(this);
+ }
+ @Override
+ public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ final int resourceRef = getLocalReference(resourceKey);
+ CompleteTypeEnum ct = resourceTable.getCompleteType(resourceRef);
+ if (DEBUG)
+ System.out.println("ClusterSmall.getCompleteType rk=" + resourceKey + " ct=" + ct);
+ return ct;
+ }
+
+ @Override
+ public int getCompleteObjectKey(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ final int resourceIndexOld = getLocalReference(resourceKey);
+ short completeRef = resourceTable.getCompleteObjectRef(resourceIndexOld);
+ int clusterIndex;
+ int resourceIndex;
+ if (0 == completeRef)
+ throw new DatabaseException("Resource's complete object refernce is null. Resource key=" + resourceKey + ".");
+ ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceIndexOld);
+ if (completeType == ClusterI.CompleteTypeEnum.NotComplete)
+ throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + ".");
+ if (ClusterTraitsSmall.resourceRefIsLocal(completeRef)) {
+ clusterIndex = clusterKey;
+ resourceIndex = completeRef;
+ } else { // Resource has one complete statement.
+ ResourceUID resourceUID = clusterMap.getForeignResourceUID(completeRef);
+ ClusterUID uid = resourceUID.asCID();
+ clusterIndex = clusterSupport.getClusterKeyByUID(0, uid.second);
+ //ClusterI c = clusterTable.getClusterByClusterUIDOrMakeProxy(uid);
+ //clusterIndex = c.getClusterKey();
+ //assert(clusterIndex == clusterTable.getClusterByClusterUIDOrMakeProxy(uid).getClusterKey());
+ resourceIndex = resourceUID.getIndex();
+ }
+ int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex);
+ if (DEBUG)
+ System.out.println("ClusterSmall.complete object rk=" + resourceKey + " ck=" + key);
+ return key;
+ }
+
+ @Override
+ public boolean isComplete(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ final int resourceRef = getLocalReference(resourceKey);
+ final ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceRef);
+ boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete;
+ if (DEBUG)
+ System.out.println("ClusterSmall.key=" + resourceKey + " isComplete=" + complete);
+ return complete;
+ }
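+ // An objectIndex of 0 means the object is not stored in the object table;
+ // the single object is then resolved through the resource table (and the
+ // complete table), otherwise it is read from the object table directly.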
+ public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+ final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ }
+ return objectTable.getSingleObject(objectIndex, support, this);
+ }
+
+ public void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, AsyncMultiProcedure<Resource> procedure,
+ ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+ return;
+ }
+ objectTable.foreachObject(graph, objectIndex, procedure, this);
+ }
+
+ public <C> void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, C context, AsyncContextMultiProcedure<C, Resource> procedure,
+ ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+ return;
+ }
+ objectTable.foreachObject(graph, objectIndex, context, procedure, this);
+ }
+
+ @Override
+ public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure,
+ Context context, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.forObjects2: rk=" + resourceKey + " pk=" + predicateKey);
+ if (0 == objectIndex) {
+ final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+ }
+ return objectTable.foreachObject(objectIndex, procedure, context, support, this);
+ }
+
+ @Override
+ public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey);
+ final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+ final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey);
+ final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType);
+ if (completeType > 0)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+ if (0 == predicateIndex) // All relevant data is in resource table.
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+ return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+ }
+
+ @Override
+ public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException {
+ final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final int predicateKey = procedure.predicateKey;
+ int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+ short pRef = 0;
+ if(procedure.clusterKey[0] == clusterKey) {
+ pRef = (short)procedure.predicateReference[0];
+ } else {
+ pRef = getInternalReferenceOrZero2(predicateKey, support);
+ procedure.clusterKey[0] = clusterKey;
+ procedure.predicateReference[0] = pRef;
+ }
+
+ final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+ if (CompleteTypeEnum.NotComplete != pCompleteType)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+ if (0 == predicateIndex) // All relevant data is in resource table.
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+ return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+ }
+
+ @Override
+ public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException {
+ final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final int predicateKey = procedure.predicateKey;
+ int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+ short pRef = 0;
+ if(procedure.clusterKey[0] == clusterKey) {
+ pRef = (short)procedure.predicateReference[0];
+ } else {
+ pRef = getInternalReferenceOrZero2(predicateKey, support);
+ procedure.clusterKey[0] = clusterKey;
+ procedure.predicateReference[0] = pRef;
+ }
+ final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+ if (CompleteTypeEnum.NotComplete != pCompleteType)
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+ if (0 == predicateIndex) // All relevant data is in resource table.
+ return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this);
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+ return getSingleObject(resourceKey, predicateKey, objectIndex, support);
+ }
+
+ @Override
+ public void forObjects(ReadGraphImpl graph, int resourceKey,
+ int predicateKey, AsyncMultiProcedure<Resource> procedure) throws DatabaseException {
+
+ throw new UnsupportedOperationException();
+
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// if (DEBUG)
+// System.out.println("ClusterSmall.forObjects3: rk=" + resourceKey + " pk=" + predicateKey);
+// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+// final int pRef = getInternalReferenceOrZero2(predicateKey, support);
+// final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey);
+// final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType);
+// if (completeType > 0) {
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+// if (0 == predicateIndex) {
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+// forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support);
+ }
+
+ public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException {
+
+ throw new UnsupportedOperationException();
+
+// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+// final int predicateKey = procedure.predicateKey;
+// int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+// int pRef = 0;
+// if(procedure.clusterKey[0] == clusterKey) {
+// pRef = procedure.predicateReference[0];
+// } else {
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// pRef = getInternalReferenceOrZero2(predicateKey, support);
+// procedure.clusterKey[0] = clusterKey;
+// procedure.predicateReference[0] = pRef;
+// }
+// final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+// if (0 == predicateIndex) {
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// int hashBase = predicateIndex + predicateTable.offset;
+// if (predicateTable.table[hashBase-1] < 0) {
+// int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+// //int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support);
+// } else {
+// procedure.finished(graph);
+//// graph.dec();
+// }
+ }
+
+ public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context, ForEachObjectContextProcedure<C> procedure) throws DatabaseException {
+
+ throw new UnsupportedOperationException();
+
+// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+// final int predicateKey = procedure.predicateKey;
+// int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey);
+// int pRef = 0;
+// if(procedure.clusterKey[0] == clusterKey) {
+// pRef = procedure.predicateReference[0];
+// } else {
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// pRef = getInternalReferenceOrZero2(predicateKey, support);
+// procedure.clusterKey[0] = clusterKey;
+// procedure.predicateReference[0] = pRef;
+// }
+//
+// final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType;
+// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF;
+// if (0 == predicateIndex) {
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this);
+// return;
+// }
+// int hashBase = predicateIndex + predicateTable.offset;
+// if(predicateTable.table[hashBase-1] < 0) {
+// int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support);
+// } else {
+// int objectIndex = TableIntSet2.get(predicateTable.table, hashBase, pRef & 0xFFFF);
+// SessionImplSocket session = (SessionImplSocket)graph.getSession();
+// ClusterSupport support = session.clusterTranslator;
+// forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support);
+// }
+ }
+ @Override
+ public <Context> boolean forObjects(int resourceKey, int predicateKey,
+ ObjectProcedure<Context> procedure, Context context, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.forObjects4: rk=" + resourceKey + " pk=" + predicateKey);
+ final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey);
+ final short pRef = getInternalReferenceOrZero2(predicateKey, support);
+ final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey);
+ // PredicateType is complete i.e. all relevant data is in resource table.
+ if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) {
+ if (DEBUG)
+ System.out.println("ClusterSmall.forObjects: complete type was " + pCompleteType + " cluster=" + getClusterUID());
+ return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+ }
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex) { // All relevant data is in resource table.
+ if (DEBUG)
+ System.out.println("ClusterSmall.forObjects: no predicate table " + pCompleteType);
+ return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable);
+ }
+ int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF);
+ return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support);
+ }
+ @Override
+ public <Context> boolean forPredicates(int resourceKey,
+ PredicateProcedure<Context> procedure, Context context, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.forPredicates: rk=" + resourceKey );
+ final int resourceIndex = getLocalReference(resourceKey);
+ final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex);
+ if (0 == predicateIndex)
+ return resourceTable.foreachPredicate(resourceIndex,
+ procedure, context, support, this, completeTable);
+ else {
+ boolean broken = resourceTable.foreachPredicate(resourceIndex,
+ procedure, context, support, this, completeTable);
+ if (broken)
+ return true;
+ }
+ return predicateTable.foreachPredicate(predicateIndex,
+ procedure, context, support, this);
+ }
+
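+ // If the small cluster runs out of table space, the addition is retried
+ // after converting the cluster to a big one: the pending statement is
+ // cancelled, the cluster stream is switched off for the duration of the
+ // conversion, and the relation is re-added to the resulting ClusterBig.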
+ @Override
+ public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) throws DatabaseException {
+
+ if(proxy) {
+ throw new UnsupportedOperationException();
+// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+// return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+ }
+
+ // check();
+ boolean ret;
+ try {
+ short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+ short pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION);
+ short ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION);
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ ret = addRelationInternal(sri, pri, ori, completeType);
+ calculateModifiedId();
+ } catch (OutOfSpaceException e) {
+ boolean streamOff = support.getStreamOff();
+ if (!streamOff) {
+ support.cancelStatement(this);
+ support.setStreamOff(true);
+ }
+ ClusterI cluster = toBig(clusterSupport);
+ if (!streamOff)
+ support.setStreamOff(false);
+ ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+ if (cluster != cluster2)
+ throw new DatabaseException("Internal error. Contact application support.");
+ return cluster;
+ }
+// check();
+ if (ret) {
+ support.addStatement(this);
+ return this;
+ } else {
+ support.cancelStatement(this);
+ return null;
+ }
+
+ }
+
+ @Override
+ public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) throws DatabaseException {
+
+ if (DEBUG)
+ System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey);
+
+ if(proxy) {
+ throw new UnsupportedOperationException();
+// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+// return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+ }
+
+ // check();
+ boolean ret;
+ try {
+ short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION);
+ short pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION);
+ short ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION);
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ ret = addRelationInternal(sri, pri, ori, completeType);
+ calculateModifiedId();
+ } catch (OutOfSpaceException e) {
+ boolean streamOff = support.getStreamOff();
+ if (!streamOff) {
+ support.cancelStatement(this);
+ support.setStreamOff(true);
+ }
+ ClusterI cluster = toBig(clusterSupport);
+ if (!streamOff)
+ support.setStreamOff(false);
+ ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support);
+ if (cluster != cluster2)
+ throw new DatabaseException("Internal error. Contact application support.");
+ return cluster;
+ }
+// check();
+ if (ret) {
+ support.addStatement(this);
+ return this;
+ } else {
+ support.cancelStatement(this);
+ return null;
+ }
+ }
+ @Override
+ public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+ throws DatabaseException {
+ // check();
+ short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION);
+ short pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION);
+ short ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION);
+ boolean ret = false;
+ if (0 != pri && 0 != ori) {
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ ret = removeRelationInternal(sri, pri, ori, completeType, support);
+ calculateModifiedId();
+ }
+ if (ret)
+ support.removeStatement(this);
+ else
+ support.cancelStatement(this);
+ // check();
+ return ret;
+ }
+ @Override
+ public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support)
+ throws DatabaseException {
+ short s = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support);
+ ResourceReferenceAndCluster p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support);
+ ResourceReferenceAndCluster o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support);
+ if (0 == s || 0 == p.reference || 0 == o.reference)
+ return;
+ // check();
+ ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey);
+ boolean ret = removeRelationInternal(s, p.reference, o.reference, completeType, support);
+ if (ret) {
+ support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION);
+ support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION);
+ support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION);
+ support.removeStatement(this);
+ }
+ calculateModifiedId();
+ // check();
+ return;
+ }
+ @Override
+ public InputStream getValueStream(int resourceKey, ClusterSupport support) throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.getValue " + resourceKey);
+ int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+ try {
+ byte[] buffer = resourceTable.getValue(valueTable, resourceIndex);
+ if(buffer == null) return null;
+ return new ByteArrayInputStream(buffer);
+ } catch (ExternalValueException e) {
+ return support.getValueStreamEx(resourceIndex, clusterUID.second);
+ }
+ }
+ @Override
+ public byte[] getValue(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG)
+ System.out.println("ClusterSmall.getValue " + resourceKey);
+ int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+ try {
+ return resourceTable.getValue(valueTable, resourceIndex);
+ } catch (ExternalValueException e) {
+ return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex);
+ //return support.getValueEx(resourceIndex, clusterUID.second);
+ }
+ }
+ @Override
+ public boolean hasValue(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(resourceKey);
+ return resourceTable.hasValue(resourceIndex);
+ }
+ @Override
+ public boolean removeValue(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReferenceAnd(resourceKey, support, ClusterChange.DELETE_OPERATION);
+ support.removeValue(this);
+ calculateModifiedId();
+ return resourceTable.removeValue(valueTable, resourceIndex);
+ }
+ @Override
+ public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION);
+ support.setValue(this, getClusterId(), value, length);
+ try {
+ resourceTable.setValue(valueTable, resourceIndex, value, length);
+ calculateModifiedId();
+ return this;
+ } catch (OutOfSpaceException e) {
+ boolean streamOff = support.getStreamOff();
+ if (!streamOff)
+ support.setStreamOff(true);
+ ClusterI cluster = toBig(support);
+ cluster.setValue(rResourceId, value, length, support);
+ if (!streamOff)
+ support.setStreamOff(false);
+ return cluster;
+ }
+ }
+ @Override
+ public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION);
+ support.modiValue(this, getClusterId(), voffset, length, value, offset);
+ resourceTable.setValueEx(valueTable, resourceIndex);
+ calculateModifiedId();
+ return this;
+ }
+ @Override
+ public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(rResourceId);
+ boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+ if (!isExternal)
+ throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId);
+ return support.getValueEx(resourceIndex, getClusterId(), voffset, length);
+ }
+ @Override
+ public boolean isValueEx(int resourceKey) throws DatabaseException {
+ int resourceIndex = getLocalReference(resourceKey);
+ return resourceTable.isValueEx(valueTable, resourceIndex);
+ }
+ @Override
+ public long getValueSizeEx(int rResourceId, ClusterSupport support)
+ throws DatabaseException, ExternalValueException {
+ int resourceIndex = getLocalReference(rResourceId);
+ boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex);
+ if (!isExternal)
+ throw new ExternalValueException("ClusterI.getValueSizeEx supported only for external value. Resource key=" + rResourceId);
+ return support.getValueSizeEx(resourceIndex, getClusterId());
+ }
+ @Override
+ public void setValueEx(int rResourceId)
+ throws DatabaseException {
+ int resourceIndex = getLocalReference(rResourceId);
+ resourceTable.setValueEx(valueTable, resourceIndex);
+ }
+ @Override
+ public int createResource(ClusterSupport support)
+ throws DatabaseException {
+
+ if(proxy) {
+ throw new UnsupportedOperationException();
+// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+// return cluster.createResource(support);
+ }
+
+ short resourceIndex = resourceTable.createResource();
+ calculateModifiedId();
+ if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION)
+ System.out.println("[RID_ALLOCATION]: ClusterSmall[" + clusterId + "] allocates " + resourceIndex);
+ support.createResource(this, resourceIndex, getClusterId());
+ return ClusterTraits.createResourceKey(clusterKey, resourceIndex);
+ }
+ @Override
+ public boolean hasResource(int resourceKey, ClusterSupport support) {
+ int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey);
+ if (this.clusterKey != clusterKey) // foreign resource
+ return false;
+ int resourceIndex;
+ try {
+ resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ } catch (DatabaseException e) {
+ return false;
+ }
+ return resourceIndex > 0 && resourceIndex <= resourceTable.getTableCount();
+ }
+ @Override
+ public int getNumberOfResources(ClusterSupport support)
+ throws DatabaseException {
+
+ if(proxy) {
+ throw new UnsupportedOperationException();
+// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey);
+// return cluster.getNumberOfResources(support);
+ }
+
+ return resourceTable.getUsedSize();
+ }
+
+ public int getNumberOfResources() {
+
+ if(proxy) throw new IllegalStateException();
+
+ return resourceTable.getUsedSize();
+
+ }
+
+ @Override
+ public long getUsedSpace() {
+ if(isEmpty()) return 0;
+ long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id)
+ long ft = foreignTable.getTableCapacity() * 8;
+ long pt = predicateTable.getTableCapacity() * 4;
+ long ot = objectTable.getTableCapacity() * 4;
+ long ct = completeTable.getTableCapacity() * 4;
+ long vt = valueTable.getTableCapacity() * 1;
+ long cm = clusterMap.getUsedSpace();
+ return rt + ft + pt + ot + ct + vt + cm;
+ }
+ @Override
+ public boolean isEmpty() {
+ if(resourceTable == null) return true;
+ return resourceTable.getTableCount() == 0;
+ }
+ @Override
+ public void printDebugInfo(String message, ClusterSupport support)
+ throws DatabaseException {
+ throw new DatabaseException("Not implemented!");
+ }
+ private short getInternalReferenceOrZero2(int resourceKey, ClusterSupport support) throws DatabaseException {
+ int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+ if (!ClusterTraitsBase.isCluster(clusterBits, resourceKey)) {
+ return clusterMap.getForeignReferenceOrZero(resourceKey);
+ } else {
+ return (short)resourceIndex;
+ }
+ }
+ private short getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) { // foreign resource
+ ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+ short foreignRef = clusterMap.getForeignReferenceOrZero(resourceKey);
+ support.addStatementIndex(this, resourceKey, clusterUID, op);
+ return foreignRef;
+ }
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return (short)resourceIndex;
+ }
+ private final short getLocalReference(int resourceKey) throws DatabaseException {
+ return ClusterTraits.getResourceIndexFromResourceKeyNoThrow(resourceKey);
+ }
+ private final short getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op)
+ throws DatabaseException {
+ short resourceIndex = getLocalReference(resourceKey);
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private short checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterShortId)
+ return 0;
+ int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ return (short)resourceIndex;
+ }
+ private short getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) {
+ support.addStatementIndex(this, resourceKey, clusterUID, op);
+ short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+ return ref;
+ }
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private short getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) {
+ ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey);
+ support.addStatementIndex(this, resourceKey, clusterUID, op);
+ short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID);
+ return ref;
+ }
+ support.addStatementIndex(this, resourceKey, getClusterUID(), op);
+ return resourceIndex;
+ }
+ private class ResourceReferenceAndCluster {
+ ResourceReferenceAndCluster(short reference, ClusterUID clusterUID) {
+ this.reference = reference;
+ this.clusterUID = clusterUID;
+ }
+ public final short reference;
+ public final ClusterUID clusterUID;
+ }
+ private ResourceReferenceAndCluster checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support)
+ throws DatabaseException {
+ int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey);
+ short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey);
+ if (this.clusterKey != clusterKey) { // foreign resource
+ ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey);
+ ClusterUID clusterUID = foreignCluster.getClusterUID();
+ short ref = clusterMap.getForeignReferenceOrZero(resourceKey);
+ return new ResourceReferenceAndCluster(ref, clusterUID);
+ }
+ return new ResourceReferenceAndCluster(resourceIndex, getClusterUID());
+ }
+
+ static long fTime = 0;
+
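+ // Resolves a 16-bit cluster-local resource reference into a full resource
+ // key: local references are combined directly with this cluster's key bits,
+ // while foreign references are looked up from the foreign table, whose
+ // ResourceUID identifies the owning cluster and the index within it.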
+ @Override
+ final public int execute(int resourceReference) throws DatabaseException {
+ short resourceRef = (short)resourceReference;
+ int key;
+ if (ClusterTraitsSmall.resourceRefIsLocal(resourceRef)) {
+ key = clusterBits | resourceRef;
+ } else {
+ short foreignIndex = ClusterTraitsSmall.resourceRefGetForeignIndex((short)resourceRef);
+ //long start = System.nanoTime();
+ ResourceUID resourceUID = foreignTable.getResourceUID(foreignIndex);
+ int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(resourceUID.asCID());
+// ClusterBase cluster = clusterSupport.getClusterByClusterUIDOrMake(resourceUID.asCID());
+ key = ClusterTraitsBase.createResourceKey(clusterKey, resourceUID.getIndex());
+ //fTime += System.nanoTime() - start;
+ //System.err.println("fTime: " + 1e-9*fTime);
+ }
+ if (DEBUG)
+ System.out.println("ClusterSmall.execute key=" + key);
+ return key;
+ }
+
+ private boolean addRelationInternal(short sReference, short pReference, short oReference, ClusterI.CompleteTypeEnum completeType)
+ throws DatabaseException {
+ int predicateIndex = resourceTable.addStatement(sReference, pReference, oReference, predicateTable, objectTable, completeType, completeTable);
+ if (0 == predicateIndex)
+ return true; // added to resourceTable
+ else if (0 > predicateIndex)
+ return false; // old complete statement
+ int newPredicateIndex = predicateTable.addPredicate(predicateIndex, 0xFFFF & pReference, 0xFFFF & oReference, objectTable);
+ if (0 == newPredicateIndex)
+ return false;
+ if (predicateIndex != newPredicateIndex)
+ resourceTable.setPredicateIndex(sReference, newPredicateIndex);
+ return true;
+ }
+ private boolean removeRelationInternal(int sResourceIndex, short pResourceIndex,
+ short oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support)
+ throws DatabaseException {
+ int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex);
+ if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType)
+ return resourceTable.removeStatementFromCache(sResourceIndex,
+ pResourceIndex, oResourceIndex, completeType, completeTable);
+ PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, 0xFFFF & pResourceIndex, 0xFFFF & oResourceIndex, objectTable);
+ switch (ret) {
+ case NothingRemoved:
+ return false;
+ case PredicateRemoved: {
+ if (0 == predicateTable.getPredicateSetSize(predicateIndex))
+ resourceTable.setPredicateIndex(sResourceIndex, 0);
+ // intentional fall-through to the default case
+ } default:
+ break;
+ }
+ resourceTable.removeStatement(sResourceIndex,
+ pResourceIndex, oResourceIndex,
+ completeType, completeTable,
+ predicateTable, objectTable, support);
+ return true;
+ }
+ @Override
+ public void load() {
+ throw new Error("Not supported.");
+ }
+
+ @Override
+ public void load(Callback<DatabaseException> r) {
+ throw new Error("Not supported.");
+ }
+
+ public boolean contains(int resourceKey) {
+ return ClusterTraitsBase.isCluster(clusterBits, resourceKey);
+ }
+ @Override
+ public void load(final ClusterSupport support, final Runnable callback) {
+
+ throw new UnsupportedOperationException();
+
+// try {
+// clusterTable.load2(clusterId, clusterKey);
+// callback.run();
+// } catch (DatabaseException e) {
+// e.printStackTrace();
+// }
+
+ }
+ @Override
+ public ClusterI getClusterByResourceKey(int resourceKey,
+ ClusterSupport support) {
+ throw new Error();
+ }
+ @Override
+ public void increaseReferenceCount(int amount) {
+ throw new Error();
+ }
+ @Override
+ public void decreaseReferenceCount(int amount) {
+ throw new Error();
+ }
+ @Override
+ public int getReferenceCount() {
+ throw new Error();
+ }
+ @Override
+ public void releaseMemory() {
+ }
+ @Override
+ public void compact() {
+ clusterMap.compact();
+ }
+ @Override
+ public boolean isLoaded() {
+ return !proxy;
+ }
+
+// public ClusterImpl tryLoad(SessionImplSocket sessionImpl) {
+//
+// throw new UnsupportedOperationException();
+// assert(Constants.ReservedClusterId != clusterId);
+//
+// return clusterTable.tryLoad(clusterId, clusterKey);
+//
+// }
+
+
+ @Override
+ public ClusterBig toBig(ClusterSupport support)
+ throws DatabaseException {
+ if (DEBUG) {
+ System.out.println("DEBUG: toBig cluster=" + clusterId);
+ new Exception().printStackTrace();
+ }
+ ClusterBig big = new ClusterBig(clusterSupport, getClusterUID(), clusterKey, (ClusterSupport2)support);
+ big.cc = this.cc;
+// if(big.cc != null)
+// big.cc.clusterImpl = this;
+ resourceTable.toBig(big, support, this);
+ big.foreignLookup = this.foreignLookup;
+ big.change = this.change;
+ this.cc = null;
+ this.foreignLookup = null;
+ this.change = null;
+ return big;
+ }
+
+ @Override
+ public ClusterTypeEnum getType() {
+ return ClusterTypeEnum.SMALL;
+ }
+ @Override
+ public boolean getImmutable() {
+ int status = resourceTable.getClusterStatus();
+ return (status & ClusterStatus.ImmutableMaskSet) == 1;
+ }
+ @Override
+ public void setImmutable(boolean immutable, ClusterSupport support) {
+ if(resourceTable != null) {
+ int status = resourceTable.getClusterStatus();
+ if (immutable)
+ status |= ClusterStatus.ImmutableMaskSet;
+ else
+ status &= ClusterStatus.ImmutableMaskClear;
+ resourceTable.setClusterStatus(status);
+ }
+ support.setImmutable(this, immutable);
+ }
+
+ @Override
+ public String toString() {
+ try {
+ final TIntHashSet set = new TIntHashSet();
+ TIntShortHashMap map = foreignTable.getResourceHashMap();
+ map.forEachKey(new TIntProcedure() {
+ @Override
+ public boolean execute(int value) {
+ set.add(value & 0xfffff000);
+ return true;
+ }
+ });
+ return "ClusterSmall[" + getClusterUID() + " - " + getClusterId() + " - " + getNumberOfResources() + " - " + foreignTable.getResourceHashMap().size() + " - " + set.size() + "]";
+ } catch (DatabaseException e) {
+ return "ClusterSmall[" + getNumberOfResources() + "]";
+ }
+ }
+
+ // Memory map
+ // bytes (b) | headers(i) | predicateTable (i) | objectTable (i) | completeTable (i) | resourceTable (l) | foreignTable (l)
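+ //
+ // A sketch of the byte image produced by storeBytes() below: a 12-byte
+ // prefix holds byteSize, intSize and longSize as little-endian ints, then
+ // come the value table bytes, the int-typed tables prefixed by the int
+ // header, and finally the long-typed tables prefixed by the long header
+ // (-1, LONG_HEADER_VERSION, 0, clusterUID.second).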
+
+ @Override
+ public byte[] storeBytes() throws IOException {
+
+ int byteSize = valueTable.getTableSize();
+ int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+ int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+
+ byte[] raw = new byte[12 + byteSize + 8*longSize + 4*intSize];
+
+ int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+ Bytes.writeLE(raw, 0, byteSize);
+ Bytes.writeLE(raw, 4, intSize);
+ Bytes.writeLE(raw, 8, longSize);
+
+ int rawPos = valueTable.storeBytes(raw, 0, 12);
+
+ int intBase = rawPos;
+
+ rawPos += 4*INT_HEADER_SIZE;
+ rawPos = predicateTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+ rawPos = objectTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+ rawPos = completeTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos);
+
+ int longBase = rawPos;
+
+ rawPos += 8*LONG_HEADER_SIZE;
+ rawPos = resourceTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos);
+ rawPos = foreignTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos);
+
+ Bytes.writeLE8(raw, longBase, -1);
+ Bytes.writeLE8(raw, longBase+8, LONG_HEADER_VERSION);
+ Bytes.writeLE8(raw, longBase+16, 0);
+ Bytes.writeLE8(raw, longBase+24, clusterUID.second);
+
+ // write header
+ for(int i=0;i<INT_HEADER_SIZE;i++) {
+ int v = headerTable[i];
+ Bytes.writeLE(raw, intBase, v);
+ intBase+=4;
+ }
+
+ for(int i=0;i<INT_HEADER_SIZE;i++)
+ headerTable[i] = currentHeader[i];
+
+ return raw;
+
+ }
+
+ @Override
+ public ClusterTables store() throws IOException {
+
+ ClusterTables result = new ClusterTables();
+
+ int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE);
+
+ int byteSize = valueTable.getTableSize();
+ byte[] byteBytes = new byte[byteSize];
+ valueTable.store(byteBytes, 0);
+
+ result.bytes = byteBytes;
+
+ int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize();
+ long[] longBytes = new long[longSize];
+
+ longBytes[0] = -1;
+ longBytes[1] = LONG_HEADER_VERSION;
+ longBytes[2] = 0;
+ longBytes[3] = clusterUID.second;
+
+ int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE);
+ foreignTable.store(longBytes, longPos);
+
+ result.longs = longBytes;
+
+ int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize();
+ int[] intBytes = new int[intSize];
+ int intPos = INT_HEADER_SIZE;
+ intPos = predicateTable.store(intBytes, intPos);
+ intPos = objectTable.store(intBytes, intPos);
+ intPos = completeTable.store(intBytes, intPos);
+ // write header
+ for(int i=0;i<INT_HEADER_SIZE;i++) {
+ int v = headerTable[i];
+ intBytes[i] = v;
+ //Bytes.writeLE(intBytes, i<<2, v);
+ }
+
+ result.ints = intBytes;
+
+ for(int i=0;i<INT_HEADER_SIZE;i++)
+ headerTable[i] = currentHeader[i];
+
+ return result;
+
+ }
+
+ @Override
+ protected int getResourceTableCount() {
+ return resourceTable.getTableCount();
+ }
+
+ @Override
+ public boolean getDeleted() {
+ if (deleted) return true;
+ int status = resourceTable.getClusterStatus();
+ return (status & ClusterStatus.DeletedMaskSet) == ClusterStatus.DeletedMaskSet;
+ }
+ @Override
+ public void setDeleted(boolean set, ClusterSupport support) {
+ deleted = set;
+ if(resourceTable != null) {
+ int status = resourceTable.getClusterStatus();
+ if (set)
+ status |= ClusterStatus.DeletedMaskSet;
+ else
+ status &= ClusterStatus.DeletedMaskClear;
+ resourceTable.setClusterStatus(status);
+ }
+ if (null != support)
+ support.setDeleted(this, set);
+ }
+
+ @Override
+ public Table<?> getPredicateTable() {
+ return predicateTable;
+ }
+
+ @Override
+ public Table getForeignTable() {
+ return foreignTable;
+ }
+
+ @Override
+ public int makeResourceKey(int pRef) throws DatabaseException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Table<?> getCompleteTable() {
+ return completeTable;
+ }
+
+ @Override
+ public Table<?> getValueTable() {
+ return valueTable;
+ }
+
+ @Override
+ public Table<?> getObjectTable() {
+ return objectTable;
+ }
+
+}
+
+class ClusterStatus {
+ public static final int ImmutableMaskClear = 0xFFFFFFFE;
+ public static final int ImmutableMaskSet = 0x00000001;
+ public static final int DeletedMaskClear = 0xFFFFFFFD;
+ public static final int DeletedMaskSet = 0x00000002;
+}
--- /dev/null
+package org.simantics.acorn.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileLock;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileVisitOption;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.EnumSet;
+import java.util.Properties;
+
+import org.simantics.acorn.GraphClientImpl2;
+import org.simantics.db.Database;
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabase implements Database {
+
+ private final Path folder;
+
+ private DatabaseUserAgent userAgent;
+
+ private RandomAccessFile raLockFile;
+
+ private FileLock lock;
+
+ private boolean isRunning;
+
+ public AcornDatabase(Path folder) {
+ this.folder = folder;
+ }
+
+ @Override
+ public DatabaseUserAgent getUserAgent() {
+ return userAgent;
+ }
+
+ @Override
+ public void setUserAgent(DatabaseUserAgent dbUserAgent) {
+ userAgent = dbUserAgent;
+ }
+
+ @Override
+ public Status getStatus() {
+ return Status.Local;
+ }
+
+ @Override
+ public File getFolder() {
+ return folder.toFile();
+ }
+
+ @Override
+ public boolean isFolderOk() {
+ return isFolderOk(folder.toFile());
+ }
+
+ @Override
+ public boolean isFolderOk(File aFolder) {
+ if (!aFolder.isDirectory())
+ return false;
+ return true;
+ }
+
+ @Override
+ public boolean isFolderEmpty() {
+ return isFolderEmpty(folder.toFile());
+ }
+
+ @Override
+ public boolean isFolderEmpty(File aFolder) {
+ Path path = aFolder.toPath();
+ if (!Files.isDirectory(path))
+ return false;
+ try (DirectoryStream<Path> folderStream = Files.newDirectoryStream(path)) {
+ return !folderStream.iterator().hasNext();
+ } catch (IOException e) {
+ Logger.defaultLogError("Failed to open folder stream. folder=" + path, e);
+ return false;
+ }
+ }
+
+ @Override
+ public void initFolder(Properties properties) throws ProCoreException {
+ try {
+ Files.createDirectories(folder);
+ } catch (IOException e) {
+ throw new ProCoreException(e);
+ }
+ }
+
+ @Override
+ public void deleteFiles() throws ProCoreException {
+ deleteTree(folder);
+ }
+
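+ // Single-instance locking: start() creates a "lock" file in the database
+ // folder and takes an exclusive FileLock on it, so a second process opening
+ // the same folder fails fast instead of corrupting the database.
+ // tryToStop() releases the lock and removes the file.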
+ @Override
+ public void start() throws ProCoreException {
+ Path lockFile = folder.resolve("lock");
+ try {
+ if (!Files.exists(lockFile))
+ Files.createFile(lockFile);
+
+ raLockFile = new RandomAccessFile(lockFile.toFile(), "rw");
+ lock = raLockFile.getChannel().tryLock();
+ if (lock == null) {
+ throw new ProCoreException("The database in folder " + folder.toAbsolutePath() + " is already in use!");
+ }
+
+ isRunning = true;
+
+ } catch (IOException e) {
+ throw new ProCoreException("Failed to start database in folder " + folder.toAbsolutePath(), e);
+ }
+ }
+
+ @Override
+ public boolean isRunning() throws ProCoreException {
+ return isRunning;
+ }
+
+ @Override
+ public boolean tryToStop() throws ProCoreException {
+ try {
+ lock.release();
+ raLockFile.close();
+
+ Files.deleteIfExists(folder.resolve("lock"));
+
+ isRunning = false;
+
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ return true;
+ }
+
+ @Override
+ public void connect() throws ProCoreException {
+ }
+
+ @Override
+ public boolean isConnected() throws ProCoreException {
+ return isRunning;
+ }
+
+ @Override
+ public String execute(String command) throws ProCoreException {
+ throw new UnsupportedOperationException("execute(" + command + ")");
+ }
+
+ @Override
+ public void disconnect() throws ProCoreException {
+ }
+
+ @Override
+ public void clone(File to, int revision, boolean saveHistory) throws ProCoreException {
+ // TODO: implement
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Path createFromChangeSets(int revision) throws ProCoreException {
+ // TODO: implement
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void deleteGuard() throws ProCoreException {
+ // TODO: implement
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Path dumpChangeSets() throws ProCoreException {
+ // TODO: implement
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void purgeDatabase() throws ProCoreException {
+ // TODO: implement
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long serverGetTailChangeSetId() throws ProCoreException {
+ // "We have it all"
+ // But after purging we don't so beware.
+ // TODO: beware for purge
+ return 1;
+ }
+
+ @Override
+ public Session newSession(ServiceLocator locator) throws ProCoreException {
+ try {
+ return new GraphClientImpl2(this, folder, locator);
+ } catch (IOException e) {
+ throw new ProCoreException(e);
+ }
+ }
+
+ @Override
+ public Journal getJournal() throws ProCoreException {
+ // TODO: implement
+ throw new UnsupportedOperationException();
+ }
+
+ private static void deleteTree(Path path) throws ProCoreException {
+ if (!Files.exists(path))
+ return;
+
+ class Visitor extends SimpleFileVisitor<Path> {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ try {
+ Files.delete(file);
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ throw ioe;
+ }
+ return FileVisitResult.CONTINUE;
+ }
+ @Override
+ public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
+ if (e == null) {
+ try {
+ Files.delete(dir);
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ throw ioe;
+ }
+ return FileVisitResult.CONTINUE;
+ }
+ throw e;
+ }
+ }
+ try {
+ Visitor v = new Visitor();
+ EnumSet<FileVisitOption> opts = EnumSet.noneOf(FileVisitOption.class);
+ Files.walkFileTree(path, opts, Integer.MAX_VALUE, v);
+ } catch (IOException e) {
+ throw new ProCoreException("Could not delete " + path, e);
+ }
+ }
+
+ @Override
+ public String getCompression() {
+ return "LZ4";
+ }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+package org.simantics.acorn.internal;
+
+import org.eclipse.core.runtime.Plugin;
+import org.osgi.framework.BundleContext;
+
+/**
+ * @author Antti Villberg
+ */
+public class Activator extends Plugin {
+
+ // The plug-in ID
+ public static final String BUNDLE_ID = "org.simantics.acorn"; //$NON-NLS-1$
+ // The shared instance
+ private static Activator plugin;
+
+ /**
+ * The constructor
+ */
+ public Activator() {
+ }
+
+ /*
+ * (non-Javadoc)
+ * @see org.eclipse.core.runtime.Plugin#start(org.osgi.framework.BundleContext)
+ */
+ @Override
+ public void start(BundleContext context) throws Exception {
+ super.start(context);
+ plugin = this;
+ }
+
+ /*
+ * (non-Javadoc)
+ * @see org.eclipse.core.runtime.Plugin#stop(org.osgi.framework.BundleContext)
+ */
+ @Override
+ public void stop(BundleContext context) throws Exception {
+ plugin = null;
+ super.stop(context);
+ }
+
+ /**
+ * Returns the shared instance
+ *
+ * @return the shared instance
+ */
+ public static Activator getDefault() {
+ return plugin;
+ }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2007, 2010 Association for Decentralized Information Management
+ * in Industry THTH ry.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * VTT Technical Research Centre of Finland - initial API and implementation
+ *******************************************************************************/
+/*
+ * Created on Jan 21, 2005
+ *
+ * Copyright Toni Kalajainen
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.simantics.acorn.internal;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A bijection map is a map that has no distinct keys or values, only 1:1
+ * mappings between values. The two ends of a mapping are referred to as the
+ * left and right side values.
+ *
+ * Each value can occur at most once on each side.
+ *
+ * @author Toni Kalajainen
+ */
+public class BijectionMap<L, R> {
+
+ /** The keys of tableLeft are left-side-values and
+ * values are right-side-values */
+ private final Map<L, R> tableLeft = new HashMap<L, R>();
+ /** The keys of tableRight are right-side-values and
+ * values on it are left-side-values */
+ private final Map<R, L> tableRight = new HashMap<R, L>();
+
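+ // Usage sketch (hypothetical values) illustrating the 1:1 semantics:
+ //
+ // BijectionMap<String, Integer> m = new BijectionMap<String, Integer>();
+ // m.map("a", 1); // a <-> 1
+ // m.map("a", 2); // remaps to a <-> 2; the pair a <-> 1 is removed
+ // m.getLeft(2); // returns "a"
+ // m.getRight("a"); // returns 2
+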
+ public boolean containsLeft(L leftValue)
+ {
+ return tableLeft.containsKey(leftValue);
+ }
+
+ public boolean containsRight(R rightValue)
+ {
+ return tableRight.containsKey(rightValue);
+ }
+
+ public void map(L leftValue, R rightValue)
+ {
+ // Remove possible old mappings of both end points to keep the 1:1 invariant
+ R oldRight = tableLeft.remove(leftValue);
+ if (oldRight != null)
+ tableRight.remove(oldRight);
+ L oldLeft = tableRight.remove(rightValue);
+ if (oldLeft != null)
+ tableLeft.remove(oldLeft);
+
+ tableLeft.put(leftValue, rightValue);
+ tableRight.put(rightValue, leftValue);
+ }
+
+ public int size()
+ {
+ return tableLeft.size();
+ }
+
+ public L getLeft(R rightValue) {
+ return tableRight.get(rightValue);
+ }
+
+ public R getRight(L leftValue) {
+ return tableLeft.get(leftValue);
+ }
+
+ public R removeWithLeft(L leftValue) {
+ R rightValue = tableLeft.remove(leftValue);
+ if (rightValue!=null)
+ tableRight.remove(rightValue);
+ return rightValue;
+ }
+
+ public L removeWithRight(R rightValue) {
+ L leftValue = tableRight.remove(rightValue);
+ if (leftValue!=null)
+ tableLeft.remove(leftValue);
+ return leftValue;
+ }
+
+ public Set<L> getLeftSet() {
+ return tableLeft.keySet();
+ }
+
+ public Set<R> getRightSet() {
+ return tableRight.keySet();
+ }
+
+ public void clear() {
+ tableLeft.clear();
+ tableRight.clear();
+ }
+}
--- /dev/null
+package org.simantics.acorn.internal;
+
+import org.simantics.db.service.ClusterUID;
+
+final public class Change {
+
+ byte op0;
+ int key0;
+ int key1;
+ int key2;
+ ClusterUID clusterUID1;
+ ClusterUID clusterUID2;
+ byte[] lookup1;
+ byte[] lookup2;
+ byte lookIndex1;
+ byte lookIndex2;
+ int lastArg = 0;
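+ // lastArg tracks which operand slot the next addStatementIndex(...) call
+ // fills: 0 = subject (with operation code), 1 = predicate, 2 = object.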
+
+ @Override
+ public String toString() {
+ return "Change " + (key0&0xffff) + " " + (key1&0xffff) + " " + (key2&0xffff) + " " + clusterUID2 + " " + clusterUID2;
+ }
+
+ public final void init() {
+ lastArg = 0;
+ }
+
+ public final void initValue() {
+ lastArg = 0;
+ }
+
+ final void addStatementIndex0(int key, byte op) {
+ assert (op != 0);
+ key0 = key;
+ op0 = op;
+ }
+
+ final void addStatementIndex1(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+ key1 = key;
+ clusterUID1 = clusterUID;
+ lookIndex1 = lookIndex;
+ lookup1 = lookup;
+// if(lookIndex > 0)
+// System.err.println("statementIndex1 " + pos + " " + lookIndex);
+ }
+
+ final void addStatementIndex2(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+ key2 = key;
+ clusterUID2 = clusterUID;
+ lookIndex2 = lookIndex;
+ lookup2 = lookup;
+ }
+
+ final public void addStatementIndex(int key, ClusterUID clusterUID, byte op) {
+
+ // new Exception("lastArg=" + lastArg).printStackTrace();
+
+ assert (lastArg < 3);
+
+ if (0 == lastArg)
+ addStatementIndex0(key, op);
+ else if (1 == lastArg)
+ addStatementIndex1(key, clusterUID, (byte)0, null);
+ else if (2 == lastArg)
+ addStatementIndex2(key, clusterUID, (byte)0, null);
+
+ lastArg++;
+
+ }
+}
--- /dev/null
+package org.simantics.acorn.internal;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.simantics.acorn.internal.ClusterStream.ClusterEnum;
+import org.simantics.acorn.internal.ClusterStream.Data;
+import org.simantics.acorn.internal.ClusterStream.DebugInfo;
+import org.simantics.acorn.internal.ClusterStream.OpEnum;
+import org.simantics.acorn.internal.ClusterStream.StmEnum;
+import org.simantics.compressions.Compressions;
+import org.simantics.db.exception.RuntimeDatabaseException;
+import org.simantics.db.impl.ClusterTraitsBase;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.procore.cluster.ClusterTraitsSmall;
+import org.simantics.db.service.Bytes;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.datastructures.Pair;
+
+import gnu.trove.map.hash.TIntByteHashMap;
+import gnu.trove.map.hash.TLongIntHashMap;
+
+
+public final class ClusterChange {
+
+ public static final int VERSION = 1;
+ public static final byte ADD_OPERATION = 2;
+ public static final byte REMOVE_OPERATION = 3;
+ public static final byte DELETE_OPERATION = 5;
+
+ public static final boolean DEBUG = false;
+ public static final boolean DEBUG_STAT = false;
+ public static final boolean DEBUG_CCS = false;
+
+ private static DebugInfo sum = new DebugInfo();
+
+ public final TIntByteHashMap foreignTable = new TIntByteHashMap();
+ private final DebugInfo info;
+// private final GraphSession graphSession;
+ public final ClusterUID clusterUID;
+ private final int SIZE_OFFSET;
+// private final int HEADER_SIZE;
+ // How much of the buffer may be used before the stream is flushed to the server. The bigger the better.
+ public static final int MAX_FIXED_BYTES = (1<<15) + (1<<14);
+ private static final int MAX_FIXED_OPERATION_SIZE = 17 + 16;
+ private static final int MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR = MAX_FIXED_OPERATION_SIZE + 36;
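+ // MAX_FIXED_BYTES is 48 KiB (2^15 + 2^14); operations are appended as long
+ // as at least MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR bytes remain free,
+ // otherwise checkBufferSpace() flushes the buffer first.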
+ private int nextSize = MAX_FIXED_BYTES;
+ int byteIndex = 0;
+ private byte[] bytes = null; // Operation data.
+// private final byte[] header;
+ private boolean flushed = false;
+ private ArrayList<Pair<ClusterUID, byte[]>> stream;
+
+// public ClusterImpl clusterImpl;
+
+ public ClusterChange( ArrayList<Pair<ClusterUID, byte[]>> stream, ClusterUID clusterUID) {
+ this.clusterUID = clusterUID;
+ long[] longs = new long[ClusterUID.getLongLength()];
+ clusterUID.toLong(longs, 0);
+ this.stream = stream;
+// this.graphSession = clusterStream.graphSession;
+ info = new DebugInfo();
+// HEADER_SIZE = 8 + longs.length * 8;
+// header = new byte[HEADER_SIZE];
+ SIZE_OFFSET = 0;
+// Bytes.writeLE(header, SIZE_OFFSET + 0, 0); // Correct byte vector size is set with setHeaderVectorSize() later.
+// Bytes.writeLE(header, SIZE_OFFSET + 4, VERSION);
+// for (int i=0, offset=8; i<longs.length; ++i, offset+=8)
+// Bytes.writeLE(header, offset, longs[i]);
+ //initBuffer();
+// this.clusterStream = clusterStream;
+// this.clusterChange2 = new ClusterChange2(clusterUID, clusterImpl);
+// clusterStream.changes.add(this);
+ }
+
+// private void setHeaderVectorSize(int size) {
+// if (size < 0)
+// throw new RuntimeDatabaseException("Change set size can't be negative.");
+// int len = size + HEADER_SIZE - SIZE_OFFSET - 4;
+// Bytes.writeLE(header, SIZE_OFFSET, len);
+// }
+ @Override
+ public String toString() {
+ return super.toString() + " cluster=" + clusterUID + " off=" + byteIndex;
+ }
+ final public void initBuffer() {
+ flushed = false;
+ if (null == bytes || bytes.length < nextSize) {
+ bytes = new byte[nextSize];
+ nextSize = MAX_FIXED_BYTES;
+ }
+ byteIndex = 0;
+ }
+ private final void clear() {
+// if(clusterImpl != null && clusterImpl.change != null)
+// clusterImpl.change.init();
+ foreignTable.clear();
+ //initBuffer();
+ bytes = null;
+ byteIndex = 0;
+ if (DEBUG_STAT)
+ info.clear();
+ }
+ private final void checkInitialization() {
+// if (0 == byteIndex)
+// clusterStream.changes.addChange(this);
+ }
+ private final void printlnd(String s) {
+ System.out.println("DEBUG: ClusterChange " + clusterUID + ": " + s);
+ }
+ public final void createResource(short index) {
+ checkInitialization();
+ if (DEBUG)
+ printlnd("New ri=" + index + " offset=" + byteIndex);
+ if (index > ClusterTraits.getMaxNumberOfResources())
+ throw new RuntimeDatabaseException("Illegal resource index=" + index + ".");
+ checkBufferSpace(null);
+ bytes[byteIndex++] = (byte)52;
+ bytes[byteIndex++] = (byte)index;
+ bytes[byteIndex++] = (byte)(index>>>8);
+ }
+ void flushCollect(Change c) {
+ throw new UnsupportedOperationException();
+// flushInternal(graphSession, clusterUID);
+// if (DEBUG)
+// printlnd("Cluster change data was flushed.");
+// if (null != c) {
+// if (DEBUG)
+// printlnd("Clearing lookup for " + c.toString());
+// c.lookup1 = null;
+// c.lookup2 = null;
+// }
+// if (null != clusterImpl) {
+// clusterImpl.foreignLookup = null;
+// }
+ }
+
+ private final boolean checkBufferSpace(Change c) {
+// clusterStream.changes.checkFlush();
+ if(bytes == null) initBuffer();
+ if (MAX_FIXED_BYTES - byteIndex > MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR) {
+ return false;
+ }
+ flush();
+// initBuffer();
+ return true;
+ }
+
+ private final void checkBufferSpace(int size) {
+ if(bytes == null) initBuffer();
+ if (bytes.length - byteIndex >= size)
+ return;
+ nextSize = Math.max(MAX_FIXED_BYTES, size);
+ flush();
+ initBuffer();
+ }
+
+ public final void addChange(Change c) {
+ checkInitialization();
+ checkBufferSpace(c);
+ byte operation = c.op0;
+ if(operation == ADD_OPERATION)
+ addStm(c, StmEnum.Add);
+ else if (operation == REMOVE_OPERATION)
+ addStm(c, StmEnum.Remove);
+ else if (operation == DELETE_OPERATION) {
+ if (DEBUG)
+ printlnd("Delete value offset=" + byteIndex + " " + c);
+ addByte(OpEnum.Delete.getOrMask());
+ addShort(ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0));
+ }
+ c.lastArg = 0;
+ }
+
+ private final void addForeignLong(short index, ClusterUID clusterUID) {
+ byteIndex = clusterUID.toByte(bytes, byteIndex);
+ bytes[byteIndex++] = (byte)(index & 0xFF);
+ bytes[byteIndex++] = (byte)(index >>> 8);
+ }
+
+ private final ClusterEnum addIndexAndCluster(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) {
+ assert(!clusterUID.equals(ClusterUID.Null));
+ short resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(key);
+ if (clusterUID.equals(this.clusterUID)) {
+ bytes[byteIndex++] = (byte)(resourceIndex & 0xFF);
+ bytes[byteIndex++] = (byte)(resourceIndex >>> 8);
+ return ClusterEnum.Local;
+ }
+
+ byte foreign = 0;
+ if(lookIndex > 0) {
+ if(lookup != null)
+ foreign = lookup[lookIndex];
+ } else {
+ foreign = foreignTable.get(key);
+ }
+ if (0 != foreign) {
+ if (foreign > 256)
+ throw new RuntimeDatabaseException("Internal error, contact application support." +
+ "Too big foreing index=" + foreign + " max=256");
+ --foreign;
+ bytes[byteIndex++] = foreign;
+ return ClusterEnum.ForeignShort;
+ } else {
+ byte position = (byte) (foreignTable.size() + 1);
+ if(lookup != null)
+ lookup[lookIndex] = position;
+ foreignTable.put(key, position);
+ if (DEBUG_STAT)
+ info.sForeign = foreignTable.size();
+ if (clusterUID.equals(ClusterUID.Null))
+ throw new RuntimeDatabaseException("Internal error, contact application support." +
+ "Cluster unique id not defined for foreing cluster.");
+ addForeignLong(resourceIndex, clusterUID);
+ return ClusterEnum.ForeignLong;
+ }
+ }
+
+ private final void addByte(byte b) {
+ bytes[byteIndex++] = b;
+ }
+
+ private final void addShort(short s) {
+ bytes[byteIndex++] = (byte)(s & 0xFF);
+ bytes[byteIndex++] = (byte)(s >>> 8);
+ }
+
+// private final void addShort(int s) {
+// bytes[byteIndex++] = (byte) (s & 0xFF);
+// bytes[byteIndex++] = (byte) ((s >>> 8) & 0xFF);
+// }
+
+ private final void addInt(int i) {
+// System.err.println("addInt " + i + " " + i);
+ bytes[byteIndex++] = (byte) (i & 0xFF);
+ bytes[byteIndex++] = (byte) ((i >>> 8) & 0xFF);
+ bytes[byteIndex++] = (byte) ((i >>> 16) & 0xFF);
+ bytes[byteIndex++] = (byte) ((i >>> 24) & 0xFF);
+ // buffer.asIntBuffer().put(i);
+ // buffer.position(buffer.position()+4);
+ }
+
+// private void addLong6(long l) {
+//// System.err.println("addLong " + l);
+// bytes[byteIndex++] = (byte) (l & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+// // buffer.asLongBuffer().put(l);
+// // buffer.position(buffer.position() + 6);
+// }
+
+ private void addLong7(long l) {
+ bytes[byteIndex++] = (byte) (l & 0xFF);
+ bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+ bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+ bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+ bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+ bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+ bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF);
+ // buffer.asLongBuffer().put(l);
+ // buffer.position(buffer.position() + 7);
+ }
+
+// private void addLong(long l) {
+// bytes[byteIndex++] = (byte) (l & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF);
+// bytes[byteIndex++] = (byte) ((l >>> 56) & 0xFF);
+// }
+ private final byte bufferPop() {
+ return bytes[--byteIndex];
+ }
+
+ final class DebugStm {
+ StmEnum e;
+ int r;
+ int p;
+ int o;
+ ClusterUID pc;
+ ClusterUID oc;
+
+ DebugStm(StmEnum e, int r, int p, ClusterUID pc, int o, ClusterUID oc) {
+ this.e = e;
+ this.r = r;
+ this.p = p;
+ this.o = o;
+ this.pc = pc;
+ this.oc = oc;
+ }
+
+ @Override
+ public String toString() {
+ short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
+ short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
+ short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
+ return "" + e + " rk=" + r + " ri=" + ri + " rc=" + clusterUID
+ + " pk=" + p + " pi=" + pi + " pc=" + pc
+ + " ok=" + o + " oi=" + oi + " oc=" + oc;
+ }
+
+ public String toString2() {
+ return "" + e + " r=" + r + " rc=" + clusterUID + " p=" + p
+ + " pc=" + pc + " o=" + o + " oc=" + oc;
+ }
+
+ public String toString3() {
+ short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r);
+ short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p);
+ short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o);
+ return "" + e + " ri=" + ri
+ + " pi=" + pi + " pc=" + pc
+ + " oi=" + oi + " oc=" + oc;
+ }
+ }
+
+ private List<DebugStm> debugStms = new ArrayList<DebugStm>();
+
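+ // A sketch of the statement encoding used by addStm(), as reconstructed from
+ // the code below: one operation byte is reserved at opPos, then predicate and
+ // object are written via addIndexAndCluster() (2 bytes for a local reference,
+ // 1 byte for an already-seen foreign reference, 18 bytes for a new foreign
+ // reference), followed by the low byte of the subject index. When either end
+ // is not a ForeignShort, the popped high bytes are re-packed together with
+ // the subject's high bits into 0-2 extra bytes selected by
+ // ClusterEnum.getData(); otherwise the operation byte itself carries the
+ // subject's high bits (+64 for Add, +128 for Remove).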
+ @SuppressWarnings("unused")
+ private final void addStm(Change c, StmEnum stmEnum) {
+
+ if (DEBUG_STAT)
+ ++info.nStms;
+ if (DEBUG || DEBUG_CCS) {
+ DebugStm d = new DebugStm(stmEnum, c.key0, c.key1, c.clusterUID1, c.key2, c.clusterUID2);
+ if (DEBUG_CCS)
+ debugStms.add(d);
+ if (DEBUG) {
+ printlnd(d.toString3() + " offset=" + byteIndex);
+ }
+ }
+ // int opPos = buffer.position();
+ int opPos = byteIndex++;
+ // buffer.put((byte)0); // operation code
+ // addByte((byte)0);
+
+ boolean done = true;
+
+ ClusterEnum a = addIndexAndCluster(c.key1, c.clusterUID1, c.lookIndex1, c.lookup1);
+ byte ab = 0;
+
+ // ForeignShort = byte
+ // Local = short
+ // ForeignLong = 8 byte
+ if (a != ClusterEnum.ForeignShort) {
+ ab = bufferPop();
+ done = false;
+ }
+
+ ClusterEnum b = addIndexAndCluster(c.key2, c.clusterUID2, c.lookIndex2, c.lookup2);
+ byte bb = 0;
+ if (b != ClusterEnum.ForeignShort) {
+ bb = bufferPop();
+ done = false;
+ }
+
+ int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+ if (ClusterTraitsSmall.isIllegalResourceIndex(ri))
+ throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
+ bytes[byteIndex++] = (byte)ri; // index low byte
+ if(!done) {
+ Data data = ClusterEnum.getData(stmEnum, a, b);
+ int left = 6 - data.bits;
+ int op = ri >>> (8 + left);
+ ri >>>= 8;
+ ri &= (1 << left) - 1;
+ if (a != ClusterEnum.ForeignShort) {
+ ri |= ab << left;
+ left += 6;
+ }
+ if (b != ClusterEnum.ForeignShort) {
+ ri |= bb << left;
+ left += 6;
+ }
+ switch (data.bytes) {
+ default:
+ throw new RuntimeDatabaseException("Assertion error. Illegal number of bytes=" + data.bytes);
+ case 2:
+ bytes[byteIndex++] = (byte)(ri & 0xFF);
+ bytes[byteIndex++] = (byte)((ri >>> 8) & 0xFF);
+ break;
+ case 1:
+ bytes[byteIndex++] = (byte)(ri & 0xFF);
+ break;
+ case 0:
+ break;
+ }
+ op |= data.mask;
+ this.bytes[opPos] = (byte)op;
+ } else {
+ if (stmEnum == StmEnum.Add)
+ bytes[opPos] = (byte)((ri >>> 8) + 64);
+ else
+ bytes[opPos] = (byte)((ri >>> 8) + 128);
+ }
+ if (DEBUG_STAT) {
+ if (a == ClusterEnum.Local && b == ClusterEnum.Local) {
+ ++info.nLocal;
+ } else if (a == ClusterEnum.Local || b == ClusterEnum.Local) {
+ ++info.nPartly;
+ } else {
+ ++info.nForeign;
+ }
+ }
+ if (foreignTable.size() > 252)
+ flush();
+// throw new UnsupportedOperationException();
+ //flushInternal(graphSession, clusterUID);
+ }
+
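+ // Wire format of a Modify operation, as implemented below: 1 op byte, a
+ // 2-byte resource index whose top two bits carry bits 56-57 of the 58-bit
+ // value offset, the remaining 56 offset bits as 7 little-endian bytes, and
+ // a 2-byte payload size, followed by the payload itself - 12 fixed bytes in
+ // total, matching checkBufferSpace(12 + size).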
+ private final int modiValue(int ri, long value_offset, byte[] bytes, int offset, int size) {
+ if (DEBUG)
+ printlnd("Modify value ri=" + ri + " vo=" + value_offset + " size=" + size + " total=" + bytes.length);
+ if (ClusterTraitsBase.isIllegalResourceIndex(ri))
+ throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri);
+ if (value_offset > (1L << 58) - 1)
+ throw new RuntimeDatabaseException("Illegal value offset="
+ + value_offset);
+ if (size < 0 || size > MAX_FIXED_BYTES - 1)
+ throw new RuntimeDatabaseException("Illegal value size=" + size);
+ if (offset + size > bytes.length)
+ throw new RuntimeDatabaseException("Illegal value region: offset=" + offset + " size=" + size + " buffer length=" + bytes.length);
+ checkBufferSpace(12 + size);
+ addByte(OpEnum.Modify.getOrMask());
+ ri |= (value_offset >>> 56) << 14; // add top two bits
+ addShort((short) ri);
+ value_offset &= (1L << 56) - 1;
+ addLong7(value_offset);
+ addShort((short) size);
+ if (DEBUG)
+ System.out.println("Modify value fixed part end offset=" + byteIndex);
+ int copied = Math.min(size, this.bytes.length - byteIndex);
+ System.arraycopy(bytes, offset, this.bytes, byteIndex, copied);
+ byteIndex += size;
+ return copied;
+ }
+
+// private final void modiValueBig(int ri, long voffset, int left, byte[] bytes, int offset) {
+// checkBufferSpace(0);
+// int current = Math.min(this.bytes.length - byteIndex - 12, left);
+// if(current >= 0) {
+// int written = modiValue(ri, voffset, bytes, offset, current);
+// voffset += written;
+// offset += written;
+// left -= written;
+// }
+//// flushInternal(graphSession, clusterUID);
+// while (left > 0) {
+// int length = Math.min(left, (1 << 16) - 1);
+// if (DEBUG)
+// printlnd("Modify big value ri=" + ri + " vo=" + voffset + " len=" + length);
+// int psize = length + 12;
+//// setHeaderVectorSize(psize);
+// byte[] message = new byte[psize/*+HEADER_SIZE*/];
+//// System.arraycopy(header, 0, message, 0, HEADER_SIZE);
+// int to = 0;
+// Bytes.write(message, to++, OpEnum.Modify.getOrMask());
+// short index = (short)(ri | (voffset >>> 56)<<14); // add top two bits
+// Bytes.writeLE(message, to, index); to += 2;
+// Bytes.writeLE7(message, to, voffset & ((1L << 56) - 1)); to += 7;
+// Bytes.writeLE(message, to, (short)length); to += 2;
+// System.arraycopy(bytes, offset, message, to, length);
+//// graphSession.updateCluster(new UpdateClusterFunction(message));
+// voffset += length;
+// offset += length;
+// left -= length;
+// }
+// }
+
+ private final int setValueBig(int ri, byte[] bytes, int length_) {
+ checkBufferSpace(12);
+ int sum = 0;
+ int voffset = 0;
+ int offset = 0;
+ int left = length_;
+ while (left > 0) {
+ int length = Math.min(left, MAX_FIXED_BYTES - 12 - byteIndex);
+ if (DEBUG)
+ printlnd("Set big value ri=" + ri + " vo=" + voffset + " len=" + length);
+ int written = modiValue(ri, voffset, bytes, offset, length);
+ sum += written;
+ voffset += written;
+ offset += written;
+ left -= written;
+ checkBufferSpace(12);
+ }
+ return sum;
+ }
+
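+ // Small values use a compact header, as implemented below: for lengths under
+ // 32 the SetShort op byte carries length >>> 2 and a 2-byte field packs the
+ // low two length bits above the 14-bit resource index; longer values use a
+ // Set op byte followed by (length << 14 | ri) as a 4-byte int. The payload
+ // bytes follow immediately after the header.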
+ private final int setValueSmall(int ri, byte[] bytes, int length) {
+ checkBufferSpace(5 + length);
+ int pos = byteIndex;
+ int i = length << 14 | ri;
+ if (length < 32) {
+ byte op = (byte) (OpEnum.SetShort.getOrMask() | length >>> 2);
+ addByte(op);
+ short s = (short) i;
+ addShort(s);
+ } else {
+ addByte(OpEnum.Set.getOrMask());
+ addInt(i);
+ }
+ System.arraycopy(bytes, 0, this.bytes, byteIndex, length);
+ byteIndex += length;
+ int len = byteIndex - pos;
+ return len;
+ }
+
+ final void setValue(short index, byte[] bytes) {
+ setValue(index, bytes, bytes.length);
+ }
+
+ final public void setValue(short index, byte[] bytes, int length) {
+ checkInitialization();
+ if (ClusterTraitsBase.isIllegalResourceIndex(index))
+ throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + index);
+ if (DEBUG)
+ printlnd("Set value ri=" + index
+ + " len=" + length
+ + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, length))));
+ int len;
+ /*
+ * The limit for the cluster stream is (1<<18)-1, but using the small
+ * cluster maximum here avoids the conversion to a big cluster.
+ */
+ if (length > ClusterTraitsSmall.VALUE_SIZE_MAX)
+ len = setValueBig(index, bytes, length);
+ else
+ len = setValueSmall(index, bytes, length);
+ if (DEBUG_STAT) {
+ ++info.nValues;
+ info.sValues += len + length;
+ }
+ }
+
+// final void setValue(Change c, byte[] bytes, int length) {
+// short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+// setValue(ri, bytes, length);
+// c.initValue();
+// }
+
+// final void modiValue(Change c, long voffset, int length, byte[] bytes, int offset) {
+// checkInitialization();
+// int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0);
+// if (DEBUG)
+// printlnd("Modify value ri=" + ri
+// + " voff=" + voffset
+// + " vlen=" + length
+// + " blen=" + bytes.length
+// + " boff=" + offset
+// + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, bytes.length))));
+// modiValueBig(ri, voffset, length, bytes, offset);
+// c.init();
+// if (DEBUG_STAT) {
+// ++info.nValues;
+// info.sValues += length;
+// }
+// }
+ final void setImmutable(boolean immutable) {
+ checkInitialization();
+// clusterChange2.setImmutable(immutable);
+ }
+ final void undoValueEx(int resourceIndex) {
+ checkInitialization();
+// clusterChange2.undoValueEx(resourceIndex);
+ }
+ final void setDeleted(boolean deleted) {
+ checkInitialization();
+// clusterChange2.setDeleted(deleted);
+ }
+ final void corrupt() {
+ checkInitialization();
+ addByte((byte)0);
+ }
+
+ public byte[] getBytes() {
+ byte[] copy = new byte[byteIndex];
+ System.arraycopy(bytes, 0, copy, 0, byteIndex);
+ return copy;
+ }
+
+ /**
+ * @param graphSession
+ * @param clusterId
+ * @return true if actually flushed something
+ */
+ final boolean flush(/*GraphSession graphSession,*/ ClusterUID clusterUID) {
+ throw new UnsupportedOperationException();
+// if (byteIndex > 0) {
+// if(DebugPolicy.REPORT_CLUSTER_STREAM)
+// System.err.println("Flush cluster change set stream " + this);
+// setHeaderVectorSize(byteIndex);
+// byte[] copy = new byte[byteIndex + HEADER_SIZE];
+// System.arraycopy(header, 0, copy, 0, HEADER_SIZE);
+// System.arraycopy(bytes, 0, copy, HEADER_SIZE, byteIndex);
+// UpdateClusterFunction updateClusterFunction = new UpdateClusterFunction(copy);
+// if (DEBUG_CCS) {
+// for (DebugStm stm : debugStms)
+// printlnd(stm.toString2());
+// debugStms.clear();
+// }
+// if (DEBUG_STAT) {
+// info.tot = updateClusterFunction.operation.length;
+// printlnd("ReallyFlush: " + info.toString());
+// sum.add(info);
+// printlnd("ReallyFlush sum: " + sum.toString());
+// }
+// // long start = System.nanoTime();
+// graphSession.updateCluster(updateClusterFunction);
+// // long duration = System.nanoTime() - start;
+// // duration2 += duration;
+// // System.err.println("updateCluster " + 1e-9*duration);
+// // System.err.println("updateCluster total " + 1e-9*duration2);
+// clear();
+// clusterChange2.flush(graphSession);
+// return true;
+// } else if (clusterChange2.isDirty()) {
+// clusterChange2.flush(graphSession);
+// clear();
+// return true;
+// } else if (flushed) {
+// flushed = false;
+// return true;
+// } else {
+// return true;
+// }
+ }
+
+ final void flushInternal(ClusterUID clusterUID) {
+ throw new UnsupportedOperationException();
+// flush(graphSession, clusterUID);
+// flushed = true;
+ }
+
+ final class ForeignTable {
+ private final TLongIntHashMap table = new TLongIntHashMap();
+
+ private long createKey(short index, long cluster) {
+ assert (cluster <= (1L << 48) - 1);
+ return (cluster << 14) | index;
+ }
+
+ public int get(short index, long cluster) {
+ int value = table.get(createKey(index, cluster));
+ if (DEBUG)
+ printlnd("ForeignTable get c=" + clusterUID + " i="
+ + (value - 1) + " r=" + index + " rc=" + cluster);
+ return value;
+ }
+
+ public int put(short index, long cluster, int value) {
+ if (DEBUG)
+ printlnd("ForeignTable put c=" + clusterUID + " i="
+ + (value - 1) + " r=" + index + " rc=" + cluster);
+ return table.put(createKey(index, cluster), value);
+ }
+
+ public int size() {
+ return table.size();
+ }
+
+ public void clear() {
+ table.clear();
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return 31*clusterUID.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object object) {
+ if (this == object)
+ return true;
+ else if (object == null)
+ return false;
+ else if (!(object instanceof ClusterChange))
+ return false;
+ ClusterChange r = (ClusterChange)object;
+ return r.clusterUID.equals(clusterUID);
+ }
+
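+ // flush() frames the pending operation bytes: a little-endian int 1
+ // (presumably the change-set VERSION), the 16-byte cluster UID, the block
+ // length, the block itself and a trailing zero int. The frame is then
+ // LZ4-compressed, and the compressed form is kept only when it is actually
+ // smaller than the raw frame.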
+ public void flush() {
+
+ if(byteIndex > 0) {
+
+ final ClusterUID cuid = clusterUID;
+
+ byte[] block = getBytes();
+ byte[] raw = new byte[block.length + 28];
+ Bytes.writeLE(raw, 0, 1);
+ System.arraycopy(cuid.asBytes(), 0, raw, 4, 16);
+ Bytes.writeLE(raw, 20, block.length);
+ System.arraycopy(block, 0, raw, 24, block.length);
+ Bytes.writeLE(raw, 24+block.length, 0);
+
+ ByteBuffer rawBB = ByteBuffer.wrap(raw);
+ ByteBuffer outputBB = ByteBuffer.allocate(raw.length + raw.length/8);
+ //outputBB.order(ByteOrder.LITTLE_ENDIAN);
+ int compressedSize = Compressions.get(Compressions.LZ4).compressBuffer(rawBB, 0, raw.length, outputBB, 0);
+
+ byte[] data_ = null;
+ if(compressedSize < raw.length) {
+ data_ = new byte[compressedSize];
+ outputBB.get(data_,0,compressedSize);
+ } else {
+ data_ = raw;
+ }
+
+ byte[] data = new byte[data_.length+24];
+ Bytes.writeLE(data, 0, 0);
+ Bytes.write